repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
Duoxilian/home-assistant
|
homeassistant/components/switch/orvibo.py
|
29
|
3065
|
"""
Support for Orvibo S20 Wifi Smart Switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.orvibo/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_SWITCHES, CONF_MAC, CONF_DISCOVERY)
import homeassistant.helpers.config_validation as cv
# PyPI requirement installed on demand by Home Assistant for this platform.
REQUIREMENTS = ['orvibo==1.1.1']

_LOGGER = logging.getLogger(__name__)

# Fallback friendly name for switches configured without an explicit name.
DEFAULT_NAME = 'Orvibo S20 Switch'
# Network discovery of S20 devices is enabled unless the user turns it off.
DEFAULT_DISCOVERY = True

# Config schema: a (possibly empty) list of switch entries, each requiring a
# host and optionally a MAC address and name, plus a discovery toggle.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_SWITCHES, default=[]):
        vol.All(cv.ensure_list, [{
            vol.Required(CONF_HOST): cv.string,
            vol.Optional(CONF_MAC): cv.string,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
        }]),
    vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """Setup S20 switches."""
    from orvibo.s20 import discover, S20, S20Exception

    candidates = {}
    if config.get(CONF_DISCOVERY):
        _LOGGER.info("Discovering S20 switches ...")
        candidates.update(discover())

    # Explicitly configured entries win over discovered ones for the same
    # host, since they may carry a MAC address and a friendly name.
    for entry in config.get(CONF_SWITCHES, [config]):
        candidates[entry.get(CONF_HOST)] = entry

    devices = []
    for host, entry in candidates.items():
        try:
            device = S20(host, mac=entry.get(CONF_MAC))
        except S20Exception:
            _LOGGER.error("S20 at %s couldn't be initialized", host)
            continue
        devices.append(S20Switch(entry.get(CONF_NAME), device))
        _LOGGER.info("Initialized S20 at %s", host)

    add_devices_callback(devices)
class S20Switch(SwitchDevice):
    """Representation of an S20 switch."""

    def __init__(self, name, s20):
        """Initialize the S20 device."""
        # Imported lazily so the module loads even without the dependency.
        from orvibo.s20 import S20Exception

        self._name = name
        self._s20 = s20
        self._state = False
        self._exc = S20Exception

    @property
    def should_poll(self):
        """Polling is needed."""
        return True

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def update(self):
        """Update device state."""
        try:
            self._state = self._s20.on
        except self._exc:
            _LOGGER.exception("Error while fetching S20 state")

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._apply(True, "Error while turning on S20")

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._apply(False, "Error while turning off S20")

    def _apply(self, target, error_message):
        # Shared on/off path: write the desired state, log on device error.
        try:
            self._s20.on = target
        except self._exc:
            _LOGGER.exception(error_message)
|
mit
|
jlegendary/Dato-Core
|
src/unity/python/graphlab/meta/asttools/visitors/cond_symbol_visitor.py
|
13
|
11320
|
'''
Created on Aug 4, 2011
@author: sean
'''
from __future__ import print_function
from graphlab.meta.asttools.visitors import Visitor, visit_children
from graphlab.meta.asttools.visitors.symbol_visitor import get_symbols
import ast
from graphlab.meta.utils import py2op
class ConditionalSymbolVisitor(Visitor):
    """AST visitor that classifies symbols as conditionally or stably
    assigned (lhs) / used (rhs).

    A symbol is *stable* when every path through the visited code touches
    it, and *conditional* when only some paths do (one branch of an ``if``,
    a loop body that may run zero times, an except handler, ...).
    ``undefined`` collects symbols read before any visible assignment.
    """

    def __init__(self):
        # Invariant (asserted by the properties below): the cond_* and
        # stable_* sets for a side are disjoint.
        self._cond_lhs = set()
        self._stable_lhs = set()
        self._cond_rhs = set()
        self._stable_rhs = set()
        # Symbols loaded before being stored in the visited scope.
        self.undefined = set()
        # After a break/continue, later statements may not execute, so
        # would-be stable updates are demoted to conditional.
        self.seen_break = False

    # Modules and `pass` contribute nothing themselves; just recurse.
    visitModule = visit_children
    visitPass = visit_children

    def update_stable_rhs(self, symbols):
        """Record *symbols* as certainly used (unless after a break)."""
        new_symbols = symbols - self._stable_rhs
        # A use of a symbol never stored so far is an undefined read.
        self._update_undefined(new_symbols)
        if self.seen_break:
            self._cond_rhs.update(new_symbols)
        else:
            # Promotion to stable removes any prior conditional mark.
            self._cond_rhs -= new_symbols
            self._stable_rhs.update(new_symbols)

    def update_stable_lhs(self, symbols):
        """Record *symbols* as certainly assigned (unless after a break)."""
        new_symbols = symbols - self._stable_lhs
        if self.seen_break:
            self._cond_lhs.update(new_symbols)
        else:
            self._cond_lhs -= new_symbols
            self._stable_lhs.update(new_symbols)

    def update_cond_rhs(self, symbols):
        """Record *symbols* as used on some (not all) paths."""
        new_symbols = symbols - self._stable_rhs
        self._update_undefined(new_symbols)
        self._cond_rhs.update(new_symbols)

    def update_cond_lhs(self, symbols):
        """Record *symbols* as assigned on some (not all) paths."""
        self._cond_lhs.update(symbols - self._stable_lhs)

    def _update_undefined(self, symbols):
        # Used symbols with no stable assignment yet are undefined reads.
        self.undefined.update(symbols - self._stable_lhs)

    update_undefined = _update_undefined

    @property
    def stable_lhs(self):
        """Symbols assigned on every path."""
        assert not (self._stable_lhs & self._cond_lhs)
        return self._stable_lhs

    @property
    def stable_rhs(self):
        """Symbols used on every path."""
        assert not (self._stable_rhs & self._cond_rhs)
        return self._stable_rhs

    @property
    def cond_rhs(self):
        """Symbols used only on some paths."""
        assert not (self._stable_rhs & self._cond_rhs)
        return self._cond_rhs

    @property
    def cond_lhs(self):
        """Symbols assigned only on some paths."""
        assert not (self._stable_lhs & self._cond_lhs)
        return self._cond_lhs

    @property
    def lhs(self):
        """All assigned symbols, conditional or stable."""
        assert not (self._stable_lhs & self._cond_lhs)
        return self._cond_lhs | self._stable_lhs

    @property
    def rhs(self):
        """All used symbols, conditional or stable."""
        assert not (self._stable_rhs & self._cond_rhs)
        return self._cond_rhs | self._stable_rhs

    def visitAugAssign(self, node):
        # `x += y` both reads and writes its target.
        values = get_symbols(node.value)
        self.update_stable_rhs(values)
        targets = get_symbols(node.target)
        self.update_stable_rhs(targets)
        self.update_stable_lhs(targets)

    def visitAssign(self, node):
        ids = set()
        for target in node.targets:
            ids.update(get_symbols(target, ast.Store))
        rhs_ids = get_symbols(node.value, ast.Load)
        # Subscript/attribute targets (e.g. `a[i] = v`) also *load* names.
        for target in node.targets:
            rhs_ids.update(get_symbols(target, ast.Load))
        self.update_stable_rhs(rhs_ids)
        self.update_stable_lhs(ids)

    def visitBreak(self, node):
        self.seen_break = True

    # NOTE(review): continue is treated exactly like break here — both make
    # the rest of the block conditional; the shared flag name is historical.
    def visitContinue(self, node):
        self.seen_break = True

    def visit_loop(self, node):
        """Common body/orelse handling for For and While nodes."""
        gen = ConditionalSymbolVisitor()
        for stmnt in node.body:
            gen.visit(stmnt)
        self.update_cond_lhs(gen.cond_lhs)
        self.update_cond_rhs(gen.cond_rhs)
        outputs = gen.stable_lhs
        inputs = gen.stable_rhs

        gen = ConditionalSymbolVisitor()
        for stmnt in node.orelse:
            gen.visit(stmnt)
        self.update_cond_rhs(gen.cond_rhs)
        self.update_cond_lhs(gen.cond_lhs)
        orelse_outputs = gen.stable_lhs
        orelse_inputs = gen.stable_rhs

        # Stable in both branches -> stable overall; stable in exactly one
        # branch -> conditional overall.
        self.update_stable_lhs(outputs.intersection(orelse_outputs))
        self.update_stable_rhs(inputs.intersection(orelse_inputs))
        self.update_cond_lhs(outputs.symmetric_difference(orelse_outputs))
        self.update_cond_rhs(inputs.symmetric_difference(orelse_inputs))

    def visitFor(self, node):
        # The loop target is conditional: the iterable may be empty.
        lhs_symbols = get_symbols(node.target, ast.Store)
        self.update_cond_lhs(lhs_symbols)
        rhs_symbols = get_symbols(node.iter, ast.Load)
        self.update_stable_rhs(rhs_symbols)
        # Target names not already undefined before the loop must not be
        # reported as undefined because of uses inside the body.
        remove_from_undef = lhs_symbols - self.undefined
        self.visit_loop(node)
        self.undefined -= remove_from_undef

    def visitExpr(self, node):
        rhs_ids = get_symbols(node, ast.Load)
        self.update_stable_rhs(rhs_ids)

    # Python 2 `print` statement node: pure use of its operands.
    def visitPrint(self, node):
        rhs_ids = get_symbols(node, ast.Load)
        self.update_stable_rhs(rhs_ids)

    def visitWhile(self, node):
        # The test is always evaluated at least once.
        rhs_symbols = get_symbols(node.test, ast.Load)
        self.update_stable_rhs(rhs_symbols)
        self.visit_loop(node)

    def visitIf(self, node):
        rhs_symbols = get_symbols(node.test, ast.Load)
        self.update_stable_rhs(rhs_symbols)

        gen = ConditionalSymbolVisitor()
        for stmnt in node.body:
            gen.visit(stmnt)
        # A break inside the taken branch affects the enclosing block too.
        if gen.seen_break:
            self.seen_break = True
        self.update_cond_lhs(gen._cond_lhs)
        self.update_cond_rhs(gen._cond_rhs)
        outputs = gen.stable_lhs
        inputs = gen.stable_rhs

        gen = ConditionalSymbolVisitor()
        for stmnt in node.orelse:
            gen.visit(stmnt)
        self.update_cond_lhs(gen._cond_lhs)
        self.update_cond_rhs(gen._cond_rhs)
        orelse_outputs = gen.stable_lhs
        orelse_inputs = gen.stable_rhs

        # Same merge rule as visit_loop: both branches -> stable,
        # one branch -> conditional.
        self.update_stable_lhs(outputs.intersection(orelse_outputs))
        self.update_stable_rhs(inputs.intersection(orelse_inputs))
        self.update_cond_lhs(outputs.symmetric_difference(orelse_outputs))
        self.update_cond_rhs(inputs.symmetric_difference(orelse_inputs))

    # Python 2 only: `exec body in globals, locals`.
    @py2op
    def visitExec(self, node):
        self.update_stable_rhs(get_symbols(node.body, ast.Load))
        if node.globals:
            self.update_stable_rhs(get_symbols(node.globals, ast.Load))
        if node.locals:
            self.update_stable_rhs(get_symbols(node.locals, ast.Load))

    def visitAssert(self, node):
        self.update_stable_rhs(get_symbols(node.test, ast.Load))
        if node.msg:
            self.update_stable_rhs(get_symbols(node.msg, ast.Load))

    # Python 2 raise form: `raise type, inst, tback`.
    @py2op
    def visitRaise(self, node):
        if node.type:
            self.update_stable_rhs(get_symbols(node.type, ast.Load))
        if node.inst:
            self.update_stable_rhs(get_symbols(node.inst, ast.Load))
        if node.tback:
            self.update_stable_rhs(get_symbols(node.tback, ast.Load))

    # Python 3 raise form: `raise exc from cause`.
    @visitRaise.py3op
    def visitRaise(self, node):
        if node.exc:
            self.update_stable_rhs(get_symbols(node.exc, ast.Load))
        if node.cause:
            self.update_stable_rhs(get_symbols(node.cause, ast.Load))

    def visitTryExcept(self, node):
        gen = ConditionalSymbolVisitor()
        gen.visit_list(node.body)
        self.update_undefined(gen.undefined)

        # One fresh visitor per handler (csv is the module-level helper).
        handlers = [csv(hndlr) for hndlr in node.handlers]
        for g in handlers:
            self.update_undefined(g.undefined)

        # Stable only if stable in the try body AND every handler.
        stable_rhs = gen.stable_rhs.intersection(*[g.stable_rhs for g in handlers])
        self.update_stable_rhs(stable_rhs)
        all_rhs = gen.rhs.union(*[g.rhs for g in handlers])
        self.update_cond_rhs(all_rhs - stable_rhs)

        stable_lhs = gen.stable_lhs.intersection(*[g.stable_lhs for g in handlers])
        self.update_stable_lhs(stable_lhs)
        all_lhs = gen.lhs.union(*[g.lhs for g in handlers])
        self.update_cond_lhs(all_lhs - stable_lhs)

        # The else clause only runs when no exception occurred.
        gen = ConditionalSymbolVisitor()
        gen.visit_list(node.orelse)
        self.update_undefined(gen.undefined)
        self.update_cond_lhs(gen.lhs)
        self.update_cond_rhs(gen.rhs)

    # Python 2: `except type, name` where name is an expression node.
    @py2op
    def visitExceptHandler(self, node):
        if node.type:
            self.update_stable_rhs(get_symbols(node.type, ast.Load))
        if node.name:
            self.update_stable_lhs(get_symbols(node.name, ast.Store))
        self.visit_list(node.body)

    # Python 3: `except type as name` where name is a plain string.
    @visitExceptHandler.py3op
    def visitExceptHandler(self, node):
        if node.type:
            self.update_stable_rhs(get_symbols(node.type, ast.Load))
        if node.name:
            self.update_stable_lhs({node.name})
        self.visit_list(node.body)

    def visitTryFinally(self, node):
        # Both parts always execute on the non-exceptional path.
        self.visit_list(node.body)
        self.visit_list(node.finalbody)

    def visitImportFrom(self, node):
        symbols = get_symbols(node)
        self.update_stable_lhs(symbols)

    def visitImport(self, node):
        symbols = get_symbols(node)
        self.update_stable_lhs(symbols)

    def visitLambda(self, node):
        # Analyse the lambda in its own scope; only names it leaves
        # undefined are uses from the enclosing scope.
        gen = ConditionalSymbolVisitor()
        gen.update_stable_lhs(symbols={arg for arg in node.args.args})
        gen.visit_list(node.body)
        self.update_stable_rhs(gen.undefined)

    def visitFunctionDef(self, node):
        # Decorators are evaluated in the enclosing scope at def time.
        for decorator in node.decorator_list:
            self.update_stable_rhs(get_symbols(decorator, ast.Load))
        # The function name itself is assigned in the enclosing scope.
        self.update_stable_lhs({node.name})

        # Same free-variable trick as visitLambda.
        gen = ConditionalSymbolVisitor()
        gen.update_stable_lhs(symbols={arg for arg in node.args.args})
        gen.visit_list(node.body)
        self.update_stable_rhs(gen.undefined)

    def visitGlobal(self, node):
        # Declarations only; no use or assignment by themselves.
        pass

    def visitWith(self, node):
        self.update_stable_rhs(get_symbols(node.context_expr, ast.Load))
        if node.optional_vars:
            self.update_stable_lhs(get_symbols(node.optional_vars, ast.Load))
        self.visit_list(node.body)

    def visitReturn(self, node):
        self.update_stable_rhs(get_symbols(node.value, ast.Load))
def csv(node):
    """Visit *node* with a fresh ConditionalSymbolVisitor and return it."""
    visitor = ConditionalSymbolVisitor()
    visitor.visit(node)
    return visitor
def lhs(node):
    '''
    Return a set of symbols in `node` that are assigned.

    :param node: ast node, or a list/tuple of ast nodes
    :returns: set of strings.
    '''
    visitor = ConditionalSymbolVisitor()
    visit = visitor.visit_list if isinstance(node, (list, tuple)) else visitor.visit
    visit(node)
    return visitor.lhs
def rhs(node):
    '''
    Return a set of symbols in `node` that are used.

    :param node: ast node, or a list/tuple of ast nodes
    :returns: set of strings.
    '''
    visitor = ConditionalSymbolVisitor()
    visit = visitor.visit_list if isinstance(node, (list, tuple)) else visitor.visit
    visit(node)
    return visitor.rhs
def conditional_lhs(node):
    '''
    Group assigned symbols into conditional and stable sets.

    :param node: ast node
    :returns: tuple of (conditional, stable)
    '''
    visitor = ConditionalSymbolVisitor()
    visitor.visit(node)
    return visitor.cond_lhs, visitor.stable_lhs
def conditional_symbols(node):
    '''
    Group lhs and rhs symbols into conditional, stable and undefined sets.

    :param node: ast node
    :returns: tuple of (conditional_lhs, stable_lhs), (conditional_rhs, stable_rhs), undefined
    '''
    visitor = ConditionalSymbolVisitor()
    visitor.visit(node)
    return (
        (visitor.cond_lhs, visitor.stable_lhs),
        (visitor.cond_rhs, visitor.stable_rhs),
        visitor.undefined,
    )
if __name__ == '__main__':
    # Manual smoke test: the break makes everything after it conditional.
    # NOTE(review): the snippet's internal indentation was reconstructed —
    # the extraction flattened it; verify against the upstream file.
    source = '''
while k:
    a = 1
    b = 1
    break
    d = 1
else:
    a =2
    c= 3
d = 1
'''
    print(conditional_lhs(ast.parse(source)))
|
agpl-3.0
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/pypy/lib_pypy/pyrepl/simple_interact.py
|
3
|
2395
|
# Copyright 2000-2010 Michael Hudson-Doyle <micahel@gmail.com>
# Armin Rigo
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""This is an alternative to python_reader which tries to emulate
the CPython prompt as closely as possible, with the exception of
allowing multiline input and multiline history entries.
"""
import sys
from pyrepl.readline import multiline_input, _error, _get_reader
def check():
    """Return False if there is a problem initializing the readline state,
    True otherwise."""
    try:
        _get_reader()
    except _error:
        return False
    else:
        return True
def run_multiline_interactive_console(mainmodule=None):
    """Run a CPython-like interactive console on top of pyrepl's
    multiline input, using *mainmodule* (default: __main__) as namespace."""
    import code
    if mainmodule is None:
        import __main__ as mainmodule
    console = code.InteractiveConsole(mainmodule.__dict__)

    def more_lines(unicodetext):
        # ooh, look at the hack:
        # NOTE(review): str + bytes concatenation — this only works on
        # Python 2 (pypy lib_pypy heritage); confirm target interpreter.
        src = "#coding:utf-8\n"+unicodetext.encode('utf-8')
        try:
            code = console.compile(src, '<input>', 'single')
        except (OverflowError, SyntaxError, ValueError):
            # Invalid input: stop asking for more lines; push() will
            # surface the syntax error.
            return False
        else:
            # None means "incomplete statement" -> keep reading lines.
            return code is None

    while 1:
        try:
            ps1 = getattr(sys, 'ps1', '>>> ')
            ps2 = getattr(sys, 'ps2', '... ')
            try:
                statement = multiline_input(more_lines, ps1, ps2,
                                            returns_unicode=True)
            except EOFError:
                break
            # more_lines already ensured the statement is complete, so
            # push() must never ask for a continuation here.
            more = console.push(statement)
            assert not more
        except KeyboardInterrupt:
            console.write("\nKeyboardInterrupt\n")
            console.resetbuffer()
|
agpl-3.0
|
liukaijv/XlsxWriter
|
xlsxwriter/test/app/test_app02.py
|
8
|
2248
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...app import App
class TestAssembleApp(unittest.TestCase):
    """
    Test assembling a complete App file.
    """

    def test_assemble_xml_file(self):
        """Test writing an App file."""
        self.maxDiff = None

        # Assemble the docProps/app.xml part into an in-memory handle.
        fh = StringIO()
        app = App()
        app._set_filehandle(fh)

        # Two worksheet part names plus a matching heading pair drive the
        # <TitlesOfParts> and <HeadingPairs> sections asserted below.
        app._add_part_name('Sheet1')
        app._add_part_name('Sheet2')
        app._add_heading_pair(('Worksheets', 2))

        app._assemble_xml_file()

        exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes">
<Application>Microsoft Excel</Application>
<DocSecurity>0</DocSecurity>
<ScaleCrop>false</ScaleCrop>
<HeadingPairs>
<vt:vector size="2" baseType="variant">
<vt:variant>
<vt:lpstr>Worksheets</vt:lpstr>
</vt:variant>
<vt:variant>
<vt:i4>2</vt:i4>
</vt:variant>
</vt:vector>
</HeadingPairs>
<TitlesOfParts>
<vt:vector size="2" baseType="lpstr">
<vt:lpstr>Sheet1</vt:lpstr>
<vt:lpstr>Sheet2</vt:lpstr>
</vt:vector>
</TitlesOfParts>
<Company>
</Company>
<LinksUpToDate>false</LinksUpToDate>
<SharedDoc>false</SharedDoc>
<HyperlinksChanged>false</HyperlinksChanged>
<AppVersion>12.0000</AppVersion>
</Properties>
""")

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
|
bsd-2-clause
|
bobisme/odoo
|
addons/mrp_operations/mrp_operations.py
|
17
|
26993
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import time
from datetime import datetime
from openerp.tools.translate import _
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class stock_move(osv.osv):
    """Extend stock.move with the inverse of move_dest_id: the child moves
    that consume this move's output."""
    _inherit = 'stock.move'
    _columns = {
        'move_dest_id_lines': fields.one2many('stock.move','move_dest_id', 'Children Moves')
    }
class mrp_production_workcenter_line(osv.osv):
    """Work order line: adds state tracking, planning dates and links to the
    parent manufacturing order on mrp.production.workcenter.line."""

    def _get_date_end(self, cr, uid, ids, field_name, arg, context=None):
        """ Finds ending date.
        @return: Dictionary of values.
        """
        ops = self.browse(cr, uid, ids, context=context)
        # Batch the calendar lookups: one interval query per distinct
        # (planned date, hours, calendar) triple.
        date_and_hours_by_cal = [(op.date_planned, op.hour, op.workcenter_id.calendar_id.id) for op in ops if op.date_planned]
        intervals = self.pool.get('resource.calendar').interval_get_multi(cr, uid, date_and_hours_by_cal)
        res = {}
        for op in ops:
            res[op.id] = False
            if op.date_planned:
                i = intervals.get((op.date_planned, op.hour, op.workcenter_id.calendar_id.id))
                if i:
                    # End of the last working interval covering op.hour.
                    res[op.id] = i[-1][1].strftime('%Y-%m-%d %H:%M:%S')
                else:
                    # No calendar interval: fall back to the planned date.
                    res[op.id] = op.date_planned
        return res

    def onchange_production_id(self, cr, uid, ids, production_id, context=None):
        """Propagate product, quantity and UoM from the selected
        manufacturing order onto the work order form."""
        if not production_id:
            return {}
        production = self.pool.get('mrp.production').browse(cr, uid, production_id, context=None)
        result = {
            'product': production.product_id.id,
            'qty': production.product_qty,
            'uom': production.product_uom.id,
        }
        return {'value': result}

    _inherit = 'mrp.production.workcenter.line'
    _order = "sequence, date_planned"
    _columns = {
        'state': fields.selection([('draft','Draft'),('cancel','Cancelled'),('pause','Pending'),('startworking', 'In Progress'),('done','Finished')],'Status', readonly=True, copy=False,
                                  help="* When a work order is created it is set in 'Draft' status.\n" \
                                       "* When user sets work order in start mode that time it will be set in 'In Progress' status.\n" \
                                       "* When work order is in running mode, during that time if user wants to stop or to make changes in order then can set in 'Pending' status.\n" \
                                       "* When the user cancels the work order it will be set in 'Canceled' status.\n" \
                                       "* When order is completely processed that time it is set in 'Finished' status."),
        'date_planned': fields.datetime('Scheduled Date', select=True),
        'date_planned_end': fields.function(_get_date_end, string='End Date', type='datetime'),
        'date_start': fields.datetime('Start Date'),
        'date_finished': fields.datetime('End Date'),
        'delay': fields.float('Working Hours',help="The elapsed time between operation start and stop in this Work Center",readonly=True),
        # Mirrors of the parent production order, for list views/filters.
        'production_state':fields.related('production_id','state',
                type='selection',
                selection=[('draft','Draft'),('confirmed','Waiting Goods'),('ready','Ready to Produce'),('in_production','In Production'),('cancel','Canceled'),('done','Done')],
                string='Production Status', readonly=True),
        'product':fields.related('production_id','product_id',type='many2one',relation='product.product',string='Product',
                readonly=True),
        'qty':fields.related('production_id','product_qty',type='float',string='Qty',readonly=True, store=True),
        'uom':fields.related('production_id','product_uom',type='many2one',relation='product.uom',string='Unit of Measure',readonly=True),
    }

    _defaults = {
        'state': 'draft',
        'delay': 0.0,
        'production_state': 'draft'
    }

    def modify_production_order_state(self, cr, uid, ids, action):
        """ Modifies production order state if work order state is changed.
        @param action: Action to perform.
        @return: Nothing
        """
        prod_obj_pool = self.pool.get('mrp.production')
        oper_obj = self.browse(cr, uid, ids)[0]
        prod_obj = oper_obj.production_id
        if action == 'start':
            if prod_obj.state =='confirmed':
                prod_obj_pool.force_production(cr, uid, [prod_obj.id])
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state =='ready':
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state =='in_production':
                return
            else:
                raise osv.except_osv(_('Error!'),_('Manufacturing order cannot be started in state "%s"!') % (prod_obj.state,))
        else:
            # Non-start actions: close the production order once no open
            # (not-done) work order remains.
            open_count = self.search_count(cr,uid,[('production_id','=',prod_obj.id), ('state', '!=', 'done')])
            flag = not bool(open_count)
            if flag:
                for production in prod_obj_pool.browse(cr, uid, [prod_obj.id], context= None):
                    if production.move_lines or production.move_created_ids:
                        prod_obj_pool.action_produce(cr,uid, production.id, production.product_qty, 'consume_produce', context = None)
                prod_obj_pool.signal_workflow(cr, uid, [oper_obj.production_id.id], 'button_produce_done')
        return

    def write(self, cr, uid, ids, vals, context=None, update=True):
        """Write work order values; when the planned date moves and *update*
        is set, pull the parent production's start date back if needed."""
        result = super(mrp_production_workcenter_line, self).write(cr, uid, ids, vals, context=context)
        prod_obj = self.pool.get('mrp.production')
        if vals.get('date_planned', False) and update:
            for prod in self.browse(cr, uid, ids, context=context):
                if prod.production_id.workcenter_lines:
                    # Earliest of the new date and the first line's date
                    # (string comparison works for %Y-%m-%d %H:%M:%S).
                    dstart = min(vals['date_planned'], prod.production_id.workcenter_lines[0]['date_planned'])
                    prod_obj.write(cr, uid, [prod.production_id.id], {'date_start':dstart}, context=context, mini=False)
        return result

    def action_draft(self, cr, uid, ids, context=None):
        """ Sets state to draft.
        @return: True
        """
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)

    def action_start_working(self, cr, uid, ids, context=None):
        """ Sets state to start working and writes starting date.
        @return: True
        """
        self.modify_production_order_state(cr, uid, ids, 'start')
        self.write(cr, uid, ids, {'state':'startworking', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
        return True

    def action_done(self, cr, uid, ids, context=None):
        """ Sets state to done, writes finish date and calculates delay.
        @return: True
        """
        delay = 0.0
        date_now = time.strftime('%Y-%m-%d %H:%M:%S')
        obj_line = self.browse(cr, uid, ids[0])

        date_start = datetime.strptime(obj_line.date_start,'%Y-%m-%d %H:%M:%S')
        date_finished = datetime.strptime(date_now,'%Y-%m-%d %H:%M:%S')
        # Elapsed time expressed in fractional hours.
        delay += (date_finished-date_start).days * 24
        delay += (date_finished-date_start).seconds / float(60*60)

        self.write(cr, uid, ids, {'state':'done', 'date_finished': date_now,'delay':delay}, context=context)
        self.modify_production_order_state(cr,uid,ids,'done')
        return True

    def action_cancel(self, cr, uid, ids, context=None):
        """ Sets state to cancel.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'cancel'}, context=context)

    def action_pause(self, cr, uid, ids, context=None):
        """ Sets state to pause.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'pause'}, context=context)

    def action_resume(self, cr, uid, ids, context=None):
        """ Sets state to startworking.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'startworking'}, context=context)
class mrp_production(osv.osv):
    """Extend mrp.production with work-order scheduling: planned dates are
    propagated between the order and its work center lines, and date moves
    cascade to related productions through stock moves."""
    _inherit = 'mrp.production'
    _columns = {
        'allow_reorder': fields.boolean('Free Serialisation', help="Check this to be able to move independently all production orders, without moving dependent ones."),
    }

    def _production_date_end(self, cr, uid, ids, prop, unknow_none, context=None):
        """ Calculates planned end date of production order.
        @return: Dictionary of values
        """
        result = {}
        for prod in self.browse(cr, uid, ids, context=context):
            result[prod.id] = prod.date_planned
            # End date is the latest planned end among the work orders.
            for line in prod.workcenter_lines:
                result[prod.id] = max(line.date_planned_end, result[prod.id])
        return result

    def action_production_end(self, cr, uid, ids):
        """ Finishes work order if production order is done.
        @return: Super method
        """
        obj = self.browse(cr, uid, ids)[0]
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        for workcenter_line in obj.workcenter_lines:
            # Draft lines must be started before they can be finished.
            if workcenter_line.state == 'draft':
                workcenter_line.signal_workflow('button_start_working')
            workcenter_line.signal_workflow('button_done')
        return super(mrp_production,self).action_production_end(cr, uid, ids)

    def action_in_production(self, cr, uid, ids):
        """ Changes state to In Production and writes starting date.
        @return: True
        """
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        for prod in self.browse(cr, uid, ids):
            if prod.workcenter_lines:
                # Kick off only the first work order; the rest follow.
                workcenter_pool.signal_workflow(cr, uid, [prod.workcenter_lines[0].id], 'button_start_working')
        return super(mrp_production,self).action_in_production(cr, uid, ids)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels work order if production order is canceled.
        @return: Super method
        """
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        obj = self.browse(cr, uid, ids,context=context)[0]
        workcenter_pool.signal_workflow(cr, uid, [record.id for record in obj.workcenter_lines], 'button_cancel')
        return super(mrp_production,self).action_cancel(cr,uid,ids,context=context)

    def _compute_planned_workcenter(self, cr, uid, ids, context=None, mini=False):
        """ Computes planned and finished dates for work order.
        @return: Calculated date
        """
        dt_end = datetime.now()
        if context is None:
            context = {}
        for po in self.browse(cr, uid, ids, context=context):
            dt_end = datetime.strptime(po.date_planned, '%Y-%m-%d %H:%M:%S')
            if not po.date_start:
                # update=False prevents write() from recursing back here.
                self.write(cr, uid, [po.id], {
                    'date_start': po.date_planned
                }, context=context, update=False)
            old = None
            for wci in range(len(po.workcenter_lines)):
                wc = po.workcenter_lines[wci]
                # Lines sharing a sequence run in parallel and keep the
                # same start; a higher sequence starts at the running end.
                if (old is None) or (wc.sequence>old):
                    dt = dt_end
                if context.get('__last_update'):
                    del context['__last_update']
                if (wc.date_planned < dt.strftime('%Y-%m-%d %H:%M:%S')) or mini:
                    self.pool.get('mrp.production.workcenter.line').write(cr, uid, [wc.id], {
                        'date_planned': dt.strftime('%Y-%m-%d %H:%M:%S')
                    }, context=context, update=False)
                    i = self.pool.get('resource.calendar').interval_get(
                        cr,
                        uid,
                        #passing False makes resource_resource._schedule_hours run 1000 iterations doing nothing
                        wc.workcenter_id.calendar_id and wc.workcenter_id.calendar_id.id or None,
                        dt,
                        wc.hour or 0.0
                    )
                    if i:
                        dt_end = max(dt_end, i[-1][1])
                else:
                    dt_end = datetime.strptime(wc.date_planned_end, '%Y-%m-%d %H:%M:%S')

                old = wc.sequence or 0
            # Bypass this class's write() to avoid re-triggering planning.
            super(mrp_production, self).write(cr, uid, [po.id], {
                'date_finished': dt_end
            })
        return dt_end

    def _move_pass(self, cr, uid, ids, context=None):
        """ Calculates start date for stock moves finding interval from resource calendar.
        @return: True
        """
        for po in self.browse(cr, uid, ids, context=context):
            if po.allow_reorder:
                continue
            todo = list(po.move_lines)
            dt = datetime.strptime(po.date_start,'%Y-%m-%d %H:%M:%S')
            # Breadth-first walk through dependent (destination) moves.
            while todo:
                l = todo.pop(0)
                if l.state in ('done','cancel','draft'):
                    continue
                todo += l.move_dest_id_lines
                if l.production_id and (l.production_id.date_finished > dt):
                    if l.production_id.state not in ('done','cancel'):
                        for wc in l.production_id.workcenter_lines:
                            i = self.pool.get('resource.calendar').interval_min_get(
                                cr,
                                uid,
                                wc.workcenter_id.calendar_id.id or False,
                                dt, wc.hour or 0.0
                            )
                            dt = i[0][0]
                        if l.production_id.date_start > dt.strftime('%Y-%m-%d %H:%M:%S'):
                            self.write(cr, uid, [l.production_id.id], {'date_start':dt.strftime('%Y-%m-%d %H:%M:%S')}, mini=True)
        return True

    def _move_futur(self, cr, uid, ids, context=None):
        """ Calculates start date for stock moves.
        @return: True
        """
        for po in self.browse(cr, uid, ids, context=context):
            if po.allow_reorder:
                continue
            # Follow each produced move down its destination chain and push
            # the first dependent production that starts too early.
            for line in po.move_created_ids:
                l = line
                while l.move_dest_id:
                    l = l.move_dest_id
                    if l.state in ('done','cancel','draft'):
                        break
                    if l.production_id.state in ('done','cancel'):
                        break
                    if l.production_id and (l.production_id.date_start < po.date_finished):
                        self.write(cr, uid, [l.production_id.id], {'date_start': po.date_finished})
                        break
        return True

    def write(self, cr, uid, ids, vals, context=None, update=True, mini=True):
        """Write production values; replan work orders and cascade date
        moves to related productions when the start date changes."""
        direction = {}
        if vals.get('date_start', False):
            for po in self.browse(cr, uid, ids, context=context):
                # cmp > 0: moved earlier (to the past); < 0: to the future.
                direction[po.id] = cmp(po.date_start, vals.get('date_start', False))
        result = super(mrp_production, self).write(cr, uid, ids, vals, context=context)
        if (vals.get('workcenter_lines', False) or vals.get('date_start', False)) and update:
            self._compute_planned_workcenter(cr, uid, ids, context=context, mini=mini)
        for d in direction:
            if direction[d] == 1:
                # the production order has been moved to the passed
                self._move_pass(cr, uid, [d], context=context)
                pass
            elif direction[d] == -1:
                self._move_futur(cr, uid, [d], context=context)
                # the production order has been moved to the future
                pass
        return result

    def action_compute(self, cr, uid, ids, properties=None, context=None):
        """ Computes bills of material of a product and planned date of work order.
        @param properties: List containing dictionaries of properties.
        @return: No. of products.
        """
        result = super(mrp_production, self).action_compute(cr, uid, ids, properties=properties, context=context)
        self._compute_planned_workcenter(cr, uid, ids, context=context)
        return result
class mrp_operations_operation_code(osv.osv):
    """Operation code: names a start/pause/resume/cancel/done action that can
    be recorded against a work order via barcode or manual entry."""
    _name="mrp_operations.operation.code"
    _columns={
        'name': fields.char('Operation Name', required=True),
        'code': fields.char('Code', size=16, required=True),
        'start_stop': fields.selection([('start','Start'),('pause','Pause'),('resume','Resume'),('cancel','Cancelled'),('done','Done')], 'Status', required=True),
    }
class mrp_operations_operation(osv.osv):
_name="mrp_operations.operation"
    def _order_date_search_production(self, cr, uid, ids, context=None):
        """ Finds operations for a production order.
        @return: List of ids
        """
        # NOTE(review): only ids[0] is used — callers appear to pass a
        # single production id; confirm before batching.
        operation_ids = self.pool.get('mrp_operations.operation').search(cr, uid, [('production_id','=',ids[0])], context=context)
        return operation_ids
    def _get_order_date(self, cr, uid, ids, field_name, arg, context=None):
        """ Calculates planned date for an operation.
        @return: Dictionary of values
        """
        res={}
        operation_obj = self.browse(cr, uid, ids, context=context)
        for operation in operation_obj:
            # An operation is planned for its production order's date.
            res[operation.id] = operation.production_id.date_planned
        return res
    def calc_delay(self, cr, uid, vals):
        """ Calculates delay of work order.
        @return: Delay
        """
        code_lst = []
        time_lst = []
        code_ids = self.pool.get('mrp_operations.operation.code').search(cr, uid, [('id','=',vals['code_id'])])
        code = self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids)[0]
        oper_ids = self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs = self.browse(cr,uid,oper_ids)
        # Build the parallel sequences of recorded actions and timestamps,
        # ending with the action currently being recorded.
        for oper in oper_objs:
            code_lst.append(oper.code_id.start_stop)
            time_lst.append(oper.date_start)

        code_lst.append(code.start_stop)
        time_lst.append(vals['date_start'])
        diff = 0
        for i in range(0,len(code_lst)):
            if code_lst[i] == 'pause' or code_lst[i] == 'done' or code_lst[i] == 'cancel':
                if not i: continue
                # Only count the span since the last start/resume; a
                # stop following anything else contributes no working time.
                if code_lst[i-1] not in ('resume','start'):
                    continue
                a = datetime.strptime(time_lst[i-1],'%Y-%m-%d %H:%M:%S')
                b = datetime.strptime(time_lst[i],'%Y-%m-%d %H:%M:%S')
                # Accumulate the interval in fractional hours.
                diff += (b-a).days * 24
                diff += (b-a).seconds / float(60*60)
        return diff
    def check_operation(self, cr, uid, vals):
        """ Finds which operation is called ie. start, pause, done, cancel.
        @param vals: Dictionary of values.
        @return: True or False
        """
        code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
        code=self.pool.get('mrp_operations.operation.code').browse(cr,uid,code_ids)[0]
        code_lst = []
        oper_ids=self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs=self.browse(cr,uid,oper_ids)

        if not oper_objs:
            # No history yet: only a 'start' action is acceptable.
            if code.start_stop!='start':
                raise osv.except_osv(_('Sorry!'),_('Operation is not started yet!'))
                return False  # unreachable, kept as in original
        else:
            # Validate the new action against the recorded action history.
            for oper in oper_objs:
                 code_lst.append(oper.code_id.start_stop)
            if code.start_stop=='start':
                    if 'start' in code_lst:
                        raise osv.except_osv(_('Sorry!'),_('Operation has already started! You can either Pause/Finish/Cancel the operation.'))
                        return False  # unreachable, kept as in original
            if code.start_stop=='pause':
                    # Pause is only valid right after start or resume.
                    if  code_lst[len(code_lst)-1]!='resume' and code_lst[len(code_lst)-1]!='start':
                        raise osv.except_osv(_('Error!'),_('In order to Pause the operation, it must be in the Start or Resume state!'))
                        return False  # unreachable, kept as in original
            if code.start_stop=='resume':
                if code_lst[len(code_lst)-1]!='pause':
                    raise osv.except_osv(_('Error!'),_('In order to Resume the operation, it must be in the Pause state!'))
                    return False  # unreachable, kept as in original

            if code.start_stop=='done':
               if code_lst[len(code_lst)-1]!='start' and code_lst[len(code_lst)-1]!='resume':
                  raise osv.except_osv(_('Sorry!'),_('In order to Finish the operation, it must be in the Start or Resume state!'))
                  return False  # unreachable, kept as in original
               if 'cancel' in code_lst:
                  raise osv.except_osv(_('Sorry!'),_('Operation is Already Cancelled!'))
                  return False  # unreachable, kept as in original
            if code.start_stop=='cancel':
               if  not 'start' in code_lst :
                   raise osv.except_osv(_('Error!'),_('No operation to cancel.'))
                   return False  # unreachable, kept as in original
               if 'done' in code_lst:
                  raise osv.except_osv(_('Error!'),_('Operation is already finished!'))
                  return False  # unreachable, kept as in original
        return True
def write(self, cr, uid, ids, vals, context=None):
oper_objs = self.browse(cr, uid, ids, context=context)[0]
vals['production_id']=oper_objs.production_id.id
vals['workcenter_id']=oper_objs.workcenter_id.id
if 'code_id' in vals:
self.check_operation(cr, uid, vals)
if 'date_start' in vals:
vals['date_start']=vals['date_start']
vals['code_id']=oper_objs.code_id.id
delay=self.calc_delay(cr, uid, vals)
wc_op_id=self.pool.get('mrp.production.workcenter.line').search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
self.pool.get('mrp.production.workcenter.line').write(cr,uid,wc_op_id,{'delay':delay})
return super(mrp_operations_operation, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
code=self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids, context=context)[0]
wc_op_id=workcenter_pool.search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
if code.start_stop in ('start','done','pause','cancel','resume'):
if not wc_op_id:
production_obj=self.pool.get('mrp.production').browse(cr, uid, vals['production_id'], context=context)
wc_op_id.append(workcenter_pool.create(cr,uid,{'production_id':vals['production_id'],'name':production_obj.product_id.name,'workcenter_id':vals['workcenter_id']}))
if code.start_stop=='start':
workcenter_pool.action_start_working(cr,uid,wc_op_id)
workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_start_working')
if code.start_stop=='done':
workcenter_pool.action_done(cr,uid,wc_op_id)
workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_done')
self.pool.get('mrp.production').write(cr,uid,vals['production_id'],{'date_finished':datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
if code.start_stop=='pause':
workcenter_pool.action_pause(cr,uid,wc_op_id)
workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_pause')
if code.start_stop=='resume':
workcenter_pool.action_resume(cr,uid,wc_op_id)
workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_resume')
if code.start_stop=='cancel':
workcenter_pool.action_cancel(cr,uid,wc_op_id)
workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_cancel')
if not self.check_operation(cr, uid, vals):
return
delay=self.calc_delay(cr, uid, vals)
line_vals = {}
line_vals['delay'] = delay
if vals.get('date_start',False):
if code.start_stop == 'done':
line_vals['date_finished'] = vals['date_start']
elif code.start_stop == 'start':
line_vals['date_start'] = vals['date_start']
self.pool.get('mrp.production.workcenter.line').write(cr, uid, wc_op_id, line_vals, context=context)
return super(mrp_operations_operation, self).create(cr, uid, vals, context=context)
def initialize_workflow_instance(self, cr, uid, context=None):
mrp_production_workcenter_line = self.pool.get('mrp.production.workcenter.line')
line_ids = mrp_production_workcenter_line.search(cr, uid, [], context=context)
mrp_production_workcenter_line.create_workflow(cr, uid, line_ids)
return True
_columns={
'production_id':fields.many2one('mrp.production','Production',required=True),
'workcenter_id':fields.many2one('mrp.workcenter','Work Center',required=True),
'code_id':fields.many2one('mrp_operations.operation.code','Code',required=True),
'date_start': fields.datetime('Start Date'),
'date_finished': fields.datetime('End Date'),
'order_date': fields.function(_get_order_date,string='Order Date',type='date',store={'mrp.production':(_order_date_search_production,['date_planned'], 10)}),
}
_defaults={
'date_start': lambda *a:datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jabesq/home-assistant
|
tests/components/rflink/test_switch.py
|
11
|
10068
|
"""Test for RFlink switch components.
Test setup of rflink switch component/platform. State tracking and
control of Rflink switch devices.
"""
from homeassistant.components.rflink import EVENT_BUTTON_PRESSED
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_ON, STATE_OFF)
from homeassistant.core import callback, State, CoreState
from tests.common import mock_restore_cache
from tests.components.rflink.test_init import mock_rflink
DOMAIN = 'switch'

# Shared fixture: one rflink hub on a dummy serial port plus a single
# configured switch ('switch.test') with one alias.
CONFIG = {
    'rflink': {
        'port': '/dev/ttyABC0',
        'ignore_devices': ['ignore_wildcard_*', 'ignore_sensor'],
    },
    DOMAIN: {
        'platform': 'rflink',
        'devices': {
            'protocol_0_0': {
                'name': 'test',
                'aliases': ['test_alias_0_0'],
            },
        },
    },
}
async def test_default_setup(hass, monkeypatch):
    """Test all basic functionality of the rflink switch component."""
    # setup mocking rflink module
    event_callback, create, protocol, _ = await mock_rflink(
        hass, CONFIG, DOMAIN, monkeypatch)

    # make sure arguments are passed
    assert create.call_args_list[0][1]['ignore']

    # test default state of switch loaded from config
    switch_initial = hass.states.get('switch.test')
    assert switch_initial.state == 'off'
    assert switch_initial.attributes['assumed_state']

    # switch should follow state of the hardware device by interpreting
    # incoming events for its name and aliases

    # mock incoming command event for this device
    event_callback({
        'id': 'protocol_0_0',
        'command': 'on',
    })
    await hass.async_block_till_done()

    switch_after_first_command = hass.states.get('switch.test')
    assert switch_after_first_command.state == 'on'
    # also after receiving first command state no longer has to be assumed
    assert not switch_after_first_command.attributes.get('assumed_state')

    # mock incoming command event for this device
    event_callback({
        'id': 'protocol_0_0',
        'command': 'off',
    })
    await hass.async_block_till_done()

    assert hass.states.get('switch.test').state == 'off'

    # test following aliases
    # mock incoming command event for this device alias
    event_callback({
        'id': 'test_alias_0_0',
        'command': 'on',
    })
    await hass.async_block_till_done()

    assert hass.states.get('switch.test').state == 'on'

    # The switch component does not support adding new devices for incoming
    # events because every new unknown device is added as a light by default.

    # test changing state from HA propagates to Rflink
    hass.async_create_task(
        hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
                                 {ATTR_ENTITY_ID: DOMAIN + '.test'}))
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + '.test').state == 'off'
    # command should be acknowledged through the mocked serial protocol
    assert protocol.send_command_ack.call_args_list[0][0][0] == 'protocol_0_0'
    assert protocol.send_command_ack.call_args_list[0][0][1] == 'off'

    hass.async_create_task(
        hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
                                 {ATTR_ENTITY_ID: DOMAIN + '.test'}))
    await hass.async_block_till_done()
    assert hass.states.get(DOMAIN + '.test').state == 'on'
    assert protocol.send_command_ack.call_args_list[1][0][1] == 'on'
async def test_group_alias(hass, monkeypatch):
    """Group aliases should only respond to group commands (allon/alloff)."""
    config = {
        'rflink': {
            'port': '/dev/ttyABC0',
        },
        DOMAIN: {
            'platform': 'rflink',
            'devices': {
                'protocol_0_0': {
                    'name': 'test',
                    'group_aliases': ['test_group_0_0'],
                },
            },
        },
    }

    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(
        hass, config, DOMAIN, monkeypatch)

    assert hass.states.get(DOMAIN + '.test').state == 'off'

    # test sending group command to group alias
    event_callback({
        'id': 'test_group_0_0',
        'command': 'allon',
    })
    await hass.async_block_till_done()

    assert hass.states.get(DOMAIN + '.test').state == 'on'

    # test sending non-group command to group alias: it must be ignored,
    # so the switch stays on
    event_callback({
        'id': 'test_group_0_0',
        'command': 'off',
    })
    await hass.async_block_till_done()

    assert hass.states.get(DOMAIN + '.test').state == 'on'
async def test_nogroup_alias(hass, monkeypatch):
    """Non group aliases should not respond to group commands."""
    config = {
        'rflink': {
            'port': '/dev/ttyABC0',
        },
        DOMAIN: {
            'platform': 'rflink',
            'devices': {
                'protocol_0_0': {
                    'name': 'test',
                    'nogroup_aliases': ['test_nogroup_0_0'],
                },
            },
        },
    }

    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(
        hass, config, DOMAIN, monkeypatch)

    assert hass.states.get(DOMAIN + '.test').state == 'off'

    # test sending group command to nogroup alias
    event_callback({
        'id': 'test_nogroup_0_0',
        'command': 'allon',
    })
    await hass.async_block_till_done()

    # should not affect state
    assert hass.states.get(DOMAIN + '.test').state == 'off'

    # test sending non-group command to nogroup alias
    event_callback({
        'id': 'test_nogroup_0_0',
        'command': 'on',
    })
    await hass.async_block_till_done()

    # should affect state
    assert hass.states.get(DOMAIN + '.test').state == 'on'
async def test_nogroup_device_id(hass, monkeypatch):
    """Device id that do not respond to group commands (allon/alloff)."""
    config = {
        'rflink': {
            'port': '/dev/ttyABC0',
        },
        DOMAIN: {
            'platform': 'rflink',
            'devices': {
                'test_nogroup_0_0': {
                    'name': 'test',
                    'group': False,
                },
            },
        },
    }

    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(
        hass, config, DOMAIN, monkeypatch)

    assert hass.states.get(DOMAIN + '.test').state == 'off'

    # test sending group command to nogroup device
    event_callback({
        'id': 'test_nogroup_0_0',
        'command': 'allon',
    })
    await hass.async_block_till_done()

    # should not affect state
    assert hass.states.get(DOMAIN + '.test').state == 'off'

    # test sending non-group command to nogroup device
    event_callback({
        'id': 'test_nogroup_0_0',
        'command': 'on',
    })
    await hass.async_block_till_done()

    # should affect state
    assert hass.states.get(DOMAIN + '.test').state == 'on'
async def test_device_defaults(hass, monkeypatch):
    """Event should fire if device_defaults config says so."""
    config = {
        'rflink': {
            'port': '/dev/ttyABC0',
        },
        DOMAIN: {
            'platform': 'rflink',
            # fire_event applies to every device below
            'device_defaults': {
                'fire_event': True,
            },
            'devices': {
                'protocol_0_0': {
                    'name': 'test',
                    'aliases': ['test_alias_0_0'],
                },
            },
        },
    }

    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(
        hass, config, DOMAIN, monkeypatch)

    # collect bus events fired by the component
    calls = []

    @callback
    def listener(event):
        calls.append(event)
    hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)

    # test event for new unconfigured sensor
    event_callback({
        'id': 'protocol_0_0',
        'command': 'off',
    })
    await hass.async_block_till_done()

    assert calls[0].data == {'state': 'off', 'entity_id': DOMAIN + '.test'}
async def test_not_firing_default(hass, monkeypatch):
    """By default no bus events should be fired."""
    config = {
        'rflink': {
            'port': '/dev/ttyABC0',
        },
        DOMAIN: {
            'platform': 'rflink',
            'devices': {
                'protocol_0_0': {
                    'name': 'test',
                    'aliases': ['test_alias_0_0'],
                },
            },
        },
    }

    # setup mocking rflink module
    event_callback, _, _, _ = await mock_rflink(
        hass, config, DOMAIN, monkeypatch)

    # collect bus events fired by the component (none expected)
    calls = []

    @callback
    def listener(event):
        calls.append(event)
    hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)

    # test event for new unconfigured sensor
    event_callback({
        'id': 'protocol_0_0',
        'command': 'off',
    })
    await hass.async_block_till_done()

    assert not calls, 'an event has been fired'
async def test_restore_state(hass, monkeypatch):
    """Ensure states are restored on startup."""
    config = {
        'rflink': {
            'port': '/dev/ttyABC0',
        },
        DOMAIN: {
            'platform': 'rflink',
            'devices': {
                'test': {
                    'name': 's1',
                    'aliases': ['test_alias_0_0'],
                },
                'switch_test': {
                    'name': 's2',
                },
                'switch_s3': {
                    'name': 's3',
                }
            }
        }
    }

    # pre-populate the restore cache for s1 and s2 (s3 stays uncached)
    mock_restore_cache(hass, (
        State(DOMAIN + '.s1', STATE_ON, ),
        State(DOMAIN + '.s2', STATE_OFF, ),
    ))

    hass.state = CoreState.starting

    # setup mocking rflink module
    _, _, _, _ = await mock_rflink(hass, config, DOMAIN, monkeypatch)

    state = hass.states.get(DOMAIN + '.s1')
    assert state
    assert state.state == STATE_ON

    state = hass.states.get(DOMAIN + '.s2')
    assert state
    assert state.state == STATE_OFF

    # not cached switch must default values
    state = hass.states.get(DOMAIN + '.s3')
    assert state
    assert state.state == STATE_OFF
    assert state.attributes['assumed_state']
|
apache-2.0
|
hdinsight/hue
|
desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/chart/axis.py
|
10
|
12070
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Float,
NoneSet,
Bool,
Integer,
MinMax,
NoneSet,
Set,
String,
Alias,
)
from openpyxl.descriptors.excel import ExtensionList, Percentage
from openpyxl.descriptors.nested import (
NestedValue,
NestedSet,
NestedBool,
NestedNoneSet,
NestedFloat,
NestedInteger,
NestedMinMax,
NestedSequence,
)
from .descriptors import NumberFormatDescriptor
from .layout import Layout
from .text import Text, RichText
from .shapes import ShapeProperties
from .title import Title, TitleDescriptor
class ChartLines(Serialisable):
    """Formatting container for chart grid lines (<c:chartLines>)."""

    tagname = "chartLines"

    # spPr carries line/fill formatting; shapeProperties is a friendlier alias.
    spPr = Typed(expected_type=ShapeProperties, allow_none=True)
    shapeProperties = Alias('spPr')

    def __init__(self, spPr=None):
        self.spPr = spPr
class Scaling(Serialisable):
    """Axis scaling: orientation plus optional log base and min/max bounds."""

    tagname = "scaling"

    logBase = NestedFloat(allow_none=True)
    orientation = NestedSet(values=(['maxMin', 'minMax']))
    max = NestedFloat(allow_none=True)
    min = NestedFloat(allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('logBase', 'orientation', 'max', 'min',)

    def __init__(self,
                 logBase=None,
                 orientation="minMax",
                 max=None,
                 min=None,
                 extLst=None,  # accepted for schema compatibility; not stored
                 ):
        self.logBase = logBase
        self.orientation = orientation
        self.max = max
        self.min = min
class _BaseAxis(Serialisable):
    """Shared schema for all chart axis types.

    Concrete axes (value/category/date/series) re-declare these descriptors
    and add their own. ``crosses`` and ``crossesAt`` are mutually exclusive
    ways of positioning the perpendicular axis.
    """

    axId = NestedInteger(expected_type=int)
    scaling = Typed(expected_type=Scaling)
    delete = NestedBool(allow_none=True)
    axPos = NestedSet(values=(['b', 'l', 'r', 't']))
    majorGridlines = Typed(expected_type=ChartLines, allow_none=True)
    minorGridlines = Typed(expected_type=ChartLines, allow_none=True)
    title = TitleDescriptor()
    numFmt = NumberFormatDescriptor()
    number_format = Alias("numFmt")
    majorTickMark = NestedNoneSet(values=(['cross', 'in', 'out']))
    minorTickMark = NestedNoneSet(values=(['cross', 'in', 'out']))
    tickLblPos = NestedNoneSet(values=(['high', 'low', 'nextTo']))
    spPr = Typed(expected_type=ShapeProperties, allow_none=True)
    shapeProperties = Alias('spPr')
    txPr = Typed(expected_type=RichText, allow_none=True)
    textProperties = Alias('txPr')
    crossAx = NestedInteger(expected_type=int) # references other axis
    crosses = NestedNoneSet(values=(['autoZero', 'max', 'min']))
    crossesAt = NestedFloat(allow_none=True)

    # crosses & crossesAt are mutually exclusive
    __elements__ = ('axId', 'scaling', 'delete', 'axPos', 'majorGridlines',
                    'minorGridlines', 'numFmt', 'majorTickMark', 'minorTickMark',
                    'tickLblPos', 'spPr', 'title', 'txPr', 'crossAx', 'crosses', 'crossesAt')

    def __init__(self,
                 axId=None,
                 scaling=None,
                 delete=None,
                 axPos='l',
                 majorGridlines=None,
                 minorGridlines=None,
                 title=None,
                 numFmt=None,
                 majorTickMark=None,
                 minorTickMark=None,
                 tickLblPos=None,
                 spPr=None,
                 txPr=None,
                 crossAx=None,
                 crosses=None,
                 crossesAt=None,
                 ):
        self.axId = axId
        if scaling is None:
            scaling = Scaling()
        # Bug fix: previously assigned a fresh Scaling(), silently discarding
        # any caller-supplied scaling object.
        self.scaling = scaling
        self.delete = delete
        self.axPos = axPos
        self.majorGridlines = majorGridlines
        self.minorGridlines = minorGridlines
        self.title = title
        self.numFmt = numFmt
        self.majorTickMark = majorTickMark
        self.minorTickMark = minorTickMark
        self.tickLblPos = tickLblPos
        self.spPr = spPr
        self.txPr = txPr
        self.crossAx = crossAx
        self.crosses = crosses
        # Bug fix: previously hard-coded to None, ignoring the crossesAt
        # argument entirely.
        self.crossesAt = crossesAt
class DisplayUnitsLabel(Serialisable):
    """Label shown next to an axis when display units are in effect."""

    tagname = "dispUnitsLbl"

    layout = Typed(expected_type=Layout, allow_none=True)
    tx = Typed(expected_type=Text, allow_none=True)
    spPr = Typed(expected_type=ShapeProperties, allow_none=True)
    txPr = Typed(expected_type=RichText, allow_none=True)

    __elements__ = ('layout', 'tx', 'spPr', 'txPr')

    def __init__(self,
                 layout=None,
                 tx=None,
                 spPr=None,
                 txPr=None,
                 ):
        self.layout = layout
        self.tx = tx
        self.spPr = spPr
        self.txPr = txPr
class DisplayUnits(Serialisable):
    """Scaling of displayed values on a value axis (custom or built-in unit)."""

    tagname = "dispUnits"

    custUnit = NestedFloat(allow_none=True)
    builtInUnit = NestedNoneSet(values=(['hundreds', 'thousands',
                                        'tenThousands', 'hundredThousands', 'millions', 'tenMillions',
                                        'hundredMillions', 'billions', 'trillions']))
    dispUnitsLbl = Typed(expected_type=DisplayUnitsLabel, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = ('custUnit', 'builtInUnit', 'dispUnitsLbl',)

    def __init__(self,
                 custUnit=None,
                 builtInUnit=None,
                 dispUnitsLbl=None,
                 extLst=None,  # accepted for schema compatibility; not stored
                 ):
        self.custUnit = custUnit
        self.builtInUnit = builtInUnit
        self.dispUnitsLbl = dispUnitsLbl
class NumericAxis(_BaseAxis):
    """Value (numeric) axis -- serialised as <c:valAx>."""

    tagname = "valAx"

    # Descriptors are re-declared from _BaseAxis so they appear in this
    # class's own namespace -- presumably required by the Serialisable
    # introspection machinery; TODO confirm.
    axId = _BaseAxis.axId
    scaling = _BaseAxis.scaling
    delete = _BaseAxis.delete
    axPos = _BaseAxis.axPos
    majorGridlines = _BaseAxis.majorGridlines
    minorGridlines = _BaseAxis.minorGridlines
    title = _BaseAxis.title
    numFmt = _BaseAxis.numFmt
    majorTickMark = _BaseAxis.majorTickMark
    minorTickMark = _BaseAxis.minorTickMark
    tickLblPos = _BaseAxis.tickLblPos
    spPr = _BaseAxis.spPr
    txPr = _BaseAxis.txPr
    crossAx = _BaseAxis.crossAx
    crosses = _BaseAxis.crosses
    crossesAt = _BaseAxis.crossesAt

    crossBetween = NestedNoneSet(values=(['between', 'midCat']))
    majorUnit = NestedFloat(allow_none=True)
    minorUnit = NestedFloat(allow_none=True)
    dispUnits = Typed(expected_type=DisplayUnits, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = _BaseAxis.__elements__ + ('crossBetween', 'majorUnit',
                                             'minorUnit', 'dispUnits',)

    def __init__(self,
                 crossBetween=None,
                 majorUnit=None,
                 minorUnit=None,
                 dispUnits=None,
                 extLst=None,  # accepted for schema compatibility; not stored
                 **kw
                 ):
        self.crossBetween = crossBetween
        self.majorUnit = majorUnit
        self.minorUnit = minorUnit
        self.dispUnits = dispUnits
        # Sensible defaults: gridlines on, conventional axis id pairing.
        kw.setdefault('majorGridlines', ChartLines())
        kw.setdefault('axId', 100)
        kw.setdefault('crossAx', 10)
        super(NumericAxis, self).__init__(**kw)
class TextAxis(_BaseAxis):
    """Category (text) axis -- serialised as <c:catAx>."""

    tagname = "catAx"

    # Re-declared from _BaseAxis so they appear in this class's namespace.
    axId = _BaseAxis.axId
    scaling = _BaseAxis.scaling
    delete = _BaseAxis.delete
    axPos = _BaseAxis.axPos
    majorGridlines = _BaseAxis.majorGridlines
    minorGridlines = _BaseAxis.minorGridlines
    title = _BaseAxis.title
    numFmt = _BaseAxis.numFmt
    majorTickMark = _BaseAxis.majorTickMark
    minorTickMark = _BaseAxis.minorTickMark
    tickLblPos = _BaseAxis.tickLblPos
    spPr = _BaseAxis.spPr
    txPr = _BaseAxis.txPr
    crossAx = _BaseAxis.crossAx
    crosses = _BaseAxis.crosses
    crossesAt = _BaseAxis.crossesAt

    auto = NestedBool(allow_none=True)
    lblAlgn = NestedNoneSet(values=(['ctr', 'l', 'r']))
    lblOffset = NestedMinMax(min=0, max=1000)
    tickLblSkip = NestedInteger(allow_none=True)
    tickMarkSkip = NestedInteger(allow_none=True)
    noMultiLvlLbl = NestedBool(allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = _BaseAxis.__elements__ + ('auto', 'lblAlgn', 'lblOffset',
                                             'tickLblSkip', 'tickMarkSkip', 'noMultiLvlLbl')

    def __init__(self,
                 auto=None,
                 lblAlgn=None,
                 lblOffset=100,
                 tickLblSkip=None,
                 tickMarkSkip=None,
                 noMultiLvlLbl=None,
                 extLst=None,  # accepted for schema compatibility; not stored
                 **kw
                 ):
        self.auto = auto
        self.lblAlgn = lblAlgn
        self.lblOffset = lblOffset
        self.tickLblSkip = tickLblSkip
        self.tickMarkSkip = tickMarkSkip
        self.noMultiLvlLbl = noMultiLvlLbl
        # Mirror of NumericAxis defaults: ids 10/100 cross-reference each other.
        kw.setdefault('axId', 10)
        kw.setdefault('crossAx', 100)
        super(TextAxis, self).__init__(**kw)
class DateAxis(_BaseAxis):
    """Date axis -- serialised as <c:dateAx>."""

    tagname = "dateAx"

    # Re-declared from _BaseAxis so they appear in this class's namespace.
    axId = _BaseAxis.axId
    scaling = _BaseAxis.scaling
    delete = _BaseAxis.delete
    axPos = _BaseAxis.axPos
    majorGridlines = _BaseAxis.majorGridlines
    minorGridlines = _BaseAxis.minorGridlines
    title = _BaseAxis.title
    numFmt = _BaseAxis.numFmt
    majorTickMark = _BaseAxis.majorTickMark
    minorTickMark = _BaseAxis.minorTickMark
    tickLblPos = _BaseAxis.tickLblPos
    spPr = _BaseAxis.spPr
    txPr = _BaseAxis.txPr
    crossAx = _BaseAxis.crossAx
    crosses = _BaseAxis.crosses
    crossesAt = _BaseAxis.crossesAt

    auto = NestedBool(allow_none=True)
    lblOffset = NestedInteger(allow_none=True)
    baseTimeUnit = NestedNoneSet(values=(['days', 'months', 'years']))
    majorUnit = NestedFloat(allow_none=True)
    majorTimeUnit = NestedNoneSet(values=(['days', 'months', 'years']))
    minorUnit = NestedFloat(allow_none=True)
    minorTimeUnit = NestedNoneSet(values=(['days', 'months', 'years']))
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = _BaseAxis.__elements__ + ('auto', 'lblOffset',
                                             'baseTimeUnit', 'majorUnit', 'majorTimeUnit', 'minorUnit',
                                             'minorTimeUnit')

    def __init__(self,
                 auto=None,
                 lblOffset=None,
                 baseTimeUnit=None,
                 majorUnit=1,
                 majorTimeUnit=None,
                 minorUnit=None,
                 minorTimeUnit=None,
                 extLst=None,  # accepted for schema compatibility; not stored
                 **kw
                 ):
        self.auto = auto
        self.lblOffset = lblOffset
        self.baseTimeUnit = baseTimeUnit
        self.majorUnit = majorUnit
        self.majorTimeUnit = majorTimeUnit
        self.minorUnit = minorUnit
        self.minorTimeUnit = minorTimeUnit
        kw.setdefault('axId', 500)
        super(DateAxis, self).__init__(**kw)
class SeriesAxis(_BaseAxis):
    """Series axis (3D charts) -- serialised as <c:serAx>."""

    tagname = "serAx"

    # Re-declared from _BaseAxis so they appear in this class's namespace.
    axId = _BaseAxis.axId
    scaling = _BaseAxis.scaling
    delete = _BaseAxis.delete
    axPos = _BaseAxis.axPos
    majorGridlines = _BaseAxis.majorGridlines
    minorGridlines = _BaseAxis.minorGridlines
    title = _BaseAxis.title
    numFmt = _BaseAxis.numFmt
    majorTickMark = _BaseAxis.majorTickMark
    minorTickMark = _BaseAxis.minorTickMark
    tickLblPos = _BaseAxis.tickLblPos
    spPr = _BaseAxis.spPr
    txPr = _BaseAxis.txPr
    crossAx = _BaseAxis.crossAx
    crosses = _BaseAxis.crosses
    crossesAt = _BaseAxis.crossesAt

    tickLblSkip = NestedInteger(allow_none=True)
    tickMarkSkip = NestedInteger(allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    __elements__ = _BaseAxis.__elements__ + ('tickLblSkip', 'tickMarkSkip')

    def __init__(self,
                 tickLblSkip=None,
                 tickMarkSkip=None,
                 extLst=None,  # accepted for schema compatibility; not stored
                 **kw
                 ):
        self.tickLblSkip = tickLblSkip
        self.tickMarkSkip = tickMarkSkip
        kw.setdefault('axId', 1000)
        kw.setdefault('crossAx', 10)
        super(SeriesAxis, self).__init__(**kw)
|
apache-2.0
|
aperigault/ansible
|
lib/ansible/modules/windows/win_updates.py
|
2
|
10479
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Matt Davis <mdavis_ansible@rolpdog.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_updates
version_added: "2.0"
short_description: Download and install Windows updates
description:
- Searches, downloads, and installs Windows updates synchronously by automating the Windows Update client.
options:
blacklist:
description:
- A list of update titles or KB numbers that can be used to specify
which updates are to be excluded from installation.
- If an available update does match one of the entries, then it is
skipped and not installed.
- Each entry can either be the KB article or Update title as a regex
according to the PowerShell regex rules.
type: list
version_added: '2.5'
category_names:
description:
- A scalar or list of categories to install updates from. To get the list
of categories, run the module with C(state=searched). The category must
be the full category string, but is case insensitive.
- Some possible categories are Application, Connectors, Critical Updates,
Definition Updates, Developer Kits, Feature Packs, Guidance, Security
Updates, Service Packs, Tools, Update Rollups and Updates.
type: list
default: [ CriticalUpdates, SecurityUpdates, UpdateRollups ]
reboot:
description:
- Ansible will automatically reboot the remote host if it is required
and continue to install updates after the reboot.
- This can be used instead of using a M(win_reboot) task after this one
and ensures all updates for that category are installed in one go.
- Async does not work when C(reboot=yes).
type: bool
default: no
version_added: '2.5'
reboot_timeout:
description:
- The time in seconds to wait until the host is back online from a
reboot.
- This is only used if C(reboot=yes) and a reboot is required.
default: 1200
version_added: '2.5'
server_selection:
description:
- Defines the Windows Update source catalog.
- C(default) Use the default search source. For many systems default is
set to the Microsoft Windows Update catalog. Systems participating in
Windows Server Update Services (WSUS), Systems Center Configuration
Manager (SCCM), or similar corporate update server environments may
default to those managed update sources instead of the Windows Update
catalog.
- C(managed_server) Use a managed server catalog. For environments
utilizing Windows Server Update Services (WSUS), Systems Center
Configuration Manager (SCCM), or similar corporate update servers, this
option selects the defined corporate update source.
- C(windows_update) Use the Microsoft Windows Update catalog.
type: str
choices: [ default, managed_server, windows_update ]
default: default
version_added: '2.8'
state:
description:
- Controls whether found updates are returned as a list or actually installed.
- This module also supports Ansible check mode, which has the same effect as setting state=searched
type: str
choices: [ installed, searched ]
default: installed
log_path:
description:
- If set, C(win_updates) will append update progress to the specified file. The directory must already exist.
type: path
whitelist:
description:
- A list of update titles or KB numbers that can be used to specify
which updates are to be searched or installed.
- If an available update does not match one of the entries, then it
is skipped and not installed.
- Each entry can either be the KB article or Update title as a regex
according to the PowerShell regex rules.
- The whitelist is only validated on updates that were found based on
I(category_names). It will not force the module to install an update
if it was not in the category specified.
type: list
version_added: '2.5'
use_scheduled_task:
description:
- Will not auto elevate the remote process with I(become) and use a
scheduled task instead.
- Set this to C(yes) when using this module with async on Server 2008,
2008 R2, or Windows 7, or on Server 2008 that is not authenticated
with basic or credssp.
- Can also be set to C(yes) on newer hosts where become does not work
due to further privilege restrictions from the OS defaults.
type: bool
default: no
version_added: '2.6'
notes:
- C(win_updates) must be run by a user with membership in the local Administrators group.
- C(win_updates) will use the default update service configured for the machine (Windows Update, Microsoft Update, WSUS, etc).
- C(win_updates) will I(become) SYSTEM using I(runas) unless C(use_scheduled_task) is C(yes)
- By default C(win_updates) does not manage reboots, but will signal when a
reboot is required with the I(reboot_required) return value, as of Ansible v2.5
C(reboot) can be used to reboot the host if required in the one task.
- C(win_updates) can take a significant amount of time to complete (hours, in some cases).
Performance depends on many factors, including OS version, number of updates, system load, and update server load.
- Beware that just after C(win_updates) reboots the system, the Windows system may not have settled yet
and some base services could be in limbo. This can result in unexpected behavior.
Check the examples for ways to mitigate this.
- More information about PowerShell and how it handles RegEx strings can be
found at U(https://technet.microsoft.com/en-us/library/2007.11.powershell.aspx).
seealso:
- module: win_chocolatey
- module: win_feature
- module: win_hotfix
- module: win_package
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
- name: Install all security, critical, and rollup updates without a scheduled task
win_updates:
category_names:
- SecurityUpdates
- CriticalUpdates
- UpdateRollups
- name: Install only security updates as a scheduled task for Server 2008
win_updates:
category_names: SecurityUpdates
use_scheduled_task: yes
- name: Search-only, return list of found updates (if any), log to C:\ansible_wu.txt
win_updates:
category_names: SecurityUpdates
state: searched
log_path: C:\ansible_wu.txt
- name: Install all security updates with automatic reboots
win_updates:
category_names:
- SecurityUpdates
reboot: yes
- name: Install only particular updates based on the KB numbers
win_updates:
category_names:
- SecurityUpdates
whitelist:
- KB4056892
- KB4073117
- name: Exclude updates based on the update title
win_updates:
category_names:
- SecurityUpdates
- CriticalUpdates
blacklist:
- Windows Malicious Software Removal Tool for Windows
- \d{4}-\d{2} Cumulative Update for Windows Server 2016
# One way to ensure the system is reliable just after a reboot, is to set WinRM to a delayed startup
- name: Ensure WinRM starts when the system has settled and is ready to work reliably
win_service:
name: WinRM
start_mode: delayed
# Optionally, you can increase the reboot_timeout to survive long updates during reboot
- name: Ensure we wait long enough for the updates to be applied during reboot
win_updates:
reboot: yes
reboot_timeout: 3600
'''
RETURN = r'''
reboot_required:
description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot).
returned: success
type: bool
sample: true
updates:
description: List of updates that were found/installed.
returned: success
type: complex
sample:
contains:
title:
description: Display name.
returned: always
type: str
sample: "Security Update for Windows Server 2012 R2 (KB3004365)"
kb:
description: A list of KB article IDs that apply to the update.
returned: always
type: list of strings
sample: [ '3004365' ]
id:
description: Internal Windows Update GUID.
returned: always
type: str (guid)
sample: "fb95c1c8-de23-4089-ae29-fd3351d55421"
installed:
description: Was the update successfully installed.
returned: always
type: bool
sample: true
categories:
description: A list of category strings for this update.
returned: always
type: list of strings
sample: [ 'Critical Updates', 'Windows Server 2012 R2' ]
failure_hresult_code:
description: The HRESULT code from a failed update.
returned: on install failure
type: bool
sample: 2147942402
filtered_updates:
description: List of updates that were found but were filtered based on
I(blacklist), I(whitelist) or I(category_names). The return value is in
the same form as I(updates), along with I(filtered_reason).
returned: success
type: complex
sample: see the updates return value
contains:
filtered_reason:
description: The reason why this update was filtered.
returned: always
type: str
sample: 'skip_hidden'
found_update_count:
description: The number of updates found needing to be applied.
returned: success
type: int
sample: 3
installed_update_count:
description: The number of updates successfully installed.
returned: success
type: int
sample: 2
failed_update_count:
description: The number of updates that failed to install.
returned: always
type: int
sample: 0
'''
|
gpl-3.0
|
AgentN/namebench
|
libnamebench/nameserver.py
|
173
|
18079
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all nameserver related activity."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import random
import re
import socket
import sys
import time
# external dependencies (from nb_third_party)
import dns.exception
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.resolver
import dns.reversename
import dns.version
import health_checks
import provider_extensions
import addr_util
import util
# Look for buggy system versions of namebench
# (17301744 == 0x010800f0, i.e. dnspython release 1.8.0 -- presumably; verify)
if dns.version.hexversion < 17301744:
  raise ValueError('dnspython 1.8.0+ required, while only %s was found. The '
                   'namebench source bundles 1.8.0, so use it.' % dns.version.version)
# How many failures before we disable system nameservers
MAX_NORMAL_FAILURES = 2
# Keepers (user/system-supplied servers) get more slack before disablement.
MAX_KEEPER_FAILURES = 8
# Warnings beyond this count escalate into a fatal failure (see AddWarning).
MAX_WARNINGS = 10
# Failure percentage at or above which a server is "failure prone".
FAILURE_PRONE_RATE = 10
# In order of most likely to be important.
PROVIDER_TAGS = ['isp', 'network', 'likely-isp', 'dhcp']
# EVIL IMPORT-TIME SIDE EFFECT
BEST_TIMER_FUNCTION = util.GetMostAccurateTimerFunction()
def ResponseToAscii(response):
  """Summarize a DNS response as a printable string (None for no response)."""
  if not response:
    return None
  if not response.answer:
    # No answer section: report the response code instead.
    return dns.rcode.to_text(response.rcode())
  parts = []
  for answer in response.answer:
    parts.append(', '.join(map(str, answer.items)))
  # strip('"') == rstrip('"') + lstrip('"'): drop surrounding TXT quotes.
  return ' -> '.join(parts).strip('"')
class BrokenSystemClock(Exception):
  """Raised when the system clock appears to run backwards during timing."""

  def __init__(self, value):
    # Stored for callers that want the raw message/value back.
    self.value = value

  def __str__(self):
    return repr(self.value)
class NameServer(health_checks.NameServerHealthChecks, provider_extensions.NameServerProvider):
"""Hold information about a particular nameserver."""
  def __init__(self, ip, hostname=None, name=None, tags=None, provider=None,
               instance=None, location=None, latitude=None, longitude=None, asn=None,
               network_owner=None, dhcp_position=None, system_position=None):
    """Create a NameServer.

    Args:
      ip: IP address of the DNS server (string, IPv4 or IPv6)
      hostname: optional reverse hostname (looked up lazily when omitted)
      name: optional human-readable name
      tags: optional iterable of tag strings
      provider: optional provider/ISP name
      instance: optional instance identifier
      location: optional 'country_code/...' style string
      latitude: optional latitude (string or float)
      longitude: optional longitude (string or float)
      asn: optional autonomous system number
      network_owner: optional owning-network name
      dhcp_position: index among DHCP-provided servers, if any
      system_position: index among system resolver entries, if any
    """
    self.ip = ip
    self.name = name
    self.dhcp_position = dhcp_position
    self.system_position = system_position
    if tags:
      self.tags = set(tags)
    else:
      self.tags = set()
    self.provider = provider
    self.instance = instance
    self.location = location
    if self.location:
      # First path component of location is the country code.
      self.country_code = location.split('/')[0]
      self.tags.add('country_%s' % self.country_code.lower())
    else:
      self.country_code = None
    self.latitude = latitude
    self.longitude = longitude
    self.asn = asn
    self.network_owner = network_owner
    self._hostname = hostname
    # Timeouts in seconds: normal queries, health checks, and pings.
    self.timeout = 5
    self.health_timeout = 5
    self.ping_timeout = 1
    self.ResetTestStatus()
    self._version = None
    self._node_ids = set()
    # NOTE(review): the external_ips property reads self._external_ips, which
    # is never initialized here (and no base __init__ is called) -- presumably
    # a mixin base class provides it; verify.
    self.timer = BEST_TIMER_FUNCTION
    if ':' in self.ip:
      self.tags.add('ipv6')
    elif '.' in self.ip:
      self.tags.add('ipv4')
    if self.dhcp_position is not None:
      self.tags.add('dhcp')
    if self.system_position is not None:
      self.tags.add('system')
    # .0/.255 endings are usually network/broadcast addresses, not servers.
    if ip.endswith('.0') or ip.endswith('.255'):
      self.DisableWithMessage("IP appears to be a broadcast address.")
    elif self.is_bad:
      self.DisableWithMessage("Known bad address.")
def AddNetworkTags(self, domain, provider, asn, country_code):
if self.hostname:
my_domain = addr_util.GetDomainFromHostname(self.hostname)
hostname = self.hostname.lower()
else:
my_domain = 'UNKNOWN'
hostname = ''
if provider:
provider = provider.lower()
if domain and my_domain == domain:
self.tags.add('isp')
if asn and self.asn == asn:
self.tags.add('network')
if provider and 'isp' not in self.tags:
if (provider in self.name.lower() or provider in self.hostname.lower()
or (self.network_owner and provider in self.network_owner.lower())):
self.tags.add('isp')
elif provider and self.country_code == country_code and my_domain != domain:
if (provider in self.name.lower() or provider in hostname
or (self.network_owner and provider in self.network_owner.lower())):
self.tags.add('likely-isp')
  def ResetTestStatus(self):
    """Reset testing status of this host."""
    self.warnings = set()
    self.shared_with = set()
    # Give a previously disabled host a fresh chance.
    if self.is_disabled:
      self.tags.remove('disabled')
    self.checks = []
    self.failed_test_count = 0
    self.share_check_count = 0
    self.cache_checks = []
    self.is_slower_replica = False
    self.ResetErrorCounts()
def ResetErrorCounts(self):
"""NOTE: This gets called by benchmark.Run()!"""
self.request_count = 0
self.failure_count = 0
self.error_map = {}
  @property
  def is_keeper(self):
    """True for servers we try hard to keep (user/system supplied)."""
    return bool(self.MatchesTags(['preferred', 'dhcp', 'system', 'specified']))

  @property
  def is_bad(self):
    """True if non-keeper and tagged rejected/blacklist (None otherwise)."""
    if not self.is_keeper and self.MatchesTags(['rejected', 'blacklist']):
      return True

  @property
  def is_hidden(self):
    """True if this server should be hidden from display."""
    return self.HasTag('hidden')

  @property
  def is_disabled(self):
    """True if this server has been disabled (see DisableWithMessage)."""
    return self.HasTag('disabled')
  @property
  def check_average(self):
    """Average duration of health checks (index 3 of each check tuple)."""
    # If we only have a ping result, sort by it. Otherwise, use all non-ping results.
    if len(self.checks) == 1:
      return self.checks[0][3]
    else:
      return util.CalculateListAverage([x[3] for x in self.checks[1:]])

  @property
  def fastest_check_duration(self):
    """Duration of the quickest health check, or 0.0 when none have run."""
    if self.checks:
      return min([x[3] for x in self.checks])
    else:
      return 0.0

  @property
  def check_duration(self):
    """Total time spent in health checks."""
    return sum([x[3] for x in self.checks])
  @property
  def warnings_string(self):
    """Warnings as one display string; shows the disable reason when disabled."""
    if self.is_disabled:
      return 'DISABLED: %s' % self.disabled_msg
    else:
      return ', '.join(map(str, self.warnings))

  @property
  def errors(self):
    """Non-timeout error descriptions with per-error request counts."""
    return ['%s (%s requests)' % (_[0], _[1]) for _ in self.error_map.items() if _[0] != 'Timeout']

  @property
  def error_count(self):
    """Total number of non-timeout errors recorded."""
    return sum([_[1] for _ in self.error_map.items() if _[0] != 'Timeout'])

  @property
  def timeout_count(self):
    """Number of requests that timed out."""
    return self.error_map.get('Timeout', 0)
  @property
  def notes(self):
    """Return a list of notes about this nameserver object."""
    my_notes = []
    # Position 0 is the system's primary resolver; any other index is backup.
    if self.system_position == 0:
      my_notes.append('The current preferred DNS server')
    elif self.system_position:
      my_notes.append('A backup DNS server for this system')
    if self.dhcp_position is not None:
      my_notes.append('Assigned by your network DHCP server')
    if self.is_failure_prone:
      my_notes.append('%s of %s queries failed' % (self.failure_count, self.request_count))
    if self.HasTag('blacklist'):
      my_notes.append('BEWARE: IP appears in DNS server blacklist')
    if self.is_disabled:
      my_notes.append(self.disabled_msg)
    else:
      my_notes.extend(self.warnings)
    if self.errors:
      my_notes.extend(self.errors)
    return my_notes
  @property
  def hostname(self):
    """Reverse-resolved hostname, looked up lazily; may be None/''."""
    if self._hostname is None and not self.is_disabled:
      self.UpdateHostname()
    return self._hostname

  def UpdateHostname(self):
    """Force a reverse-DNS lookup of our IP; caches and returns the result."""
    if not self.is_disabled:
      self._hostname = self.GetReverseIp(self.ip)
    return self._hostname
@property
def version(self):
if self._version is None and not self.is_disabled:
self.GetVersion()
if not self._version:
return None
# Only return meaningful data
if (re.search('\d', self._version) or
(re.search('recursive|ns|server|bind|unbound', self._version, re.I)
and 'ontact' not in self._version and '...' not in self._version)):
return self._version
else:
return None
  @property
  def external_ips(self):
    """Return a set of external ips seen on this system."""
    # We use a slightly different pattern here because we want to
    # append to our results each time this is called.
    # NOTE(review): self._external_ips is never initialized in __init__;
    # presumably a mixin base class provides it -- verify.
    self._external_ips.add(self.GetMyResolverInfoWithDuration())
    # Only return non-blank entries
    return [x for x in self._external_ips if x]
  @property
  def node_ids(self):
    """Return a set of node_ids seen on this system."""
    if self.is_disabled:
      return []
    # Only return non-blank entries
    return [x for x in self._node_ids if x]

  def UpdateNodeIds(self):
    """Query the server for its node id, record it, and return it."""
    node_id = self.GetNodeIdWithDuration()[0]
    self._node_ids.add(node_id)
    return node_id
@property
def partial_node_ids(self):
partials = []
for node_id in self._node_ids:
node_bits = node_id.split('.')
if len(node_bits) >= 3:
partials.append('.'.join(node_bits[0:-2]))
else:
partials.append('.'.join(node_bits))
return partials
@property
def name_and_node(self):
if self.node_ids:
return '%s [%s]' % (self.name, ', '.join(self.partial_node_ids))
else:
return self.name
@property
def is_failure_prone(self):
if self.failure_rate >= FAILURE_PRONE_RATE:
return True
else:
return False
@property
def failure_rate(self):
if not self.failure_count or not self.request_count:
return 0
else:
return (float(self.failure_count) / float(self.request_count)) * 100
  def __str__(self):
    # e.g. "Google Public DNS [8.8.8.8:preferred,ipv4]" (tag order is
    # unspecified since tags is a set).
    return '%s [%s:%s]' % (self.name, self.ip, ','.join(self.tags))

  def __repr__(self):
    return self.__str__()

  def HasTag(self, tag):
    """Matches one tag."""
    return tag in self.tags

  def MatchesTags(self, tags):
    """Matches many tags."""
    # Returns the (possibly empty) set intersection, not a bool.
    return self.tags.intersection(tags)
  def AddFailure(self, message, fatal=False):
    """Add a failure for this nameserver. This will effectively disable its use."""
    respect_fatal = True
    # Keepers (user/system supplied servers) get more slack before disablement.
    if self.is_keeper:
      max_count = MAX_KEEPER_FAILURES
    else:
      max_count = MAX_NORMAL_FAILURES
    self.failed_test_count += 1
    if self.is_keeper:
      # fatal=True is never honored for keepers; they only go away after
      # exceeding max_count failures.
      respect_fatal = False
      # Be quiet if this is simply a 'preferred' ipv6 host.
      if self.HasTag('preferred') and self.HasTag('ipv6') and len(self.checks) <= 1:
        self.tags.add('disabled')
      else:
        print "\n* %s failed test #%s/%s: %s" % (self, self.failed_test_count, max_count, message)
    if fatal and respect_fatal:
      self.DisableWithMessage(message)
    elif self.failed_test_count >= max_count:
      self.DisableWithMessage("Failed %s tests, last: %s" % (self.failed_test_count, message))
  def AddWarning(self, message, penalty=True):
    """Add a warning to a host.

    Args:
      message: warning text; silently ignored unless it is a string.
      penalty: when True, reaching MAX_WARNINGS escalates to a fatal failure.
    """
    if not isinstance(message, str):
      print "Tried to add %s to %s (not a string)" % (message, self)
      return None
    self.warnings.add(message)
    if penalty and len(self.warnings) >= MAX_WARNINGS:
      self.AddFailure('Too many warnings (%s), probably broken.' % len(self.warnings), fatal=True)
  def DisableWithMessage(self, message):
    """Disable this server, recording *message* as the reason."""
    self.tags.add('disabled')
    # Disabled keepers (other than ipv6 ones) are announced loudly; everything
    # else is additionally hidden from display.
    if self.is_keeper and not self.HasTag('ipv6'):
      print "\nDISABLING %s: %s\n" % (self, message)
    else:
      self.tags.add('hidden')
    self.disabled_msg = message
  def CreateRequest(self, record, request_type, return_type):
    """Function to work around any dnspython make_query quirks."""
    return dns.message.make_query(record, request_type, return_type)

  def Query(self, request, timeout):
    """Send *request* to this server via UDP on port 53; return the response."""
    # print "%s -> %s" % (request, self)
    return dns.query.udp(request, self.ip, timeout, 53)
  def TimedRequest(self, type_string, record_string, timeout=None, rdataclass=None):
    """Make a DNS request, returning the reply and duration it took.

    Args:
      type_string: DNS record type to query (string)
      record_string: DNS record name to query (string)
      timeout: optional timeout (float)
      rdataclass: optional result class (defaults to rdataclass.IN)

    Returns:
      A tuple of (response, duration in ms [float], error_msg)

    In the case of a DNS response timeout, the response object will be None.
    """
    if not rdataclass:
      rdataclass = dns.rdataclass.IN
    else:
      rdataclass = dns.rdataclass.from_text(rdataclass)
    request_type = dns.rdatatype.from_text(type_string)
    record = dns.name.from_text(record_string, None)
    request = None
    self.request_count += 1
    # make_query may raise ValueError on malformed names; treat as a failure.
    try:
      request = self.CreateRequest(record, request_type, rdataclass)
    except ValueError:
      if not request:
        return (None, 0, util.GetLastExceptionString())
    if not timeout:
      timeout = self.timeout
    error_msg = None
    exc = None
    duration = None
    try:
      start_time = self.timer()
      response = self.Query(request, timeout)
      duration = self.timer() - start_time
    except (dns.exception.Timeout), exc:
      response = None
    except (dns.query.BadResponse, dns.message.TrailingJunk,
            dns.query.UnexpectedSource), exc:
      error_msg = util.GetLastExceptionString()
      response = None
    # This is pretty normal if someone runs namebench offline.
    except socket.error:
      response = None
      if ':' in self.ip:
        error_msg = 'socket error: IPv6 may not be available.'
      else:
        error_msg = util.GetLastExceptionString()
    # Pass these exceptions up the food chain
    except (KeyboardInterrupt, SystemExit, SystemError), exc:
      raise exc
    except:
      error_msg = util.GetLastExceptionString()
      print "* Unusual error with %s:%s on %s: %s" % (type_string, record_string, self, error_msg)
      response = None
    if not response:
      self.failure_count += 1
    # Timeout path leaves duration unset; measure it now.
    if not duration:
      duration = self.timer() - start_time
    if exc and not error_msg:
      error_msg = '%s: %s' % (record_string, util.GetLastExceptionString())
    if error_msg:
      # NOTE(review): the map key is re-derived from the *last* exception
      # string, which may not match error_msg computed above -- verify intent.
      key = util.GetLastExceptionString()
      self.error_map[key] = self.error_map.setdefault(key, 0) + 1
    if duration < 0:
      raise BrokenSystemClock('The time on your machine appears to be going backwards. '
                              'We cannot accurately benchmark due to this error. '
                              '(timer=%s, duration=%s)' % (self.timer, duration))
    return (response, util.SecondsToMilliseconds(duration), error_msg)
  def GetVersion(self):
    """Query version.bind (CHAOS/TXT); cache and return (version, duration)."""
    version = ''
    (response, duration, _) = self.TimedRequest('TXT', 'version.bind.', rdataclass='CHAOS',
                                                timeout=self.health_timeout)
    if response and response.answer:
      response_string = ResponseToAscii(response)
      version = response_string
    # Cache even the empty string so we do not re-query a silent server.
    self._version = version
    return (self._version, duration)
  def GetReverseIp(self, ip, retries_left=2):
    """Request a hostname for a given IP address (returns the ip on failure)."""
    try:
      # NOTE(review): unconditional debug output -- likely leftover.
      print "reverse: %s -> %s" % (ip, self)
      answer = dns.resolver.query(dns.reversename.from_address(ip), 'PTR')
    except dns.resolver.NXDOMAIN:
      # No PTR record exists: the IP itself is the best name we have.
      return ip
    except:
      # Transient failure: retry a couple of times before giving up.
      if retries_left:
        print "* Failed to get hostname for %s (retries left: %s): %s" % (ip, retries_left, util.GetLastExceptionString())
        return self.GetReverseIp(ip, retries_left=retries_left-1)
      else:
        return ip
    if answer:
      return answer[0].to_text().rstrip('.')
    else:
      return ip
  def GetTxtRecordWithDuration(self, record, retries_left=2):
    """Fetch a TXT record, returning (text_or_None, duration_ms)."""
    (response, duration, _) = self.TimedRequest('TXT', record, timeout=self.health_timeout)
    if response and response.answer:
      # Strip the surrounding quotes that TXT to_text() includes.
      return (response.answer[0].items[0].to_text().lstrip('"').rstrip('"'), duration)
    elif not response and retries_left:
      print "* Failed to lookup %s (retries left: %s): %s" % (record, retries_left, util.GetLastExceptionString())
      return self.GetTxtRecordWithDuration(record, retries_left=retries_left-1)
    else:
      return (None, duration)
  def GetIpFromNameWithDuration(self, name):
    """Get an IP for a given name with a duration."""
    (response, duration, _) = self.TimedRequest('A', name, timeout=self.health_timeout)
    if response and response.answer:
      # First A record only.
      ip = response.answer[0].items[0].to_text()
      return (ip, duration)
    else:
      return (None, duration)

  def GetNameFromNameWithDuration(self, name):
    """Get a name from another name (with duration). Used for node id lookups."""
    # Forward-resolve, then reverse-resolve the resulting IP.
    (ip, duration) = self.GetIpFromNameWithDuration(name)
    if ip:
      return (self.GetReverseIp(ip), duration)
    else:
      return (None, duration)
  def GetNodeIdWithDuration(self):
    """Try to determine the node id for this nameserver (tries many methods)."""
    node = ''
    # Provider-specific probes for well-known anycast services.
    # NOTE(review): self.hostname may be None here, which would make
    # .endswith() raise -- verify callers guarantee a resolvable hostname.
    if self.hostname.endswith('ultradns.net') or self.ip.startswith('156.154.7'):
      (node, duration) = self.GetUltraDnsNodeWithDuration()
    elif self.ip.startswith('8.8'):
      (node, duration) = self.GetMyResolverHostNameWithDuration()
    elif self.hostname.endswith('opendns.com') or self.ip.startswith('208.67.22'):
      (node, duration) = self.GetOpenDnsNodeWithDuration()
    else:
      # Fall back to the conventional CHAOS-class identity queries.
      (response, duration, _) = self.TimedRequest('TXT', 'hostname.bind.', rdataclass='CHAOS')
      if not response or not response.answer:
        (response, duration, _) = self.TimedRequest('TXT', 'id.server.', rdataclass='CHAOS')
      if response and response.answer:
        node = ResponseToAscii(response)
    return (node, duration)
  def DistanceFromCoordinates(self, lat, lon):
    """Distance in km from (lat, lon) to this server, or None if unlocated."""
    if self.latitude:
      return util.DistanceBetweenCoordinates(float(lat), float(lon), float(self.latitude), float(self.longitude))
    else:
      return None
if __name__ == '__main__':
  # Ad-hoc smoke test: python nameserver.py <ip>
  ns = NameServer(sys.argv[1])
  print "-" * 64
  print "IP: %s" % ns.ip
  print "Host: %s" % ns.hostname
  print "Version: %s" % ns.version
  print "Node: %s" % ns.node_ids
|
apache-2.0
|
hynekcer/django
|
tests/model_fields/models.py
|
210
|
12155
|
import os
import tempfile
import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageField, ImageFieldFile
from django.db.models.fields.related import (
ForeignKey, ForeignObject, ManyToManyField, OneToOneField,
)
from django.utils import six
# Pillow is optional: the ImageField models below are only defined when it is
# importable (see the `if Image:` guard further down).
try:
    from PIL import Image
except ImportError:
    Image = None
class Foo(models.Model):
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)

def get_foo():
    # Callable default for Bar.a below; evaluated lazily per instance.
    return Foo.objects.get(id=1).pk

class Bar(models.Model):
    b = models.CharField(max_length=10)
    # NOTE(review): related_name is a bytestring (b'bars') -- a py2-era
    # artifact; verify it is intentional.
    a = models.ForeignKey(Foo, models.CASCADE, default=get_foo, related_name=b'bars')
class Whiz(models.Model):
    # Grouped choices (two named optgroups) plus a flat trailing option.
    CHOICES = (
        ('Group 1', (
                (1, 'First'),
                (2, 'Second'),
            )
        ),
        ('Group 2', (
                (3, 'Third'),
                (4, 'Fourth'),
            )
        ),
        (0, 'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)
class Counter(six.Iterator):
    """Iterator used as a lazy `choices` source (see WhizIter below)."""
    def __init__(self):
        self.n = 1
    def __iter__(self):
        return self
    def __next__(self):
        # Increments before yielding, so the sequence is (2, 'val-2')
        # through (6, 'val-6').
        if self.n > 5:
            raise StopIteration
        else:
            self.n += 1
            return (self.n, 'val-' + str(self.n))
# Minimal models exercising one field type (or one field quirk) each.
class WhizIter(models.Model):
    c = models.IntegerField(choices=Counter(), null=True)

class WhizIterEmpty(models.Model):
    # choices from an (empty) generator expression.
    c = models.CharField(choices=(x for x in []), blank=True, max_length=1)

class BigD(models.Model):
    d = models.DecimalField(max_digits=38, decimal_places=30)

class FloatModel(models.Model):
    size = models.FloatField()

class BigS(models.Model):
    s = models.SlugField(max_length=255)

class UnicodeSlugField(models.Model):
    s = models.SlugField(max_length=255, allow_unicode=True)

class SmallIntegerModel(models.Model):
    value = models.SmallIntegerField()

class IntegerModel(models.Model):
    value = models.IntegerField()

class BigIntegerModel(models.Model):
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null=True, blank=True)

class PositiveSmallIntegerModel(models.Model):
    value = models.PositiveSmallIntegerField()

class PositiveIntegerModel(models.Model):
    value = models.PositiveIntegerField()

class Post(models.Model):
    title = models.CharField(max_length=100)
    body = models.TextField()

class NullBooleanModel(models.Model):
    nbfield = models.NullBooleanField()

class BooleanModel(models.Model):
    bfield = models.BooleanField(default=None)
    string = models.CharField(max_length=10, default='abc')

class DateTimeModel(models.Model):
    d = models.DateField()
    dt = models.DateTimeField()
    t = models.TimeField()

class DurationModel(models.Model):
    field = models.DurationField()

class NullDurationModel(models.Model):
    field = models.DurationField(null=True)

class PrimaryKeyCharModel(models.Model):
    string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
    """Model with FKs to models with {Null,}BooleanField's, #15040"""
    bf = models.ForeignKey(BooleanModel, models.CASCADE)
    nbf = models.ForeignKey(NullBooleanModel, models.CASCADE)

class FkToChar(models.Model):
    """Model with FK to a model with a CharField primary key, #19299"""
    out = models.ForeignKey(PrimaryKeyCharModel, models.CASCADE)

class RenamedField(models.Model):
    # name= makes the field's real name 'fieldname' even though the class
    # attribute is 'modelname'.
    modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
    # One field per built-in type, each with an explicit verbose name.
    id = models.AutoField("verbose pk", primary_key=True)
    field1 = models.BigIntegerField("verbose field1")
    field2 = models.BooleanField("verbose field2", default=False)
    field3 = models.CharField("verbose field3", max_length=10)
    field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
    field5 = models.DateField("verbose field5")
    field6 = models.DateTimeField("verbose field6")
    field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
    field8 = models.EmailField("verbose field8")
    field9 = models.FileField("verbose field9", upload_to="unused")
    field10 = models.FilePathField("verbose field10")
    field11 = models.FloatField("verbose field11")
    # Don't want to depend on Pillow in this test
    # field_image = models.ImageField("verbose field")
    field12 = models.IntegerField("verbose field12")
    field13 = models.GenericIPAddressField("verbose field13", protocol="ipv4")
    field14 = models.NullBooleanField("verbose field14")
    field15 = models.PositiveIntegerField("verbose field15")
    field16 = models.PositiveSmallIntegerField("verbose field16")
    field17 = models.SlugField("verbose field17")
    field18 = models.SmallIntegerField("verbose field18")
    field19 = models.TextField("verbose field19")
    field20 = models.TimeField("verbose field20")
    field21 = models.URLField("verbose field21")
    field22 = models.UUIDField("verbose field22")
    field23 = models.DurationField("verbose field23")
class GenericIPAddress(models.Model):
    ip = models.GenericIPAddressField(null=True, protocol='ipv4')

###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.

# See ticket #16570.
class DecimalLessThanOne(models.Model):
    d = models.DecimalField(max_digits=3, decimal_places=3)

# See ticket #18389.
class FieldClassAttributeModel(models.Model):
    # The attribute is the field *class* itself, not a field instance.
    field_class = models.CharField
###############################################################################
class DataModel(models.Model):
    short_data = models.BinaryField(max_length=10, default=b'\x08')
    data = models.BinaryField()

###############################################################################
# FileField
class Document(models.Model):
    myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField

# If Pillow available, do these tests.
if Image:
    class TestImageFieldFile(ImageFieldFile):
        """
        Custom Field File class that records whether or not the underlying file
        was opened.
        """
        def __init__(self, *args, **kwargs):
            self.was_opened = False
            super(TestImageFieldFile, self).__init__(*args, **kwargs)
        def open(self):
            self.was_opened = True
            super(TestImageFieldFile, self).open()

    class TestImageField(ImageField):
        # Swap in the open-tracking file class above.
        attr_class = TestImageFieldFile

    # Set up a temp directory for file storage.
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)
    temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')

    class Person(models.Model):
        """
        Model that defines an ImageField with no dimension fields.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests')

    # NOTE: the class name typo ('Absctract') is long-standing; renaming it
    # would break references, so it is kept as-is.
    class AbsctractPersonWithHeight(models.Model):
        """
        Abstract model that defines an ImageField with only one dimension field
        to make sure the dimension update is correctly run on concrete subclass
        instance post-initialization.
        """
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height')
        mugshot_height = models.PositiveSmallIntegerField()
        class Meta:
            abstract = True

    class PersonWithHeight(AbsctractPersonWithHeight):
        """
        Concrete model that subclasses an abstract one with only one
        dimension field.
        """
        name = models.CharField(max_length=50)

    class PersonWithHeightAndWidth(models.Model):
        """
        Model that defines height and width fields after the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()

    class PersonDimensionsFirst(models.Model):
        """
        Model that defines height and width fields before the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')

    class PersonTwoImages(models.Model):
        """
        Model that:
        * Defines two ImageFields
        * Defines the height/width fields before the ImageFields
        * Has a nullable ImageField
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        headshot_height = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot_width = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot = TestImageField(blank=True, null=True,
                                  storage=temp_storage, upload_to='tests',
                                  height_field='headshot_height',
                                  width_field='headshot_width')
class AllFieldsModel(models.Model):
    # One of every built-in field type, for introspection/deconstruction
    # coverage.
    big_integer = models.BigIntegerField()
    binary = models.BinaryField()
    boolean = models.BooleanField(default=False)
    char = models.CharField(max_length=10)
    csv = models.CommaSeparatedIntegerField(max_length=10)
    date = models.DateField()
    datetime = models.DateTimeField()
    decimal = models.DecimalField(decimal_places=2, max_digits=2)
    duration = models.DurationField()
    email = models.EmailField()
    file_path = models.FilePathField()
    floatf = models.FloatField()
    integer = models.IntegerField()
    generic_ip = models.GenericIPAddressField()
    null_boolean = models.NullBooleanField()
    positive_integer = models.PositiveIntegerField()
    positive_small_integer = models.PositiveSmallIntegerField()
    slug = models.SlugField()
    small_integer = models.SmallIntegerField()
    text = models.TextField()
    time = models.TimeField()
    url = models.URLField()
    uuid = models.UUIDField()
    # Relational fields, all self-referential.
    fo = ForeignObject(
        'self',
        on_delete=models.CASCADE,
        from_fields=['abstract_non_concrete_id'],
        to_fields=['id'],
        related_name='reverse'
    )
    fk = ForeignKey(
        'self',
        models.CASCADE,
        related_name='reverse2'
    )
    m2m = ManyToManyField('self')
    oto = OneToOneField('self', models.CASCADE)
    # Generic (contenttypes) relations.
    object_id = models.PositiveIntegerField()
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    gfk = GenericForeignKey()
    gr = GenericRelation(DataModel)
###############################################################################
class UUIDModel(models.Model):
    field = models.UUIDField()

class NullableUUIDModel(models.Model):
    field = models.UUIDField(blank=True, null=True)

class PrimaryKeyUUIDModel(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)

class RelatedToUUIDModel(models.Model):
    uuid_fk = models.ForeignKey('PrimaryKeyUUIDModel', models.CASCADE)

# Inheritance chain off a UUID primary key.
class UUIDChild(PrimaryKeyUUIDModel):
    pass

class UUIDGrandchild(UUIDChild):
    pass
|
bsd-3-clause
|
xiaoyaozi5566/GEM5_DRAMSim2
|
src/mem/slicc/generate/tex.py
|
92
|
2816
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.util.code_formatter import code_formatter
class tex_formatter(code_formatter):
    # Use <...> / <<...>> as substitution delimiters so that LaTeX's literal
    # braces ({}) pass through templates untouched.
    braced = "<>"
    double_braced = "<<>>"
def printTexTable(sm, code):
    """Append a standalone LaTeX document with *sm*'s transition table to *code*.

    Rows are states, columns are events; each cell shows the action
    shorthands and, when it differs from the row's state, the next-state
    shorthand separated by '/'.
    """
    tex = tex_formatter()
    tex(r'''
%& latex
\documentclass[12pt]{article}
\usepackage{graphics}
\begin{document}
\begin{tabular}{|l||$<<"l" * len(sm.events)>>|} \hline
''')
    # BUG FIX: the rotated column headers use tex_formatter's <<...>>
    # substitution syntax and must land inside the tex buffer (between the
    # tabular preamble and the \\ \hline). Writing them to *code* put raw,
    # unsubstituted text outside the table.
    for event in sm.events:
        tex(r" & \rotatebox{90}{$<<event.short>>}")
    tex(r'\\ \hline \hline')
    for state in sm.states:
        state_str = state.short
        for event in sm.events:
            state_str += ' & '
            trans = sm.get_transition(state, event)
            if trans:
                actions = trans.getActionShorthands()
                # FIXME: should compare index, not the string
                if trans.getNextStateShorthand() != state.short:
                    nextState = trans.getNextStateShorthand()
                else:
                    nextState = ""
                state_str += actions
                if nextState and actions:
                    state_str += '/'
                state_str += nextState
        tex(r'$0 \\', state_str)
    tex(r'''
\hline
\end{tabular}
\end{document}
''')
    code.append(tex)
|
bsd-3-clause
|
pygeo/geoval
|
tests/test_mintrend.py
|
1
|
3310
|
# -*- coding: utf-8 -*-
"""
This file is part of GEOVAL.
(c) Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import sys
sys.path.append('..')
import unittest
from geoval.core import GeoData
from geoval.statistic.mintrend import MintrendPlot
try:
import cPickle # p2
except:
import pickle as cPickle # p3
import numpy as np
import tempfile
class TestTrend(unittest.TestCase):
    """Tests for MintrendPlot's lookup-table loading and interpolation."""

    def setUp(self):
        self.D = GeoData(None, None)
        self.D._init_sample_object(nt=1000, ny=1, nx=1)
        self._tmpdir = tempfile.mkdtemp()

    # NOTE(review): @unittest.skip on a *helper* makes every test that calls
    # it raise SkipTest at call time -- presumably the intent is to disable
    # the dependent tests below; verify.
    @unittest.skip('skipped currently due to python3 cPickle problem')
    def _generate_sample_lut(self):
        """Pickle a tiny (1 phi x 2 means x 3 cvs) LUT; return (path, dict)."""
        lutname = tempfile.mktemp(suffix='.pkl')
        cvs = np.asarray([1.,2.,3.])
        means = np.asarray([10.,20.])
        phis = np.asarray([0.9])
        lut = np.ones((len(phis),len(means), len(cvs)))*np.nan
        lut[0,0,0] = 0.1
        lut[0,0,1] = 0.2
        lut[0,0,2] = np.nan
        lut[0,1,0] = 0.4
        lut[0,1,1] = 0.5
        lut[0,1,2] = 0.6
        #lut = np.asarray([[0.1,0.2,np.nan],[0.4,0.5,0.6]])
        d = {'cvs' : cvs, 'means' : means, 'res' : lut, 'phis' : phis}
        cPickle.dump(d, open(lutname, 'w'))
        return lutname, d

    def test_mintrend_lut_interpolation(self):
        lutname, d = self._generate_sample_lut()
        P = MintrendPlot(lutname)
        self.assertEqual(P._lutname, lutname)

    def test_read_lut(self):
        # check that LUT is read properly
        lutname, d = self._generate_sample_lut()
        P = MintrendPlot(lutname)
        # NOTE(review): the expected length 5 (vs. 3 cvs / 2 means written
        # above) suggests MintrendPlot flattens and NaN-filters the LUT --
        # cannot verify from here.
        self.assertEqual(len(P.cvs),5)
        self.assertEqual(len(P.means),5)
        self.assertEqual(len(P.lut),5)
        self.assertEqual(list(P.lut), [0.1,0.2,0.4,0.5,0.6])

    def test_interpolate(self):
        lutname, d = self._generate_sample_lut()
        P = MintrendPlot(lutname)
        #~
        # first interpolate to same coordinates as given
        # should give same results
        # cvs, means, phis
        z = P._interpolate_fast([1.], [10.], [0.9])
        self.assertEqual(z[0],0.1)
        z = P._interpolate_fast([2.], [10.], [0.9])
        self.assertEqual(z[0],0.2)
        z = P._interpolate_fast([1.], [20.], [0.9])
        self.assertEqual(z[0],0.4)
        z = P._interpolate_fast([2.], [20.], [0.9])
        self.assertEqual(z[0],0.5)
        z = P._interpolate_fast([3.], [20.], [0.9])
        self.assertEqual(z[0],0.6)
        # now check real interpolation
        z = P._interpolate_fast([2.], [15.], [0.9])
        self.assertEqual(z[0],0.35)
        z = P._interpolate_fast([1.], [15.], [0.9])
        self.assertEqual(z[0],0.25)

    #~ def test_plot(self):
        #~ ny = 5
        #~ nx = 3
        #~ STD = GeoData(None, None)
        #~ STD._init_sample_object(nt=None, ny=ny, nx=nx, gaps=True)
        #~ STD.mulc(100.,copy=False)
        ###STD.addc(1.,copy=False)
        #~
        #~ ME = GeoData(None, None)
        #~ ME._init_sample_object(nt=None, ny=ny, nx=nx)
        #~ ME.mulc(30.,copy=False)
        ####ME.addc(10.,copy=False)
        #~
        #~ lutname, d = self._generate_sample_lut()
        #~ P = MintrendPlot(lutname)
        #~ P.map_trends(STD, ME)
        #~
        #~ self.assertTrue(hasattr(P,'X'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
apache-2.0
|
nvoron23/socialite
|
jython/Lib/test/test_classdecorators.py
|
23
|
1192
|
# This test is temporary until we can import test_decorators from CPython 3.x
# The reason for not doing that already is that in Python 3.x the name of a
# function is stored in func.__name__, in 2.x it's func.func_name
import unittest
from test import test_support
class TestClassDecorators(unittest.TestCase):
    """Exercise class decorators (PEP 3129 semantics on Python 2.6+)."""

    def test_simple(self):
        # A single decorator can attach an attribute to the class object.
        def plain(cls):
            cls.extra = 'Hello'
            return cls

        @plain
        class C(object):
            pass

        self.assertEqual(C.extra, 'Hello')

    def test_double(self):
        # Decorators compose bottom-up: ten() runs first, then add_five().
        def ten(cls):
            cls.extra = 10
            return cls

        def add_five(cls):
            cls.extra += 5
            return cls

        @add_five
        @ten
        class C(object):
            pass

        self.assertEqual(C.extra, 15)

    def test_order(self):
        # The decorator closest to the class body is applied first, so the
        # outermost one determines the final value.
        def applied_first(cls):
            cls.extra = 'first'
            return cls

        def applied_second(cls):
            cls.extra = 'second'
            return cls

        @applied_second
        @applied_first
        class C(object):
            pass

        self.assertEqual(C.extra, 'second')
def test_main():
    # Entry point for the CPython 2.x / Jython regrtest machinery.
    # NOTE(review): ``test.test_support`` only exists on Python 2
    # (renamed ``test.support`` on Python 3), matching the header comment.
    test_support.run_unittest(TestClassDecorators)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
|
apache-2.0
|
Nikola-K/guessit
|
docs/_themes/flask_theme_support.py
|
2228
|
4875
|
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments syntax-highlighting style for the Flask Sphinx theme.

    Based on the Tango style: maps Pygments token types to style strings
    (hex color plus optional ``bold``/``italic``/``underline`` flags).
    """
    background_color = "#f8f8f8"
    default_style = ""
    # Token -> style-string mapping; the trailing comments give the CSS
    # class Pygments emits for each token type.
    styles = {
        # No corresponding class for the following:
        #Text: "", # class: ''
        Whitespace: "underline #f8f8f8", # class: 'w'
        Error: "#a40000 border:#ef2929", # class: 'err'
        Other: "#000000", # class 'x'
        Comment: "italic #8f5902", # class: 'c'
        Comment.Preproc: "noitalic", # class: 'cp'
        Keyword: "bold #004461", # class: 'k'
        Keyword.Constant: "bold #004461", # class: 'kc'
        Keyword.Declaration: "bold #004461", # class: 'kd'
        Keyword.Namespace: "bold #004461", # class: 'kn'
        Keyword.Pseudo: "bold #004461", # class: 'kp'
        Keyword.Reserved: "bold #004461", # class: 'kr'
        Keyword.Type: "bold #004461", # class: 'kt'
        Operator: "#582800", # class: 'o'
        Operator.Word: "bold #004461", # class: 'ow' - like keywords
        Punctuation: "bold #000000", # class: 'p'
        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name: "#000000", # class: 'n'
        Name.Attribute: "#c4a000", # class: 'na' - to be revised
        Name.Builtin: "#004461", # class: 'nb'
        Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
        Name.Class: "#000000", # class: 'nc' - to be revised
        Name.Constant: "#000000", # class: 'no' - to be revised
        Name.Decorator: "#888", # class: 'nd' - to be revised
        Name.Entity: "#ce5c00", # class: 'ni'
        Name.Exception: "bold #cc0000", # class: 'ne'
        Name.Function: "#000000", # class: 'nf'
        Name.Property: "#000000", # class: 'py'
        Name.Label: "#f57900", # class: 'nl'
        Name.Namespace: "#000000", # class: 'nn' - to be revised
        Name.Other: "#000000", # class: 'nx'
        Name.Tag: "bold #004461", # class: 'nt' - like a keyword
        Name.Variable: "#000000", # class: 'nv' - to be revised
        Name.Variable.Class: "#000000", # class: 'vc' - to be revised
        Name.Variable.Global: "#000000", # class: 'vg' - to be revised
        Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
        Number: "#990000", # class: 'm'
        Literal: "#000000", # class: 'l'
        Literal.Date: "#000000", # class: 'ld'
        String: "#4e9a06", # class: 's'
        String.Backtick: "#4e9a06", # class: 'sb'
        String.Char: "#4e9a06", # class: 'sc'
        String.Doc: "italic #8f5902", # class: 'sd' - like a comment
        String.Double: "#4e9a06", # class: 's2'
        String.Escape: "#4e9a06", # class: 'se'
        String.Heredoc: "#4e9a06", # class: 'sh'
        String.Interpol: "#4e9a06", # class: 'si'
        String.Other: "#4e9a06", # class: 'sx'
        String.Regex: "#4e9a06", # class: 'sr'
        String.Single: "#4e9a06", # class: 's1'
        String.Symbol: "#4e9a06", # class: 'ss'
        Generic: "#000000", # class: 'g'
        Generic.Deleted: "#a40000", # class: 'gd'
        Generic.Emph: "italic #000000", # class: 'ge'
        Generic.Error: "#ef2929", # class: 'gr'
        Generic.Heading: "bold #000080", # class: 'gh'
        Generic.Inserted: "#00A000", # class: 'gi'
        Generic.Output: "#888", # class: 'go'
        Generic.Prompt: "#745334", # class: 'gp'
        Generic.Strong: "bold #000000", # class: 'gs'
        Generic.Subheading: "bold #800080", # class: 'gu'
        Generic.Traceback: "bold #a40000", # class: 'gt'
    }
|
lgpl-3.0
|
ishay2b/tensorflow
|
tensorflow/tools/docs/generate_lib_test.py
|
52
|
4345
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for doc generator traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate_lib
from tensorflow.tools.docs import parser
def test_function():
    """Docstring for test_function."""
    # Intentionally empty: the doc-generation test below only needs a
    # documented symbol to traverse, not any behavior.
    pass
# Three-level class nesting gives the doc traversal a hierarchy to walk
# (TestClass -> ChildClass -> GrandChildClass in the index below).
class TestClass(object):
    """Docstring for TestClass itself."""

    class ChildClass(object):
        """Docstring for a child class."""

        class GrandChildClass(object):
            """Docstring for a child of a child class."""
            pass
class DummyVisitor(object):
    """Minimal stand-in for a code-traversal visitor.

    Only carries the ``index`` and ``duplicate_of`` mappings — presumably
    the attributes ``parser.ReferenceResolver.from_visitor`` consumes
    (see ``GenerateTest.test_write``).
    """

    def __init__(self, index, duplicate_of):
        self.index = index
        self.duplicate_of = duplicate_of
class GenerateTest(googletest.TestCase):
    """End-to-end check that ``generate_lib.write_docs`` writes the
    expected Markdown files (and skips duplicates' primary aliases)."""

    def test_write(self):
        # The generator is Python-2 only at this point in history.
        if sys.version_info >= (3, 0):
            self.skipTest('Warning: Doc generation is not supported from python3.')
        module = sys.modules[__name__]
        # Fully-qualified symbol name -> live Python object, as a traversal
        # visitor would have produced it.
        index = {
            'tf': sys,  # Can be any module, this test doesn't care about content.
            'tf.TestModule': module,
            'tf.test_function': test_function,
            'tf.TestModule.test_function': test_function,
            'tf.TestModule.TestClass': TestClass,
            'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
            'tf.TestModule.TestClass.ChildClass.GrandChildClass':
                TestClass.ChildClass.GrandChildClass,
        }
        # Parent name -> list of member names (the containment tree).
        tree = {
            'tf': ['TestModule', 'test_function'],
            'tf.TestModule': ['test_function', 'TestClass'],
            'tf.TestModule.TestClass': ['ChildClass'],
            'tf.TestModule.TestClass.ChildClass': ['GrandChildClass'],
            'tf.TestModule.TestClass.ChildClass.GrandChildClass': []
        }
        # tf.test_function is an alias; tf.TestModule.test_function is the
        # canonical location that should get the page.
        duplicate_of = {'tf.test_function': 'tf.TestModule.test_function'}
        duplicates = {
            'tf.TestModule.test_function': [
                'tf.test_function', 'tf.TestModule.test_function'
            ]
        }
        base_dir = os.path.dirname(__file__)
        visitor = DummyVisitor(index, duplicate_of)
        reference_resolver = parser.ReferenceResolver.from_visitor(
            visitor=visitor, doc_index={}, py_module_names=['tf'])
        parser_config = parser.ParserConfig(
            reference_resolver=reference_resolver,
            duplicates=duplicates,
            duplicate_of=duplicate_of,
            tree=tree,
            index=index,
            reverse_index={},
            guide_index={},
            base_dir=base_dir)
        output_dir = googletest.GetTempDir()
        generate_lib.write_docs(output_dir, parser_config, yaml_toc=True)
        # Make sure that the right files are written to disk.
        self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
        self.assertTrue(os.path.exists(os.path.join(output_dir, 'tf.md')))
        self.assertTrue(os.path.exists(os.path.join(output_dir, '_toc.yaml')))
        self.assertTrue(
            os.path.exists(os.path.join(output_dir, 'tf/TestModule.md')))
        # The alias must NOT get its own page...
        self.assertFalse(
            os.path.exists(os.path.join(output_dir, 'tf/test_function.md')))
        self.assertTrue(
            os.path.exists(
                os.path.join(output_dir, 'tf/TestModule/TestClass.md')))
        self.assertTrue(
            os.path.exists(
                os.path.join(output_dir,
                             'tf/TestModule/TestClass/ChildClass.md')))
        self.assertTrue(
            os.path.exists(
                os.path.join(
                    output_dir,
                    'tf/TestModule/TestClass/ChildClass/GrandChildClass.md')))
        # ...while the canonical name is still written.
        self.assertTrue(
            os.path.exists(
                os.path.join(output_dir, 'tf/TestModule/test_function.md')))
# Run the doc-generation tests when executed directly.
if __name__ == '__main__':
    googletest.main()
|
apache-2.0
|
nvbn/python-social-auth
|
social/backends/dropbox.py
|
3
|
1431
|
"""
Dropbox OAuth support.
This contribution adds support for Dropbox OAuth service. The settings
DROPBOX_APP_ID and DROPBOX_API_SECRET must be defined with the values
given by Dropbox application registration process.
By default account id and token expiration time are stored in extra_data
field, check OAuthBackend class for details on how to extend it.
"""
from social.backends.oauth import BaseOAuth1
class DropboxOAuth(BaseOAuth1):
    """Dropbox OAuth authentication backend"""
    name = 'dropbox'
    # Field of the service response that uniquely identifies the account.
    ID_KEY = 'uid'
    # OAuth 1.0a endpoints of the Dropbox v1 API.
    AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth/authorize'
    REQUEST_TOKEN_URL = 'https://api.dropbox.com/1/oauth/request_token'
    REQUEST_TOKEN_METHOD = 'POST'
    ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    # Dropbox takes the callback URL under 'oauth_callback' rather than the
    # framework's default parameter name.
    REDIRECT_URI_PARAMETER_NAME = 'oauth_callback'
    # (response_field, alias) pairs stored in extra_data — see module
    # docstring for account id / token expiration storage.
    EXTRA_DATA = [
        ('id', 'id'),
        ('expires', 'expires')
    ]
    def get_user_details(self, response):
        """Return user details from Dropbox account"""
        return {'username': str(response.get('uid')),
                'email': response.get('email'),
                'first_name': response.get('display_name')}
    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        return self.get_json('https://api.dropbox.com/1/account/info',
                             auth=self.oauth_auth(access_token))
|
bsd-3-clause
|
blorgon9000/pyopus
|
pyopus/misc/sobol.py
|
1
|
1725
|
"""
.. inheritance-diagram:: pyopus.misc.sobol
:parts: 1
**Sobol sequence generator**
Details can be found in
[joekuo1] S. Joe and F. Y. Kuo, Remark on Algorithm 659: Implementing Sobol's
quasirandom sequence generator, ACM Trans. Math. Softw. 29, 49-57 (2003).
[joekuo2] S. Joe and F. Y. Kuo, Constructing Sobol sequences with better two-dimensional
projections, SIAM J. Sci. Comput. 30, 2635-2654 (2008).
The code is a modification (efficiency reasons) of the code published at
http://web.maths.unsw.edu.au/~fkuo/sobol/
"""
import _sobol
import numpy as np
import copy
__all__ = [ 'Sobol' ]
class Sobol(object):
    """
    Constructs a Sobol sequence generator with dimension *dim*.
    The sequence members are in graycode order.
    """
    def __init__(self, dim):
        self.dim=dim
        # Precomputed 32-bit tables from the C extension — presumably the
        # direction numbers from the Joe/Kuo references in the module
        # docstring; TODO confirm against _sobol's source.
        self.V=_sobol.precompute(self.dim, 32)
        self.reset()
    def reset(self):
        """
        Resets the generator.
        """
        # Sequence position, kept as a length-1 uint32 array (so the C
        # extension can update it in place -- NOTE(review): verify).
        self.index=np.zeros(1, dtype=np.uint32)
        # Current integer state, one uint32 per dimension.
        self.X=np.zeros(self.dim, dtype=np.uint32)
    def skip(self, n):
        """
        Skips *n* values.
        """
        # Same C call as get(), last flag 0 — presumably "advance only,
        # do not return values".
        _sobol.generate(self.dim, 32, self.V, self.index, self.X, n, 0)
    def get(self, n):
        """
        Returns *n* values as rows of a matrix with *dim* columns.
        """
        return _sobol.generate(self.dim, 32, self.V, self.index, self.X, n, 1)
    def clone(self):
        """
        Returns a clone of self.
        """
        return copy.deepcopy(self)
    def get_state(self):
        """
        Returns the state of the generator.
        """
        # Copies are returned so callers cannot mutate the live state.
        return (self.dim, self.V.copy(), self.index.copy(), self.X.copy())
    def set_state(self, state):
        """
        Sets the state of the generator.
        """
        # Mirror of get_state(); copies isolate us from the caller's arrays.
        self.dim=state[0]
        self.V=state[1].copy()
        self.index=state[2].copy()
        self.X=state[3].copy()
|
gpl-3.0
|
prospwro/odoo
|
addons/irsid_edu_training/models/core.py
|
2
|
7407
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution Addon
# Copyright (C) 2009-2013 IRSID (<http://irsid.ru>),
# Paul Korotkov (korotkov.paul@gmail.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
EDU_STATES = [
('draft', 'New'),
('entrance', 'Entrance'),
('open', 'Training in Progress'),
('pending', 'Training Suspended'),
('done', 'Training Done'),
('canceled', 'Training Canceled'),
]
EDU_DOC_STATES = [
('draft', 'New'),
('confirmed', 'On Validation'),
('validated', 'On Approval'),
('approved', 'Approved'),
('done', 'Done'),
('rejected', 'Rejected'),
('canceled', 'Canceled'),
]
EDU_TRANSITIONS = [
('admission', 'Admission'),
('enrollment', 'Enrollment'),
('transfer', 'Transfer'),
('dismissal', 'Dismissal'),
('other', 'Other'),
]
EDU_RECORD_TYPES = [
('module', 'Regular Module'),
('practice', 'Practice'),
('coursework', 'Course Work'),
('finalexam', 'Final Exam'),
('graduatework ', 'Graduate Work'),
('other', 'Other'),
]
EDU_JOURNAL_TYPES = [
('statediploma', 'State Diploma'),
('credits', 'Credits'),
]
# Abstract Academic Document
class edu_doc(osv.AbstractModel):
    """Abstract base model for academic documents.

    Provides the shared approval workflow (draft -> confirmed -> validated
    -> approved -> done, plus rejected/canceled), records who validated or
    approved a document and when, and supplies the OpenChatter
    "needs action" domain per permission group.
    """
    _name = 'edu.doc'
    _description = 'Academic Document'
    _inherit = ['mail.thread','ir.needaction_mixin']
    # Workflow Functions
    def set_draft(self, cr, uid, ids, context=None):
        """Reset the documents to draft, clearing any previous approval date."""
        self.write(cr, uid, ids, {
            'state': 'draft',
            'date_approved': False,
        }, context=context)
        return True
    def set_confirmed(self, cr, uid, ids, context=None):
        """Mark the documents as confirmed (submitted for validation)."""
        self.write(cr, uid, ids, {
            'state': 'confirmed',
        }, context=context)
        return True
    def set_validated(self, cr, uid, ids, context=None):
        """Mark the documents as validated (submitted for approval)."""
        self.write(cr, uid, ids, {
            'state': 'validated',
        }, context=context)
        return True
    def set_approved(self, cr, uid, ids, context=None):
        """Approve the documents, stamping today's date and the approver."""
        self.write(cr, uid, ids, {
            'state': 'approved',
            'date_approved': fields.date.context_today(self, cr, uid, context=context),
            'user_approved': uid,
        }, context=context)
        return True
    def set_done(self, cr, uid, ids, context=None):
        """Mark the documents as done."""
        self.write(cr, uid, ids, {
            'state': 'done',
        }, context=context)
        return True
    def set_rejected(self, cr, uid, ids, context=None):
        """Mark the documents as rejected."""
        self.write(cr, uid, ids, {
            'state': 'rejected',
        }, context=context)
        return True
    def set_canceled(self, cr, uid, ids, context=None):
        """Mark the documents as canceled."""
        self.write(cr, uid, ids, {
            'state': 'canceled',
        }, context=context)
        return True
    # Access Functions
    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a document as a fresh draft (approval data is reset)."""
        # Work on a copy so a caller-supplied ``default`` dict is not mutated.
        default = dict(default or {})
        default.update({
            'state': 'draft',
            'date_approved': False,
        })
        return super(edu_doc, self).copy(cr, uid, id, default, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """Allow deletion only for documents still in draft state."""
        context = context or {}
        for record in self.browse(cr, uid, ids, context=context):
            if record.state not in ['draft']:
                # BUG FIX: ``_`` was used here without being imported anywhere
                # in this module, so raising this error itself crashed with a
                # NameError (now imported from openerp.tools.translate).
                raise osv.except_osv(_('Invalid Action!'), _('Cannot delete document in state \'%s\'.') % record.state)
        return super(edu_doc, self).unlink(cr, uid, ids, context=context)
    # Other Functions
    def _job_by_user(self, cr, uid, user_ids, context=None):
        """Helper: map user id -> hr.job id through the hr.employee records."""
        employee_obj = self.pool.get('hr.employee')
        employee_ids = employee_obj.search(cr, uid, [('resource_id.user_id', 'in', user_ids)], context=context)
        return dict((employee.resource_id.user_id.id, employee.job_id.id)
                    for employee in employee_obj.browse(cr, uid, employee_ids, context=context))
    def _get_user_job_validated(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: job of the user who validated each document."""
        docs = self.browse(cr, uid, ids, context=context)
        job_by_user = self._job_by_user(cr, uid, [doc.user_validated.id for doc in docs], context=context)
        return dict((doc.id, job_by_user.get(doc.user_validated.id, False)) for doc in docs)
    def _get_user_job_approved(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: job of the user who approved each document."""
        docs = self.browse(cr, uid, ids, context=context)
        job_by_user = self._job_by_user(cr, uid, [doc.user_approved.id for doc in docs], context=context)
        return dict((doc.id, job_by_user.get(doc.user_approved.id, False)) for doc in docs)
    # OpenChatter functions
    def _needaction_domain_get(self, cr, uid, context=None):
        """Domain of documents awaiting the current user's action:
        rectors act on validated docs, prorectors on confirmed ones,
        managers on drafts; anyone else gets no badge."""
        users_obj = self.pool.get('res.users')
        if users_obj.has_group(cr, uid, 'irsid_edu.group_edu_rector'):
            return [('state', '=', 'validated')]
        if users_obj.has_group(cr, uid, 'irsid_edu.group_edu_prorector'):
            return [('state', '=', 'confirmed')]
        if users_obj.has_group(cr, uid, 'irsid_edu.group_edu_manager'):
            return [('state', 'in', ['draft'])]
        return False
    # Fields
    _columns = {
        'date_validated': fields.date(
            'Date Of Validation',
            readonly = True,
        ),
        'user_validated': fields.many2one(
            'res.users',
            'Validated By',
            readonly = True,
        ),
        'user_job_validated': fields.function(
            _get_user_job_validated,
            type='many2one',
            obj='hr.job',
            string = 'Job of Validator',
            readonly = True,
        ),
        'date_approved': fields.date(
            'Date Of Approval',
            readonly = True,
        ),
        'user_approved': fields.many2one(
            'res.users',
            'Approved By',
            readonly = True,
        ),
        'user_job_approved': fields.function(
            _get_user_job_approved,
            type='many2one',
            obj='hr.job',
            string = 'Job of Approver',
            readonly = True,
        ),
        'state': fields.selection(
            EDU_DOC_STATES,
            'State Of Document',
            readonly = True,
            track_visibility='onchange',
        ),
    }
    # Default Values
    _defaults = {
        'state': 'draft',
    }
|
agpl-3.0
|
lucker6666/m4loc
|
corpus/corpus-cleaning-tool/src/scripts/corpustool/common/weird_align_clean.py
|
3
|
7477
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Adobe Systems Incorporated
#
# This file is part of TMX to Moses Corpus Tool.
#
# TMX to Moses Corpus Tool is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TMX to Moses Corpus Tool is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TMX to Moses Corpus Tool If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import sys
from optparse import OptionParser
from xml.dom.minidom import parse, parseString
from corpustool.lib.logger import log_start
from corpustool.lib.logger import log_done
def filter(pcconfig):
    """Pipeline entry point: drop sentence pairs whose token-count
    difference exceeds the <diff> threshold of the plugin's XML fragment.

    NOTE: the name shadows the ``filter`` builtin but is kept — it is the
    corpus-tool plugin interface.
    """
    log_start("diff_align")
    config = pcconfig.config
    src_filename = config.getCorpusFile(config.src, pcconfig.target, config.src)
    target_filename = config.getCorpusFile(config.src, pcconfig.target, pcconfig.target)
    # The XML fragment carries the threshold, e.g. <diff>5</diff>.
    # (The unused ``ext = ".diff_align"`` local was removed.)
    doc = parseString(pcconfig.xml_frag)
    diff_threshold = int(doc.getElementsByTagName("diff")[0].firstChild.data)
    clean_weird_diff_align(src_filename, target_filename, diff_threshold)
    log_done("diff_align")
def main():
    # Difference-based cleaning is the active mode; ratio-based cleaning
    # (main_ratio) is kept available but disabled.
    main_difference()
    # main_ratio()
def main_difference():
    """Command-line driver: clean pairs by absolute token-count difference.

    Usage: prog src-corpus target-corpus threshold
    Exits with status 2 on a missing path or an unreadable/unwritable file.
    """
    progname = sys.argv[0]
    usage = """%prog src-corpus target-corpus threshold
Clean the wired-align sentences according to threshold for difference of token number between source and target."""
    parser = OptionParser(usage, version="%prog 0.2")
    (options, args) = parser.parse_args()
    if len(args) != 3:
        parser.error("wrong arguments.")
    src_path, target_path, threshold_str = args
    if not threshold_str.isdigit():
        parser.error("threshold must be an integer.")
    threshold = int(threshold_str)
    src_path = os.path.abspath(src_path)
    target_path = os.path.abspath(target_path)
    # Both existence checks run before any open attempt, preserving the
    # original check (and error-message) order.  Single-argument print(...)
    # works identically on Python 2 and 3.
    for path in (src_path, target_path):
        if not os.path.isfile(path):
            print(progname + ": " + "cannot stat '" + os.path.basename(path) + "': " + "No such file.")
            sys.exit(2)
    # Probe that both files are readable AND writable ('r+') up front,
    # since clean_weird_diff_align rewrites them in place.
    for path in (src_path, target_path):
        try:
            probe = open(path, 'r+')
        except IOError:
            print("Cannot open the file '" + os.path.basename(path) + "' for reading and writing.")
            sys.exit(2)
        probe.close()
    clean_weird_diff_align(src_path, target_path, threshold)
def clean_weird_diff_align(src_path, target_path, threshold):
    """Rewrite the parallel corpus in place, dropping sentence pairs whose
    token-count difference is weird per ``is_weird_diff_align``.

    Kept pairs go to ``*.weirdcleaned`` side files, which are then copied
    back over the originals (copy, not move, so the side files remain).
    """
    srcfile = open(src_path, 'r')
    targetfile = open(target_path, 'r')
    srcfile_weirdcleaned = open(src_path + ".weirdcleaned", 'w')
    targetfile_weirdcleaned = open(target_path + ".weirdcleaned", 'w')
    for srcline in srcfile:
        targetline = targetfile.readline()
        # BUG FIX: readline() returns '' at EOF, never None, so the old
        # ``targetline == None`` test could never fire.  An empty string
        # means the target corpus is shorter than the source: stop pairing.
        # (Output is unchanged — a 0-token line was filtered as weird anyway.)
        if not targetline:
            break
        num_src = len(srcline.split())
        num_target = len(targetline.split())
        if not is_weird_diff_align(num_src, num_target, threshold):
            srcfile_weirdcleaned.write(srcline)
            targetfile_weirdcleaned.write(targetline)
    srcfile_weirdcleaned.close()
    targetfile_weirdcleaned.close()
    srcfile.close()
    targetfile.close()
    shutil.copyfile(src_path + ".weirdcleaned", src_path)
    shutil.copyfile(target_path + ".weirdcleaned", target_path)
def is_weird_diff_align(num_src, num_target, threshold):
    """Return True when the pair looks misaligned: either side is empty,
    or the absolute token-count difference exceeds ``threshold``."""
    if num_src == 0 or num_target == 0:
        return True
    # ``return cond`` instead of the redundant ``True if cond else False``.
    return abs(num_src - num_target) > threshold
def main_ratio():
    """Command-line driver: clean pairs by the src/target token-count ratio.

    Exactly one of --greater/--less must be supplied.  Exits with status 2
    on a missing path or an unreadable/unwritable file.
    """
    progname = sys.argv[0]
    usage = """%prog [options] src-corpus target-corpus
Clean the wired-align sentences according to threshold of token number src/target."""
    parser = OptionParser(usage, version="%prog 0.1")
    parser.add_option("-g", "--greater", dest="threshold_g", metavar="THRESHOLD", type="float",
                      help="set the threshold for greater predication.")
    parser.add_option("-l", "--less", dest="threshold_l", metavar="THRESHOLD", type="float",
                      help="set the threshold for less predication.")
    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.error("wrong arguments.")
    # ``is (not) None`` replaces the old ``== None`` / ``!= None`` tests.
    if (options.threshold_g is None) and (options.threshold_l is None):
        parser.error("please specify one threshold.")
    if (options.threshold_g is not None) and (options.threshold_l is not None):
        parser.error("only one threshold can be specified now.")
    src_path, target_path = args
    src_path = os.path.abspath(src_path)
    target_path = os.path.abspath(target_path)
    # Existence first for both paths, then the read/write probes, keeping
    # the original check order and messages.
    for path in (src_path, target_path):
        if not os.path.isfile(path):
            print(progname + ": " + "cannot stat '" + os.path.basename(path) + "': " + "No such file.")
            sys.exit(2)
    for path in (src_path, target_path):
        try:
            probe = open(path, 'r+')
        except IOError:
            print("Cannot open the file '" + os.path.basename(path) + "' for reading and writing.")
            sys.exit(2)
        probe.close()
    clean_weird_ratio_align(src_path, target_path, options.threshold_g, options.threshold_l)
def is_weird_ratio_align(num_src, num_target, th_g, th_l):
    """Return True when the pair looks misaligned by token-count ratio:
    either side empty, ratio above ``th_g`` (when given), or otherwise
    below ``th_l``.  Callers pass exactly one non-None threshold."""
    if num_src == 0 or num_target == 0:
        return True
    ratio = float(num_src) / float(num_target)
    # ``is None`` replaces ``== None``; drop the redundant ternaries.
    if th_g is None:
        return ratio < th_l
    return ratio > th_g
def clean_weird_ratio_align(src_path, target_path, th_g, th_l):
    """Rewrite the parallel corpus in place, dropping sentence pairs whose
    src/target token ratio is weird per ``is_weird_ratio_align``.

    Unlike the difference-based variant, the ``*.weirdcleaned`` side files
    are moved (not copied) over the originals.
    """
    srcfile = open(src_path, 'r')
    targetfile = open(target_path, 'r')
    srcfile_weirdcleaned = open(src_path + ".weirdcleaned", 'w')
    targetfile_weirdcleaned = open(target_path + ".weirdcleaned", 'w')
    for srcline in srcfile:
        targetline = targetfile.readline()
        # BUG FIX: readline() yields '' (never None) at EOF, so the old
        # ``targetline == None`` test was dead code; stop once the target
        # corpus runs out.  Written output is unchanged.
        if not targetline:
            break
        num_src = len(srcline.split())
        num_target = len(targetline.split())
        if not is_weird_ratio_align(num_src, num_target, th_g, th_l):
            srcfile_weirdcleaned.write(srcline)
            targetfile_weirdcleaned.write(targetline)
    srcfile_weirdcleaned.close()
    targetfile_weirdcleaned.close()
    srcfile.close()
    targetfile.close()
    shutil.move(src_path + ".weirdcleaned", src_path)
    shutil.move(target_path + ".weirdcleaned", target_path)
if __name__ == "__main__":
    # Command-line entry point; see main() for the active cleaning mode.
    main()
|
lgpl-3.0
|
yakky/django-cms
|
cms/tests/test_page_user_group_admin.py
|
3
|
18350
|
# -*- coding: utf-8 -*-
from django.contrib.messages.storage.cookie import CookieStorage
from django.forms.models import model_to_dict
from django.test.utils import override_settings
from cms.models.permissionmodels import PageUserGroup
from cms.test_utils.testcases import CMSTestCase
from cms.utils.urlutils import admin_reverse
class PermissionsOnTestCase(CMSTestCase):
    """Shared fixtures/helpers for the page-user-group admin tests."""

    def _group_exists(self, name=None):
        """Return True when a PageUserGroup named ``name`` (default
        'Test group') exists."""
        lookup = name or 'Test group'
        return PageUserGroup.objects.filter(name=lookup).exists()

    def _get_group_data(self, **kwargs):
        """Build admin-form POST data for a group; ``kwargs`` override the
        defaults."""
        defaults = {
            'name': 'Test group',
            'can_add_page': 'on',
            'can_change_page': 'on',
            'can_delete_page': 'on',
        }
        defaults.update(**kwargs)
        return defaults

    def _get_group(self, created_by=None):
        """Create and return a test PageUserGroup owned by ``created_by``
        (defaults to the superuser)."""
        owner = created_by or self.get_superuser()
        return PageUserGroup.objects.create(name='Test group', created_by=owner)
@override_settings(CMS_PERMISSION=True)
class PermissionsOnGlobalTest(PermissionsOnTestCase):
    """PageUserGroup admin access gated by GLOBAL page permissions:
    each operation requires the model permission AND a global
    ``can_change_permissions`` grant."""

    def test_group_in_admin_index(self):
        """With can_change_permissions, the app index lists the model and
        the changelist is reachable."""
        endpoint = admin_reverse('app_list', args=['cms'])
        staff_user = self.get_staff_user_with_no_permissions()
        self.add_permission(staff_user, 'change_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=True)
        with self.login_user_context(staff_user):
            response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 200)
            self.assertContains(
                response,
                '<a href="/en/admin/cms/pageusergroup/">User groups (page)</a>',
                html=True,
            )
        endpoint = self.get_admin_url(PageUserGroup, 'changelist')
        with self.login_user_context(staff_user):
            response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 200)

    def test_group_not_in_admin_index(self):
        """Without can_change_permissions: app index 404s, changelist 403s."""
        endpoint = admin_reverse('app_list', args=['cms'])
        staff_user = self.get_staff_user_with_no_permissions()
        self.add_permission(staff_user, 'change_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=False)
        with self.login_user_context(staff_user):
            response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 404)
        endpoint = self.get_admin_url(PageUserGroup, 'changelist')
        with self.login_user_context(staff_user):
            response = self.client.get(endpoint)
            self.assertEqual(response.status_code, 403)

    def test_user_can_add_group(self):
        """Add succeeds (redirect + row created) with the grant."""
        endpoint = self.get_admin_url(PageUserGroup, 'add')
        redirect_to = admin_reverse('index')
        staff_user = self.get_staff_user_with_no_permissions()
        data = self._get_group_data()
        self.add_permission(staff_user, 'add_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=True)
        with self.login_user_context(staff_user):
            response = self.client.post(endpoint, data)
            self.assertRedirects(response, redirect_to)
            self.assertTrue(self._group_exists())

    def test_user_cant_add_group(self):
        """Add is refused (403, no row) without the grant."""
        endpoint = self.get_admin_url(PageUserGroup, 'add')
        staff_user = self.get_staff_user_with_no_permissions()
        data = self._get_group_data()
        self.add_permission(staff_user, 'add_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=False)
        with self.login_user_context(staff_user):
            response = self.client.post(endpoint, data)
            self.assertEqual(response.status_code, 403)
            self.assertFalse(self._group_exists())

    def test_user_can_change_group(self):
        """Change succeeds (redirect + renamed row) with the grant."""
        group = self._get_group()
        endpoint = self.get_admin_url(PageUserGroup, 'change', group.pk)
        redirect_to = self.get_admin_url(PageUserGroup, 'changelist')
        staff_user = self.get_staff_user_with_no_permissions()
        data = self._get_group_data(name='New test group')
        self.add_permission(staff_user, 'change_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=True)
        with self.login_user_context(staff_user):
            response = self.client.post(endpoint, data)
            self.assertRedirects(response, redirect_to)
            self.assertTrue(self._group_exists('New test group'))

    def test_user_cant_change_group(self):
        """Change is refused (403, original row intact) without the grant."""
        group = self._get_group()
        endpoint = self.get_admin_url(PageUserGroup, 'change', group.pk)
        staff_user = self.get_staff_user_with_no_permissions()
        data = self._get_group_data(name='New test group')
        self.add_permission(staff_user, 'change_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=False)
        with self.login_user_context(staff_user):
            response = self.client.post(endpoint, data)
            self.assertEqual(response.status_code, 403)
            self.assertTrue(self._group_exists())

    def test_user_can_delete_group(self):
        """Delete succeeds (redirect + row gone) with the grant."""
        group = self._get_group()
        endpoint = self.get_admin_url(PageUserGroup, 'delete', group.pk)
        redirect_to = admin_reverse('index')
        staff_user = self.get_staff_user_with_no_permissions()
        data = {'post': 'yes'}
        self.add_permission(staff_user, 'delete_group')
        self.add_permission(staff_user, 'delete_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=True)
        with self.login_user_context(staff_user):
            response = self.client.post(endpoint, data)
            self.assertRedirects(response, redirect_to)
            self.assertFalse(self._group_exists())

    def test_user_cant_delete_group(self):
        """Delete is refused (403, row intact) without the grant."""
        group = self._get_group()
        endpoint = self.get_admin_url(PageUserGroup, 'delete', group.pk)
        staff_user = self.get_staff_user_with_no_permissions()
        data = {'post': 'yes'}
        self.add_permission(staff_user, 'delete_group')
        self.add_permission(staff_user, 'delete_pageusergroup')
        self.add_global_permission(staff_user, can_change_permissions=False)
        with self.login_user_context(staff_user):
            response = self.client.post(endpoint, data)
            self.assertEqual(response.status_code, 403)
            self.assertTrue(self._group_exists())
@override_settings(CMS_PERMISSION=True)
class PermissionsOnPageTest(PermissionsOnTestCase):
"""
Uses PagePermission
"""
def setUp(self):
    # Page used as the target of the per-page permission grants below.
    self._permissions_page = self.get_permissions_test_page()
def test_group_in_admin_index(self):
    """With a per-page can_change_permissions grant, the cms app index
    lists the model and the changelist is reachable."""
    endpoint = admin_reverse('app_list', args=['cms'])
    staff_user = self.get_staff_user_with_no_permissions()
    self.add_permission(staff_user, 'change_pageusergroup')
    self.add_page_permission(
        staff_user,
        self._permissions_page,
        can_change_permissions=True,
    )
    with self.login_user_context(staff_user):
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<a href="/en/admin/cms/pageusergroup/">User groups (page)</a>',
            html=True,
        )
    endpoint = self.get_admin_url(PageUserGroup, 'changelist')
    with self.login_user_context(staff_user):
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, 200)
def test_group_not_in_admin_index(self):
    """Without the per-page grant: app index 404s, changelist 403s."""
    endpoint = admin_reverse('app_list', args=['cms'])
    staff_user = self.get_staff_user_with_no_permissions()
    self.add_permission(staff_user, 'change_pageusergroup')
    self.add_page_permission(
        staff_user,
        self._permissions_page,
        can_change_permissions=False,
    )
    with self.login_user_context(staff_user):
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, 404)
    endpoint = self.get_admin_url(PageUserGroup, 'changelist')
    with self.login_user_context(staff_user):
        response = self.client.get(endpoint)
        self.assertEqual(response.status_code, 403)
def test_user_can_add_group(self):
    """
    User can add new groups if can_change_permissions
    is set to True.
    """
    endpoint = self.get_admin_url(PageUserGroup, 'add')
    redirect_to = admin_reverse('index')
    staff_user = self.get_staff_user_with_no_permissions()
    data = self._get_group_data()
    self.add_permission(staff_user, 'add_pageusergroup')
    self.add_page_permission(
        staff_user,
        self._permissions_page,
        can_change_permissions=True,
    )
    with self.login_user_context(staff_user):
        response = self.client.post(endpoint, data)
        # Redirect plus a created row proves the add went through.
        self.assertRedirects(response, redirect_to)
        self.assertTrue(self._group_exists())
def test_user_cant_add_group(self):
    """
    User can't add new groups if can_change_permissions
    is set to False.
    """
    endpoint = self.get_admin_url(PageUserGroup, 'add')
    staff_user = self.get_staff_user_with_no_permissions()
    data = self._get_group_data()
    self.add_permission(staff_user, 'add_pageusergroup')
    self.add_page_permission(
        staff_user,
        self._permissions_page,
        can_change_permissions=False,
    )
    with self.login_user_context(staff_user):
        response = self.client.post(endpoint, data)
        # 403 and no created row proves the permission check blocked it.
        self.assertEqual(response.status_code, 403)
        self.assertFalse(self._group_exists())
def test_user_can_change_subordinate_group(self):
"""
User can change groups he created if can_change_permissions
is set to True.
"""
staff_user = self.get_staff_user_with_no_permissions()
group = self._get_group(created_by=staff_user)
endpoint = self.get_admin_url(PageUserGroup, 'change', group.pk)
data = model_to_dict(group)
data['_continue'] = '1'
data['name'] = 'New test group'
self.add_permission(staff_user, 'change_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=True,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertRedirects(response, endpoint)
self.assertTrue(self._group_exists('New test group'))
def test_user_cant_change_subordinate_group(self):
"""
User cant change groups he created if can_change_permissions
is set to False.
"""
staff_user = self.get_staff_user_with_no_permissions()
group = self._get_group(created_by=staff_user)
endpoint = self.get_admin_url(PageUserGroup, 'change', group.pk)
data = model_to_dict(group)
data['_continue'] = '1'
data['name'] = 'New test group'
self.add_permission(staff_user, 'change_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=False,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertFalse(self._group_exists('New test group'))
def test_user_cant_change_own_group(self):
"""
User cant change a group he's a part of,
even with can_change_permissions set to True.
"""
group = self._get_group()
staff_user = self.get_staff_user_with_no_permissions()
staff_user.groups.add(group)
endpoint = self.get_admin_url(PageUserGroup, 'change', group.pk)
redirect_to = admin_reverse('index')
data = model_to_dict(group)
data['_continue'] = '1'
data['name'] = 'New test group'
self.add_permission(staff_user, 'change_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=True,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertRedirects(response, redirect_to)
msgs = CookieStorage(response)._decode(response.cookies['messages'].value)
self.assertTrue(msgs[0], PageUserGroup._meta.verbose_name)
self.assertTrue(msgs[0], 'ID "%s"' % group.pk)
self.assertFalse(self._group_exists('New test group'))
def test_user_cant_change_others_group(self):
"""
User cant change a group created by another user,
even with can_change_permissions set to True.
"""
admin = self.get_superuser()
group = self._get_group(created_by=admin)
staff_user = self.get_staff_user_with_no_permissions()
endpoint = self.get_admin_url(PageUserGroup, 'change', group.pk)
redirect_to = admin_reverse('index')
data = model_to_dict(group)
data['_continue'] = '1'
data['name'] = 'New test group'
self.add_permission(staff_user, 'change_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=True,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertRedirects(response, redirect_to)
msgs = CookieStorage(response)._decode(response.cookies['messages'].value)
self.assertTrue(msgs[0], PageUserGroup._meta.verbose_name)
self.assertTrue(msgs[0], 'ID "%s"' % group.pk)
self.assertFalse(self._group_exists('New test group'))
def test_user_can_delete_subordinate_group(self):
"""
User can delete groups he created if can_change_permissions
is set to True.
"""
staff_user = self.get_staff_user_with_no_permissions()
group = self._get_group(created_by=staff_user)
endpoint = self.get_admin_url(PageUserGroup, 'delete', group.pk)
redirect_to = admin_reverse('index')
data = {'post': 'yes'}
self.add_permission(staff_user, 'delete_group')
self.add_permission(staff_user, 'delete_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=True,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertRedirects(response, redirect_to)
self.assertFalse(self._group_exists())
def test_user_cant_delete_subordinate_group(self):
"""
User cant delete groups he created if can_change_permissions
is set to False.
"""
staff_user = self.get_staff_user_with_no_permissions()
group = self._get_group(created_by=staff_user)
endpoint = self.get_admin_url(PageUserGroup, 'delete', group.pk)
data = {'post': 'yes'}
self.add_permission(staff_user, 'delete_group')
self.add_permission(staff_user, 'delete_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=False,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 403)
self.assertTrue(self._group_exists())
def test_user_cant_delete_own_group(self):
"""
User cant delete a group he's a part of,
even with can_change_permissions set to True.
"""
group = self._get_group()
staff_user = self.get_staff_user_with_no_permissions()
staff_user.groups.add(group)
endpoint = self.get_admin_url(PageUserGroup, 'delete', group.pk)
redirect_to = admin_reverse('index')
data = {'post': 'yes'}
self.add_permission(staff_user, 'delete_group')
self.add_permission(staff_user, 'delete_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=True,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertRedirects(response, redirect_to)
msgs = CookieStorage(response)._decode(response.cookies['messages'].value)
self.assertTrue(msgs[0], PageUserGroup._meta.verbose_name)
self.assertTrue(msgs[0], 'ID "%s"' % group.pk)
self.assertTrue(self._group_exists())
def test_user_cant_delete_others_group(self):
"""
User cant delete a group created by another user,
even with can_change_permissions set to True.
"""
admin = self.get_superuser()
group = self._get_group(created_by=admin)
staff_user = self.get_staff_user_with_no_permissions()
endpoint = self.get_admin_url(PageUserGroup, 'delete', group.pk)
redirect_to = admin_reverse('index')
data = {'post': 'yes'}
self.add_permission(staff_user, 'delete_group')
self.add_permission(staff_user, 'delete_pageusergroup')
self.add_page_permission(
staff_user,
self._permissions_page,
can_change_permissions=True,
)
with self.login_user_context(staff_user):
response = self.client.post(endpoint, data)
self.assertRedirects(response, redirect_to)
msgs = CookieStorage(response)._decode(response.cookies['messages'].value)
self.assertTrue(msgs[0], PageUserGroup._meta.verbose_name)
self.assertTrue(msgs[0], 'ID "%s"' % group.pk)
self.assertTrue(self._group_exists())
|
bsd-3-clause
|
dcroc16/skunk_works
|
google_appengine/google/appengine/_internal/django/utils/translation/trans_real.py
|
23
|
20698
|
"""Translation helper functions."""
import locale
import os
import re
import sys
import warnings
import gettext as gettext_module
from cStringIO import StringIO
from google.appengine._internal.django.utils.importlib import import_module
from google.appengine._internal.django.utils.safestring import mark_safe, SafeData
from google.appengine._internal.django.utils.thread_support import currentThread
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
    True, the last component is lower-cased (en_us).
    """
    head, sep, tail = language.partition('-')
    if not sep:
        # No country/script part at all: just lower-case the whole code.
        return language.lower()
    if to_lower:
        return head.lower() + '_' + tail.lower()
    if len(tail) > 2:
        # Script subtags such as sr-latn become sr_Latn (title-cased).
        return head.lower() + '_' + tail[0].upper() + tail[1:].lower()
    return head.lower() + '_' + tail.upper()
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    This class sets up the GNUTranslations context with regard to output
    charset. Django uses a defined DEFAULT_CHARSET as the output charset on
    Python 2.4.
    """
    def __init__(self, *args, **kw):
        # NOTE(review): 'settings' is imported but never used in this method;
        # presumably kept for its import-time side effects -- confirm before
        # removing.
        from google.appengine._internal.django.conf import settings
        gettext_module.GNUTranslations.__init__(self, *args, **kw)
        # Starting with Python 2.4, there's a function to define
        # the output charset. Before 2.4, the output charset is
        # identical with the translation file charset.
        try:
            self.set_output_charset('utf-8')
        except AttributeError:
            pass
        self.django_output_charset = 'utf-8'
        # Language code; '??' until set_language() is called.
        self.__language = '??'
    def merge(self, other):
        """Merge another translation's catalog into this one, in place."""
        self._catalog.update(other._catalog)
    def set_language(self, language):
        """Record the language code this translation represents."""
        self.__language = language
    def language(self):
        """Return the language code set via set_language() ('??' if unset)."""
        return self.__language
    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
    """
    Returns a translation object.
    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct a object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    global _translations
    # Fast path: reuse a previously built (and cached) translation.
    t = _translations.get(language, None)
    if t is not None:
        return t
    from google.appengine._internal.django.conf import settings
    # Locale directory next to the settings base module (Django's own).
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    if settings.SETTINGS_MODULE is not None:
        parts = settings.SETTINGS_MODULE.split('.')
        project = import_module(parts[0])
        projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
    else:
        projectpath = None
    def _fetch(lang, fallback=None):
        # Build (or return the cached) merged translation for one language.
        global _translations
        loc = to_locale(lang)
        res = _translations.get(lang, None)
        if res is not None:
            return res
        def _translation(path):
            # Load a single catalog; None when the .mo file is missing.
            try:
                t = gettext_module.translation('django', path, [loc], DjangoTranslation)
                t.set_language(lang)
                return t
            except IOError, e:
                return None
        res = _translation(globalpath)
        # We want to ensure that, for example, "en-gb" and "en-us" don't share
        # the same translation object (thus, merging en-us with a local update
        # doesn't affect en-gb), even though they will both use the core "en"
        # translation. So we have to subvert Python's internal gettext caching.
        base_lang = lambda x: x.split('-', 1)[0]
        if base_lang(lang) in [base_lang(trans) for trans in _translations]:
            res._info = res._info.copy()
            res._catalog = res._catalog.copy()
        def _merge(path):
            # Fold another catalog into 'res' (or adopt it when res is None).
            t = _translation(path)
            if t is not None:
                if res is None:
                    return t
                else:
                    res.merge(t)
            return res
        # Merge order: LOCALE_PATHS, then each installed app, then project --
        # later merges override earlier entries via dict.update().
        for localepath in settings.LOCALE_PATHS:
            if os.path.isdir(localepath):
                res = _merge(localepath)
        for appname in settings.INSTALLED_APPS:
            app = import_module(appname)
            apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
            if os.path.isdir(apppath):
                res = _merge(apppath)
        if projectpath and os.path.isdir(projectpath):
            res = _merge(projectpath)
        if res is None:
            if fallback is not None:
                res = fallback
            else:
                # Nothing found and no fallback: behave as the identity.
                return gettext_module.NullTranslations()
        _translations[lang] = res
        return res
    default_translation = _fetch(settings.LANGUAGE_CODE)
    current_translation = _fetch(language, fallback=default_translation)
    return current_translation
def activate(language):
    """
    Fetches the translation object for a given tuple of application name and
    language and installs it as the current translation object for the current
    thread.
    """
    if isinstance(language, basestring) and language == 'no':
        warnings.warn(
            "The use of the language code 'no' is deprecated. "
            "Please use the 'nb' translation instead.",
            PendingDeprecationWarning
        )
    # Keyed by thread object so each thread gets its own active translation.
    _active[currentThread()] = translation(language)
def deactivate():
    """
    Deinstalls the currently active translation object so that further _ calls
    will resolve against the default translation object, again.
    """
    global _active
    thread = currentThread()
    if thread in _active:
        del _active[thread]
def deactivate_all():
    """
    Makes the active translation object a NullTranslations() instance. This is
    useful when we want delayed translations to appear as the original string
    for some reason.
    """
    null_translation = gettext_module.NullTranslations()
    _active[currentThread()] = null_translation
def get_language():
    """Returns the currently selected language."""
    t = _active.get(currentThread(), None)
    if t is not None:
        try:
            return to_language(t.language())
        except AttributeError:
            # NullTranslations instances have no language() method.
            pass
    # If we don't have a real translation object, assume it's the default language.
    from google.appengine._internal.django.conf import settings
    return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Returns selected language's BiDi layout.
    * False = left-to-right layout
    * True = right-to-left layout
    """
    from google.appengine._internal.django.conf import settings
    # Only the base language (e.g. 'ar' from 'ar-sa') decides directionality.
    current = get_language()
    return current.split('-')[0] in settings.LANGUAGES_BIDI
def catalog():
    """
    Returns the current active catalog for further processing.
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default, _active
    t = _active.get(currentThread(), None)
    if t is not None:
        return t
    if _default is None:
        # Lazily build and cache the default-language translation.
        from google.appengine._internal.django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return _default
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    # Normalize line endings so catalog lookups aren't defeated by \r\n.
    eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
    global _default, _active
    t = _active.get(currentThread(), None)
    if t is not None:
        result = getattr(t, translation_function)(eol_message)
    else:
        if _default is None:
            # Lazily build and cache the default-language translation.
            from google.appengine._internal.django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        result = getattr(_default, translation_function)(eol_message)
    if isinstance(message, SafeData):
        # Preserve the "safe string" marker across translation.
        return mark_safe(result)
    return result
def gettext(message):
    """Translate 'message' via do_translate using the 'gettext' catalog method."""
    return do_translate(message, 'gettext')
def ugettext(message):
    """Translate 'message' via do_translate using the 'ugettext' catalog method."""
    return do_translate(message, 'ugettext')
def gettext_noop(message):
    """
    Marks strings for translation but doesn't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    # Identity by design: translation happens at a later call site.
    return message
def do_ntranslate(singular, plural, number, translation_function):
    """
    Dispatches 'translation_function' (ngettext or ungettext) on the active
    thread's translation object, falling back to the cached default translation.
    """
    global _default, _active
    t = _active.get(currentThread(), None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        # Lazily build and cache the default-language translation.
        from google.appengine._internal.django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
    """
    Returns a UTF-8 bytestring of the translation of either the singular or
    plural, based on the number.
    """
    # Thin wrapper: the thread-local dispatch lives in do_ntranslate.
    return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
    """
    Returns a unicode strings of the translation of either the singular or
    plural, based on the number.
    """
    # Thin wrapper: the thread-local dispatch lives in do_ntranslate.
    return do_ntranslate(singular, plural, number, 'ungettext')
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies or
    session.
    """
    from google.appengine._internal.django.conf import settings
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    # gettext.find() returns the .mo path when a catalog exists, else None;
    # return that existence test directly instead of if/return True/False.
    return gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None
def get_language_from_request(request):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    Lookup order: session, then the language cookie, then the
    Accept-Language header, then settings.LANGUAGE_CODE.
    """
    global _accepted
    from google.appengine._internal.django.conf import settings
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    supported = dict(settings.LANGUAGES)
    # 1) Session-stored language, when a session is attached.
    if hasattr(request, 'session'):
        lang_code = request.session.get('django_language', None)
        if lang_code in supported and lang_code is not None and check_for_language(lang_code):
            return lang_code
    # 2) Language cookie.
    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    if lang_code and lang_code not in supported:
        lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
    if lang_code and lang_code in supported and check_for_language(lang_code):
        return lang_code
    # 3) Accept-Language header, highest q-value first.
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        # We have a very restricted form for our language files (no encoding
        # specifier, since they all must be UTF-8 and only one possible
        # language each time. So we avoid the overhead of gettext.find() and
        # work out the MO file manually.
        # 'normalized' is the root name of the locale in POSIX format (which is
        # the format used for the directories holding the MO files).
        normalized = locale.locale_alias.get(to_locale(accept_lang, True))
        if not normalized:
            continue
        # Remove the default encoding from locale_alias.
        normalized = normalized.split('.')[0]
        if normalized in _accepted:
            # We've seen this locale before and have an MO file for it, so no
            # need to check again.
            return _accepted[normalized]
        for lang, dirname in ((accept_lang, normalized),
                              (accept_lang.split('-')[0], normalized.split('_')[0])):
            if lang.lower() not in supported:
                continue
            langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
                                    'django.mo')
            if os.path.exists(langfile):
                # Cache the hit so repeated requests skip the file check.
                _accepted[normalized] = lang
                return lang
    # 4) Fall back to the site-wide default.
    return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
    """
    Changes every non-whitespace character to the given char.
    Used in the templatize function.
    """
    return re.sub(r'\S', char, src)
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from google.appengine._internal.django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK
    out = StringIO()
    # Two-state machine: 'intrans' is True while inside a {% blocktrans %}
    # ... {% endblocktrans %} pair; 'inplural' selects which buffer
    # (singular/plural) collects the literal text.
    intrans = False
    inplural = False
    singular = []
    plural = []
    for t in Lexer(src, None).tokenize():
        if intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    # Emit the collected blocktrans as an n/gettext call,
                    # then blank out the raw text so line numbers survive.
                    if inplural:
                        out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        out.write(' gettext(%r) ' % ''.join(singular))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents)
            elif t.token_type == TOKEN_VAR:
                # Template variables become %(name)s placeholders.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                # Escape literal percent signs for the format string.
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    # {% trans "..." %} -> gettext('...')
                    g = imatch.group(1)
                    if g[0] == '"': g = g.strip('"')
                    elif g[0] == "'": g = g.strip("'")
                    out.write(' gettext(%r) ' % g)
                elif bmatch:
                    # Opening {% blocktrans %}: emit any _() constants in the
                    # tag itself, then start collecting the block body.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    intrans = True
                    inplural = False
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                # {{ _("...")|filter:... }} -- keep _() calls, blank the rest.
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':',1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
def parse_accept_lang_header(lang_string):
    """
    Parses the lang_string, which is the body of an HTTP Accept-Language
    header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string results in an empty list being returned.
    """
    result = []
    # The regex split yields (prefix, lang, q) triples; a well-formed header
    # leaves an empty final piece.
    pieces = accept_language_re.split(lang_string)
    if pieces[-1]:
        # Trailing text the regex could not consume -> malformed header.
        return []
    for i in range(0, len(pieces) - 1, 3):
        first, lang, priority = pieces[i : i + 3]
        if first:
            # Garbage between two language entries -> malformed header.
            return []
        # Absent q-value defaults to 1.0 per RFC 2616.
        priority = priority and float(priority) or 1.0
        result.append((lang, priority))
    # Highest q-value first (Python 2 cmp-style sort).
    result.sort(lambda x, y: -cmp(x[1], y[1]))
    return result
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# and are kept for backward compatibility.
# Note, it's also important to keep format names marked for translation.
# For compatibility we still want to have formats on translation catalogs.
# That makes template code like {{ my_date|date:_('DATE_FORMAT') }} still work
def get_date_formats():
    """
    Checks whether translation files provide a translation for some technical
    message ID to store date and time formats. If it doesn't contain one, the
    formats provided in the settings will be used.
    """
    warnings.warn(
        "'django.utils.translation.get_date_formats' is deprecated. "
        "Please update your code to use the new i18n aware formatting.",
        PendingDeprecationWarning
    )
    from google.appengine._internal.django.conf import settings
    date_format = ugettext('DATE_FORMAT')
    datetime_format = ugettext('DATETIME_FORMAT')
    time_format = ugettext('TIME_FORMAT')
    # An untranslated sentinel ID comes back unchanged; fall back to settings.
    if date_format == 'DATE_FORMAT':
        date_format = settings.DATE_FORMAT
    if datetime_format == 'DATETIME_FORMAT':
        datetime_format = settings.DATETIME_FORMAT
    if time_format == 'TIME_FORMAT':
        time_format = settings.TIME_FORMAT
    return date_format, datetime_format, time_format
def get_partial_date_formats():
    """
    Checks whether translation files provide a translation for some technical
    message ID to store partial date formats. If it doesn't contain one, the
    formats provided in the settings will be used.
    """
    warnings.warn(
        "'django.utils.translation.get_partial_date_formats' is deprecated. "
        "Please update your code to use the new i18n aware formatting.",
        PendingDeprecationWarning
    )
    from google.appengine._internal.django.conf import settings
    year_month_format = ugettext('YEAR_MONTH_FORMAT')
    month_day_format = ugettext('MONTH_DAY_FORMAT')
    # An untranslated sentinel ID comes back unchanged; fall back to settings.
    if year_month_format == 'YEAR_MONTH_FORMAT':
        year_month_format = settings.YEAR_MONTH_FORMAT
    if month_day_format == 'MONTH_DAY_FORMAT':
        month_day_format = settings.MONTH_DAY_FORMAT
    return year_month_format, month_day_format
|
mit
|
freaker2k7/chalice
|
chalice/local.py
|
1
|
7587
|
"""Dev server used for running a chalice app locally.
This is intended only for local development purposes.
"""
import functools
from collections import namedtuple
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
from chalice.app import Chalice # noqa
from typing import List, Any, Dict, Tuple, Callable # noqa
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
MatchResult = namedtuple('MatchResult', ['route', 'captured', 'query_params'])
EventType = Dict[str, Any]
HandlerCls = Callable[..., 'ChaliceRequestHandler']
ServerCls = Callable[..., 'HTTPServer']
def create_local_server(app_obj, port):
    # type: (Chalice, int) -> LocalDevServer
    """Factory for a LocalDevServer bound to the given app and port."""
    return LocalDevServer(app_obj, port)
class RouteMatcher(object):
    """Match concrete request URLs against registered route patterns."""

    def __init__(self, route_urls):
        # type: (List[str]) -> None
        # Sorting the route_urls ensures we always check
        # the concrete routes for a prefix before the
        # variable/capture parts of the route, e.g
        # '/foo/bar' before '/foo/{capture}'
        self.route_urls = sorted(route_urls)

    def match_route(self, url):
        # type: (str) -> MatchResult
        """Match the url against known routes.

        This method takes a concrete route "/foo/bar", and
        matches it against a set of routes. These routes can
        use param substitution corresponding to API gateway patterns.
        For example::

            match_route('/foo/bar') -> '/foo/{name}'

        Raises ValueError when no registered route matches.
        """
        parsed_url = urlparse(url)
        query_params = {k: v[0] for k, v in parse_qs(parsed_url.query).items()}
        parts = parsed_url.path.split('/')
        for route_url in self.route_urls:
            url_parts = route_url.split('/')
            if len(parts) != len(url_parts):
                continue
            # Fix: 'captured' used to be a single dict shared across all
            # candidate routes, so params captured by a candidate that
            # ultimately failed to match leaked into the result returned
            # for the route that did match. Reset it per candidate.
            captured = {}
            for i, j in zip(parts, url_parts):
                if j.startswith('{') and j.endswith('}'):
                    captured[j[1:-1]] = i
                    continue
                if i != j:
                    break
            else:
                return MatchResult(route_url, captured, query_params)
        raise ValueError("No matching route found for: %s" % url)
class LambdaEventConverter(object):
    """Convert an HTTP request to an event dict used by lambda."""

    def __init__(self, route_matcher):
        # type: (RouteMatcher) -> None
        self._route_matcher = route_matcher

    def create_lambda_event(self, method, path, headers, body=None):
        # type: (str, str, Dict[str, str], str) -> EventType
        """Build the API-gateway-shaped event dict for one HTTP request."""
        match = self._route_matcher.match_route(path)
        event = {
            'requestContext': {
                'httpMethod': method,
                'resourcePath': match.route,
            },
            'headers': dict(headers),
            'queryStringParameters': match.query_params,
            # A missing body is represented as an empty JSON object.
            'body': '{}' if body is None else body,
            'pathParameters': match.captured,
            'stageVariables': {},
        }
        return event
class ChaliceRequestHandler(BaseHTTPRequestHandler):
    """Translate stdlib HTTP requests into lambda events for a Chalice app."""
    # NOTE(review): BaseHTTPRequestHandler reads 'protocol_version'; this
    # 'protocol' attribute looks unused by the stdlib server -- confirm.
    protocol = 'HTTP/1.1'
    def __init__(self, request, client_address, server, app_object):
        # type: (bytes, Tuple[str, int], HTTPServer, Chalice) -> None
        self.app_object = app_object
        self.event_converter = LambdaEventConverter(
            RouteMatcher(list(app_object.routes)))
        # BaseHTTPRequestHandler.__init__ processes the request immediately,
        # so our attributes must be assigned before this call.
        BaseHTTPRequestHandler.__init__(
            self, request, client_address, server) # type: ignore
    def _generic_handle(self):
        # type: () -> None
        """Convert the current HTTP request to a lambda event and invoke it."""
        lambda_event = self._generate_lambda_event()
        self._do_invoke_view_function(lambda_event)
    def _do_invoke_view_function(self, lambda_event):
        # type: (EventType) -> None
        # The app itself is the lambda entry point; no context is provided.
        lambda_context = None
        response = self.app_object(lambda_event, lambda_context)
        self._send_http_response(lambda_event, response)
    def _send_http_response(self, lambda_event, response):
        # type: (EventType, Dict[str, Any]) -> None
        """Write a lambda-style response dict back as a real HTTP response."""
        self.send_response(response['statusCode'])
        self.send_header('Content-Length', str(len(response['body'])))
        self.send_header(
            'Content-Type',
            response['headers'].get('Content-Type', 'application/json'))
        headers = response['headers']
        # NOTE(review): Content-Type may be emitted twice when present in
        # response['headers'] (once above, once in this loop) -- confirm.
        for header in headers:
            self.send_header(header, headers[header])
        self.end_headers()
        self.wfile.write(response['body'])
    def _generate_lambda_event(self):
        # type: () -> EventType
        # Only read a body when the client declared one.
        content_length = int(self.headers.get('content-length', '0'))
        body = None
        if content_length > 0:
            body = self.rfile.read(content_length)
        # mypy doesn't like dict(self.headers) so I had to use a
        # dictcomp instead to make it happy.
        converted_headers = {key: value for key, value in self.headers.items()}
        lambda_event = self.event_converter.create_lambda_event(
            method=self.command, path=self.path, headers=converted_headers,
            body=body,
        )
        return lambda_event
    # All common verbs share the generic lambda-event handling.
    do_GET = do_PUT = do_POST = do_HEAD = do_DELETE = do_PATCH = \
        _generic_handle
    def do_OPTIONS(self):
        # type: () -> None
        # This can either be because the user's provided an OPTIONS method
        # *or* this is a preflight request, which chalice automatically
        # sets up for you.
        lambda_event = self._generate_lambda_event()
        if self._has_user_defined_options_method(lambda_event):
            self._do_invoke_view_function(lambda_event)
        else:
            # Otherwise this is a preflight request which we automatically
            # generate.
            self._send_autogen_options_response()
    def _cors_enabled_for_route(self, lambda_event):
        # type: (EventType) -> bool
        """Return the cors flag of the route the event resolved to."""
        route_key = lambda_event['requestContext']['resourcePath']
        route_entry = self.app_object.routes[route_key]
        return route_entry.cors
    def _has_user_defined_options_method(self, lambda_event):
        # type: (EventType) -> bool
        """True when the matched route explicitly registered OPTIONS."""
        route_key = lambda_event['requestContext']['resourcePath']
        route_entry = self.app_object.routes[route_key]
        return 'OPTIONS' in route_entry.methods
    def _send_autogen_options_response(self):
        # type:() -> None
        """Reply to a CORS preflight with permissive autogenerated headers."""
        self.send_response(200)
        self.send_header(
            'Access-Control-Allow-Headers',
            'Content-Type,X-Amz-Date,Authorization,'
            'X-Api-Key,X-Amz-Security-Token'
        )
        self.send_header('Access-Control-Allow-Methods',
                         'GET,HEAD,PUT,POST,OPTIONS')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
class LocalDevServer(object):
    """Small wrapper that serves a Chalice app over a local HTTPServer."""

    def __init__(self, app_object, port, handler_cls=ChaliceRequestHandler,
                 server_cls=HTTPServer):
        # type: (Chalice, int, HandlerCls, ServerCls) -> None
        self.app_object = app_object
        self.port = port
        # Bind the app object into the handler: the stdlib server
        # instantiates handlers itself, so we pre-fill the extra argument.
        self._wrapped_handler = functools.partial(
            handler_cls, app_object=app_object)
        self.server = server_cls(('', port), self._wrapped_handler)

    def handle_single_request(self):
        # type: () -> None
        """Serve exactly one request, then return."""
        self.server.handle_request()

    def serve_forever(self):
        # type: () -> None
        """Serve requests until interrupted."""
        # Fix: use the function form of print so this line parses on both
        # Python 2 and Python 3 (it was a py2-only print statement).
        print("Serving on localhost:%s" % self.port)
        self.server.serve_forever()
|
apache-2.0
|
eoss-cloud/madxxx_catalog_api
|
catalog/manage/sentinelcatalog.py
|
1
|
4889
|
#-*- coding: utf-8 -*-
""" EOSS catalog system
Implementation of ESA sentinel1/2 catalog access
(https://scihub.copernicus.eu)
Users need to register at the scihub page to get access to their catalog system. These credentials are set by SENTINEL_USER and SENTINEL_PASSWORD
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "twehrmann@eoss.cloud"
__status__ = "Production"
import requests
from manage import ICatalog
from model.plain_models import CopernicusSciHubContainer, S3PublicContainer, Catalog_Dataset
from utilities import read_OS_var
from utilities.web_utils import public_key_exists
from utilities.web_utils import remote_file_exists
from shapely.geometry import Polygon
from shapely.wkt import dumps as wkt_dumps
SENTINEL_S3_HTTP_ZIP_BASEURL = 'http://sentinel-s2-l1c.s3-website.eu-central-1.amazonaws.com/zips/'
SENTINEL_S3_HTTP_BASEURL = 'http://sentinel-s2-l1c.s3-website.eu-central-1.amazonaws.com/'
SENTINEL_S3_BUCKET = 'sentinel-s2-l1c'
class SentinelCatalog(ICatalog):
    """
    SentinelCatalog class
    needs OS vars for copernicus service authentification: SENTINEL_USER, SENTINEL_PASSWORD
    """
    sensors = ['sentinel1', 'sentinel2']
    # Copernicus SciHub OpenSearch endpoint; JSON responses, 100 rows per page.
    url = 'https://scihub.copernicus.eu/apihub/search?format=%s&rows=%d' % ('json', 100)

    def __init__(self):
        # Both credentials are mandatory; read_OS_var is expected to fail loudly
        # if either environment variable is unset.
        self.user = read_OS_var('SENTINEL_USER', mandatory=True)
        self.pwd = read_OS_var('SENTINEL_PASSWORD', mandatory=True)

    def find(self, provider, aoi, date_start, date_stop, clouds=None):
        """Query SciHub for datasets intersecting *aoi* within the given time window.

        aoi is a sequence of (lon, lat) tuples forming a polygon ring;
        date_start/date_stop are datetimes. Returns a set of Catalog_Dataset.
        NOTE(review): *provider* and *clouds* are currently unused — TODO confirm
        whether filtering by them was intended.
        """
        session = requests.Session()
        session.auth = (self.user, self.pwd)
        session.stream = True
        # SciHub full-text query: time window on beginPosition ...
        acquisition_date = '(beginPosition:[%s TO %s])' % (
            date_start.strftime('%Y-%m-%dT%H:%M:%SZ'),
            date_stop.strftime('%Y-%m-%dT%H:%M:%SZ')
        )
        poly = Polygon(aoi)
        geometry = wkt_dumps(poly)
        # ... combined with a spatial intersection on the product footprint.
        query_area = ' AND (footprint:"Intersects(%s)")' % geometry
        query = ''.join([acquisition_date, query_area])
        # NOTE(review): the session built above is never used for the request —
        # requests.post() opens a fresh connection, so session.stream has no
        # effect; only its auth tuple is reused. Presumably session.post() was
        # intended — confirm before changing.
        response = requests.post(self.url, dict(q=query), auth=session.auth)
        assert response.status_code == requests.codes.ok, 'Connection to copernicus server went wrong [%d]. Please check %s. \\n%s' % \
            (response.status_code, self.url, response.text)
        products = response.json()['feed']['entry']
        datasets = set()
        # NOTE(review): leftover debug output (Python 2 print statement).
        print products
        for p in products:
            ds = Catalog_Dataset()
            ds.entity_id = p['title']
            # SciHub packs typed attributes into lists of {"name": ..., "content": ...}.
            ds.acq_time = next(x for x in p["date"] if x["name"] == "beginposition")["content"]
            ds.sensor = next(x for x in p["str"] if x["name"] == "platformname")["content"]
            # The product download link is the only entry without a "rel" key.
            resource_url = next(x for x in p["link"] if len(x.keys()) == 1)["href"]
            if ds.sensor == 'Sentinel-2':
                # ds.tile_identifier = r['tile_identifier']
                ds.clouds = p['double']['content']
                ds.level = next(x for x in p["str"] if x["name"] == "processinglevel")["content"]
                # Heuristic: descending orbit = daytime acquisition for Sentinel-2.
                daynight = 'day'
                if next(x for x in p["str"] if x["name"] == "orbitdirection")["content"] != 'DESCENDING':
                    daynight = 'night'
                ds.daynight = daynight
            cop = CopernicusSciHubContainer()
            cop.http = resource_url
            container = cop.to_dict()
            # Also record the public AWS mirror location when the product exists there.
            s3 = S3PublicContainer()
            if remote_file_exists(SENTINEL_S3_HTTP_ZIP_BASEURL + ds.entity_id + '.zip'):
                s3.http = SENTINEL_S3_HTTP_ZIP_BASEURL + ds.entity_id + '.zip'
            if public_key_exists('sentinel-s2-l1c', 'zips/%s.zip' % ds.entity_id):
                s3.bucket = SENTINEL_S3_BUCKET
                s3.prefix = 'zips/%s.zip' % ds.entity_id
            # NOTE(review): `!= None` should idiomatically be `is not None`.
            if s3.http != None or s3.bucket != None:
                container.update(s3.to_dict())
            # print s3.to_dict()
            ds.container = container
            datasets.add(ds)
        return datasets

    def register(self, ds):
        """Registration is unsupported for the read-only SciHub catalog."""
        raise Exception('Cannot register dataset in repository %s' % self.url)
if __name__ == '__main__':
    # Manual smoke test: query Sentinel-2 scenes over a small AOI for the
    # 2016 growing season. Requires SENTINEL_USER/SENTINEL_PASSWORD to be set.
    from pytz import UTC
    from datetime import datetime, timedelta
    ag_season_start = datetime(2016, 6, 2, tzinfo=UTC)
    ag_season_end = datetime(2016, 10, 6, tzinfo=UTC)
    # Corners in (lon, lat) order; the ring is closed by repeating the first point.
    aoi_nw = (-94.21561717987059, 35.26342169967158)
    aoi_se = (-94.21304225921631, 35.265278832862336)
    aoi_ne = (aoi_se[0], aoi_nw[1])
    aoi_sw = (aoi_nw[0], aoi_se[1])
    aoi = [aoi_nw, aoi_ne, aoi_se, aoi_sw, aoi_nw]
    cat = SentinelCatalog()
    datasets = cat.find('sentinel2', aoi, ag_season_start, ag_season_end)
    print datasets
|
mit
|
antb/TPT----My-old-mod
|
src/python/stdlib/test/test_cookie.py
|
3
|
3429
|
# Simple test suite for Cookie.py
from test.test_support import run_unittest, run_doctest, check_warnings
import unittest
import Cookie
class CookieTests(unittest.TestCase):
    # Currently this only tests SimpleCookie

    def test_basic(self):
        """Round-trip parse, repr and header output for representative cookies."""
        cases = [
            { 'data': 'chips=ahoy; vienna=finger',
              'dict': {'chips':'ahoy', 'vienna':'finger'},
              'repr': "<SimpleCookie: chips='ahoy' vienna='finger'>",
              'output': 'Set-Cookie: chips=ahoy\nSet-Cookie: vienna=finger',
            },
            # Quoted value containing ';' and an octal escape — must survive
            # parse -> output unchanged.
            { 'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
              'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'},
              'repr': '''<SimpleCookie: keebler='E=mc2; L="Loves"; fudge=\\n;'>''',
              'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
            },
            # Check illegal cookies that have an '=' char in an unquoted value
            { 'data': 'keebler=E=mc2',
              'dict': {'keebler' : 'E=mc2'},
              'repr': "<SimpleCookie: keebler='E=mc2'>",
              'output': 'Set-Cookie: keebler=E=mc2',
            }
        ]
        for case in cases:
            C = Cookie.SimpleCookie()
            C.load(case['data'])
            self.assertEqual(repr(C), case['repr'])
            self.assertEqual(C.output(sep='\n'), case['output'])
            # iteritems: this suite targets Python 2's Cookie module.
            for k, v in sorted(case['dict'].iteritems()):
                self.assertEqual(C[k].value, v)

    def test_load(self):
        """Check attribute parsing, selective output and js_output rendering."""
        C = Cookie.SimpleCookie()
        C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme')
        self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
        self.assertEqual(C['Customer']['version'], '1')
        self.assertEqual(C['Customer']['path'], '/acme')
        # output() restricted to a subset of attributes.
        self.assertEqual(C.output(['path']),
                         'Set-Cookie: Customer="WILE_E_COYOTE"; Path=/acme')
        self.assertEqual(C.js_output(), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme; Version=1";
        // end hiding -->
        </script>
        """)
        self.assertEqual(C.js_output(['path']), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme";
        // end hiding -->
        </script>
        """)
        # loading 'expires': both 4-digit and 2-digit years must be preserved verbatim.
        C = Cookie.SimpleCookie()
        C.load('Customer="W"; expires=Wed, 01-Jan-2010 00:00:00 GMT')
        self.assertEqual(C['Customer']['expires'],
                         'Wed, 01-Jan-2010 00:00:00 GMT')
        C = Cookie.SimpleCookie()
        C.load('Customer="W"; expires=Wed, 01-Jan-98 00:00:00 GMT')
        self.assertEqual(C['Customer']['expires'],
                         'Wed, 01-Jan-98 00:00:00 GMT')

    def test_quoted_meta(self):
        # Try cookie with quoted meta-data
        C = Cookie.SimpleCookie()
        C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
        self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
        self.assertEqual(C['Customer']['version'], '1')
        self.assertEqual(C['Customer']['path'], '/acme')
def test_main():
    """Run the unit tests, then the Cookie module's doctests.

    The doctest run is wrapped in check_warnings because importing/using the
    insecure Cookie classes emits a DeprecationWarning.
    """
    run_unittest(CookieTests)
    with check_warnings(('.+Cookie class is insecure; do not use it',
                         DeprecationWarning)):
        run_doctest(Cookie)
# Standard test-module entry point.
if __name__ == '__main__':
    test_main()
|
gpl-2.0
|
ArcherSys/ArcherSys
|
Lib/site-packages/nbformat/v3/__init__.py
|
18
|
2328
|
"""The main API for the v3 notebook format.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
__all__ = ['NotebookNode', 'new_code_cell', 'new_text_cell', 'new_notebook',
'new_output', 'new_worksheet', 'new_metadata', 'new_author',
'new_heading_cell', 'nbformat', 'nbformat_minor', 'nbformat_schema',
'reads_json', 'writes_json', 'read_json', 'write_json',
'to_notebook_json', 'reads_py', 'writes_py', 'read_py', 'write_py',
'to_notebook_py', 'downgrade', 'upgrade', 'parse_filename'
]
import os
from .nbbase import (
NotebookNode,
new_code_cell, new_text_cell, new_notebook, new_output, new_worksheet,
new_metadata, new_author, new_heading_cell, nbformat, nbformat_minor,
nbformat_schema
)
from .nbjson import reads as reads_json, writes as writes_json
from .nbjson import reads as read_json, writes as write_json
from .nbjson import to_notebook as to_notebook_json
from .nbpy import reads as reads_py, writes as writes_py
from .nbpy import reads as read_py, writes as write_py
from .nbpy import to_notebook as to_notebook_py
from .convert import downgrade, upgrade
def parse_filename(fname):
    """Parse a notebook filename into (filename, name, format).

    The format is derived from the extension:

    * notebook.ipynb -> (notebook.ipynb, notebook, json)
    * notebook.json  -> (notebook.json, notebook, json)
    * notebook.py    -> (notebook.py, notebook, py)
    * notebook       -> (notebook.ipynb, notebook, json)

    Any unrecognized extension is treated like no extension at all: the
    whole name (extension included) becomes the notebook name and
    ``.ipynb`` is appended.

    Parameters
    ----------
    fname : unicode
        The notebook filename, with or without one of the known
        extensions (.ipynb, .json, .py).

    Returns
    -------
    (fname, name, format) : (unicode, unicode, unicode)
        The filename, notebook name and format.
    """
    root, ext = os.path.splitext(fname)
    known_formats = {u'.ipynb': u'json', u'.json': u'json', u'.py': u'py'}
    if ext in known_formats:
        return fname, root, known_formats[ext]
    # Unknown or missing extension: keep the full name and assume json/.ipynb.
    return fname + u'.ipynb', fname, u'json'
|
mit
|
brendandahl/servo
|
tests/wpt/css-tests/css21_dev/html4/support/fonts/makegsubfonts.py
|
820
|
14309
|
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
# %d is filled with the GSUB lookup type (1 or 3) when saving each font.
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
# First Private Use Area codepoint; each feature's glyphs are mapped from here.
baseCodepoint = 0xe000

# -------
# Features
# -------

# Read the tab-separated feature file; only the first column (the OpenType
# feature tag) is kept. Blank lines and '#' comments are skipped.
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
    line = line.strip()
    if not line:
        continue
    if line.startswith("#"):
        continue
    # parse
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Append one glyph to an in-progress CFF table.

    Wraps *program* in a Type 2 charstring, registers it in the charstring
    index, and records the glyph name in both the CharStrings mapping and
    the font's charset. The glyph id is the next free slot in the charset.
    """
    new_charstring = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
    charStringsIndex.append(new_charstring)
    next_gid = len(topDict.charset)
    charStrings.charStrings[glyphName] = next_gid
    topDict.charset.append(glyphName)
def makeLookup1():
    """Build the GSUB lookup-type-1 (single substitution) test font.

    For every feature tag, two glyphs are created: <tag>.pass (shown by
    default) and <tag>.fail; the feature's lookup swaps them, so text renders
    as PASS exactly when the feature is applied. Saves gsubtest-lookup1.otf.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data: outline programs and advance widths of the
    # template "P" and "F" glyphs, reused for every generated glyph.
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                # Python 2 raise syntax; only format-4 cmap subtables are supported.
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list: a single DFLT script whose default langsys
    # references every feature.
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list: one feature record per tag, each pointing at
    # the lookup with the same index.
    # NOTE(review): this local `featureList` shadows the module-level
    # featureList path constant — harmless here, but confusing.
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups: each one swaps <tag>.pass <-> <tag>.fail.
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeLookup3():
    """Build the GSUB lookup-type-3 (alternate substitution) test font.

    For every feature tag, five glyphs are created: <tag>.default (renders
    PASS), <tag>.fail and three alternates <tag>.alt1..3. Each feature's
    lookup maps default -> [fail, fail, fail] and altN so that only the
    matching alternate index renders PASS. Saves gsubtest-lookup3.otf.
    """
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()
    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)
    # load the shell
    shell = TTFont(shellTempPath)
    # grab the PASS and FAIL data: template outlines and metrics from "P"/"F".
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()
    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]
    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]
    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]
    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)
    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex
    features = sorted(mapping)
    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):
        # tag.pass — substitution targets only; not cmap-mapped.
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        # tag.fail — substitution target only; not cmap-mapped.
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics
        # tag.default — cmap-mapped at cp; renders PASS when the feature is off.
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics
        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                # Python 2 raise syntax; only format-4 cmap subtables are supported.
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1
        # tag.alt1,2,3 — cmap-mapped at cp+1..cp+3; render FAIL by default.
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics
            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1
    # set the glyph order
    shell.setGlyphOrder(glyphOrder)
    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0
    # make a list of all the features we will make
    featureCount = len(features)
    # set up the script list: single DFLT script referencing every feature.
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []
    # set up the feature list
    # NOTE(review): local `featureList` shadows the module-level path constant.
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]
    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable: alternate index N shows PASS only for <tag>.altN.
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)
    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)
    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)
def makeJavascriptData():
    """Regenerate gsubtest-features.js: a JS map from feature tag to base codepoint.

    Each feature occupies four consecutive codepoints starting at
    baseCodepoint; the banner comment documents the PASS/FAIL layout.
    """
    features = sorted(mapping)
    lines = [
        "",
        "/* This file is autogenerated by makegsubfonts.py */",
        "",
        "/* ",
        " Features defined in gsubtest fonts with associated base",
        " codepoints for each feature:",
        "",
        " cp = codepoint for feature featX",
        "",
        " cp default PASS",
        " cp featX=1 FAIL",
        " cp featX=2 FAIL",
        "",
        " cp+1 default FAIL",
        " cp+1 featX=1 PASS",
        " cp+1 featX=2 FAIL",
        "",
        " cp+2 default FAIL",
        " cp+2 featX=1 FAIL",
        " cp+2 featX=2 PASS",
        "",
        "*/",
        "",
        "var gFeatures = {",
    ]
    # One "tag": 0xXXXX entry per feature, four codepoints apart, wrapped
    # into an indented paragraph.
    entries = []
    cp = baseCodepoint
    for tag in features:
        entries.append("\"%s\": 0x%x" % (tag, cp))
        cp += 4
    lines.append(textwrap.fill(", ".join(entries), initial_indent=" ", subsequent_indent=" "))
    lines.append("};")
    lines.append("")
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(lines))
    f.close()
# Script body: runs at import time (no __main__ guard). Builds both test
# fonts, then regenerates the javascript feature table. Python 2 prints.
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
|
mpl-2.0
|
barryrobison/arsenalsuite
|
cpp/apps/absubmit/cinema4d/submit.py
|
10
|
9966
|
#!/usr/bin/python
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.uic import *
from blur.Stone import *
from blur.Classes import *
from blur.absubmit import Submitter
from blur.Classesui import *
import sys, os
class Cinema4DRenderDialog(QDialog):
    """Render-farm submission dialog for Cinema 4D scenes.

    Loads its widgets from cinema4drenderdialogui.ui, persists last-used
    settings via userConfig(), and submits through blur.absubmit.Submitter.
    Output paths/formats (Format, OutputPath, Multipass*) are filled in by
    the launching script before the dialog is shown.
    """

    def __init__(self,parent=None):
        QDialog.__init__(self,parent)
        loadUi("cinema4drenderdialogui.ui",self)
        # Wire widget signals to handlers (old-style PyQt4 connections).
        self.connect( self.mAutoPacketSizeCheck, SIGNAL('toggled(bool)'), self.autoPacketSizeToggled )
        self.connect( self.mChooseFileNameButton, SIGNAL('clicked()'), self.chooseFileName )
        self.connect( self.mFrameStartSpin, SIGNAL('valueChanged(int)'), self.frameStartChanged )
        self.connect( self.mFrameEndSpin, SIGNAL('valueChanged(int)'), self.frameEndChanged )
        self.connect( self.mAllHostsCheck, SIGNAL('toggled(bool)'), self.allHostsToggled )
        self.connect( self.mHostListButton, SIGNAL('clicked()'), self.showHostSelector )
        self.layout().setSizeConstraint(QLayout.SetFixedSize);
        self.mProjectCombo.setSpecialItemText( 'None' )
        self.mProjectCombo.setStatusFilters( ProjectStatusList(ProjectStatus.recordByName( 'Production' )) )
        # Maps the user-visible version label to the farm service name.
        self.Cinema4DServiceDict = { 'Cinema 4D R10' : 'cinema4d', 'Cinema 4D R11' : 'cinema4dr11' }
        self.mVersionCombo.addItems( self.Cinema4DServiceDict.keys() )
        # Render-output state, populated externally by the launcher script.
        self.Format = None
        self.OutputPath = None
        self.MultipassFormat = None
        self.MultipassOutputPath = None
        self.PassNames = []
        self.HostList = ''
        self.loadSettings()

    def loadSettings(self):
        """Restore the previous session's field values from userConfig()."""
        c = userConfig()
        c.pushSection( "LastSettings" )
        project = Project.recordByName( c.readString( "Project" ) )
        if project.isRecord():
            self.mProjectCombo.setProject( project )
        aps = c.readBool( "AutoPacketSize", True )
        self.mAutoPacketSizeCheck.setChecked( aps )
        if not aps:
            self.mPacketSizeSpin.setValue( c.readInt( "PacketSize", 10 ) )
        self.mJobNameEdit.setText( c.readString( "JobName" ) )
        self.mFileNameEdit.setText( c.readString( "FileName" ) )
        self.mJabberErrorsCheck.setChecked( c.readBool( "JabberErrors", False ) )
        self.mJabberCompletionCheck.setChecked( c.readBool( "JabberCompletion", False ) )
        self.mEmailErrorsCheck.setChecked( c.readBool( "EmailErrors", False ) )
        self.mEmailCompletionCheck.setChecked( c.readBool( "EmailCompletion", False ) )
        self.mPrioritySpin.setValue( c.readInt( "Priority", 50 ) )
        self.mDeleteOnCompleteCheck.setChecked( c.readBool( "DeleteOnComplete", False ) )
        c.popSection()

    def saveSettings(self):
        """Persist current field values; mirror of loadSettings()."""
        c = userConfig()
        c.pushSection( "LastSettings" )
        c.writeString( "Project", self.mProjectCombo.project().name() )
        c.writeBool( "AutoPacketSize", self.mAutoPacketSizeCheck.isChecked() )
        c.writeInt( "PacketSize", self.mPacketSizeSpin.value() )
        c.writeString( "JobName", self.mJobNameEdit.text() )
        c.writeString( "FileName", self.mFileNameEdit.text() )
        c.writeBool( "JabberErrors", self.mJabberErrorsCheck.isChecked() )
        c.writeBool( "JabberCompletion", self.mJabberCompletionCheck.isChecked() )
        c.writeBool( "EmailErrors", self.mEmailErrorsCheck.isChecked() )
        c.writeBool( "EmailCompletion", self.mEmailCompletionCheck.isChecked() )
        c.writeInt( "Priority", self.mPrioritySpin.value() )
        c.writeBool( "DeleteOnComplete", self.mDeleteOnCompleteCheck.isChecked() )
        c.popSection()

    def allHostsToggled(self,allHosts):
        # Host-list selection only makes sense when not targeting all hosts.
        self.mHostListButton.setEnabled( not allHosts )

    def showHostSelector(self):
        """Open the host selector, filtered to hosts running the cinema4d service."""
        hs = HostSelector(self)
        hs.setServiceFilter( ServiceList(Service.recordByName('cinema4d')) )
        hs.setHostList( self.HostList )
        if hs.exec_() == QDialog.Accepted:
            self.HostList = hs.hostStringList()
        del hs

    def frameStartChanged( self, val ):
        # Keep the range valid: end can never precede start.
        self.mFrameEndSpin.setMinimum( val )

    def frameEndChanged( self, val ):
        self.mFrameStartSpin.setMaximum( val )

    def autoPacketSizeToggled(self,autoPacketSize):
        self.mPacketSizeSpin.setEnabled(not autoPacketSize)

    def chooseFileName(self):
        """Let the user browse for the .c4d scene file."""
        fileName = QFileDialog.getOpenFileName(self,'Choose Scene To Render', QString(), 'Cinema4D scenes (*.c4d)' )
        if not fileName.isEmpty():
            self.mFileNameEdit.setText(fileName)

    def packetSize(self):
        # 0 signals "auto" packet sizing to the farm.
        if self.mAutoPacketSizeCheck.isChecked():
            return 0
        return self.mPacketSizeSpin.value()

    def buildNotifyString(self,jabber,email):
        """Build a 'user:je' style notification spec; empty when both are off."""
        ret = ''
        if jabber or email:
            ret = getUserName() + ':'
        if jabber:
            ret += 'j'
        if email:
            ret += 'e'
        return ret

    # Returns tuple (notifyOnErrorString,notifyOnCompleteString)
    def buildNotifyStrings(self):
        return (
            self.buildNotifyString(self.mJabberErrorsCheck.isChecked(), self.mEmailErrorsCheck.isChecked() ),
            self.buildNotifyString(self.mJabberCompletionCheck.isChecked(), self.mEmailCompletionCheck.isChecked() ) )

    def c4difyPath(self,outputPath,format):
        """Normalize an output path the way Cinema 4D names frames.

        Strips the extension, appends '_' when the base name ends in a digit
        (so frame numbers don't run into the name), then re-appends *format*.
        """
        opfi = QFileInfo(outputPath)
        ret = opfi.path() + "/" + opfi.completeBaseName()
        if ret.size() and ret.right(1).at(0).isDigit():
            ret += "_"
        ret += "." + format
        return ret

    def buildAbsubmitArgList(self):
        """Assemble the key/value dict handed to the absubmit Submitter."""
        sl = {}
        notifyError, notifyComplete = self.buildNotifyStrings()
        sl['jobType'] = 'Cinema4D'
        sl['packetType'] = 'continuous'
        sl['priority'] = str(self.mPrioritySpin.value())
        sl['user'] = str(getUserName())
        sl['packetSize'] = str(self.packetSize())
        sl['frameList'] = (str(self.mFrameStartSpin.value()) + '-' + str(self.mFrameEndSpin.value()))
        sl['fileName'] = str(self.mFileNameEdit.text())
        sl['notifyOnError'] = notifyError
        sl['notifyOnComplete'] = notifyComplete
        sl['job'] = self.mJobNameEdit.text()
        sl['deleteOnComplete'] = str(int(self.mDeleteOnCompleteCheck.isChecked()))
        sl['services'] = self.Cinema4DServiceDict[str(self.mVersionCombo.currentText())]
        if self.mProjectCombo.project().isRecord():
            sl['projectName'] = self.mProjectCombo.project().name()
        # Multipass outputs get numbered outputPath%i/outputName%i entries;
        # the regular output is appended after them (or used alone).
        passNum = 0
        if self.MultipassOutputPath and self.MultipassFormat and len(self.PassNames):
            opfi = QFileInfo(self.MultipassOutputPath)
            ext = opfi.suffix()
            for passName in self.PassNames:
                sl[('outputPath%i' % passNum)] = (opfi.filePath() + '_' + passName + '.' + self.MultipassFormat)
                sl[('outputName%i' % passNum)] = passName
                passNum += 1
        if self.OutputPath and self.Format:
            if passNum > 0:
                sl[('outputName%i' % passNum)] = 'Regular Output'
                sl[('outputPath%i' % passNum)] = self.c4difyPath(self.OutputPath,self.Format)
            else:
                sl['outputPath'] = self.c4difyPath(self.OutputPath,self.Format)
        if not self.mAllHostsCheck.isChecked() and len(self.HostList):
            sl['hostList'] = str(self.HostList)
        return sl

    def accept(self):
        """Validate inputs, persist settings, then submit the job.

        Note: does not call QDialog.accept() here — the dialog closes from
        submitSuccess()/submitError() once the Submitter reports back.
        """
        if self.mJobNameEdit.text().isEmpty():
            QMessageBox.critical(self, 'Missing Job Name', 'You must choose a name for this job' )
            return
        if not QFile.exists( self.mFileNameEdit.text() ):
            QMessageBox.critical(self, 'Invalid File', 'You must choose an existing cinema4d file' )
            return
        self.saveSettings()
        submitter = Submitter(self)
        self.connect( submitter, SIGNAL( 'submitSuccess()' ), self.submitSuccess )
        self.connect( submitter, SIGNAL( 'submitError( const QString & )' ), self.submitError )
        submitter.applyArgs( self.buildAbsubmitArgList() )
        submitter.submit()

    def submitSuccess(self):
        Log( 'Submission Finished Successfully' )
        QDialog.accept(self)

    def submitError(self,errorMsg):
        QMessageBox.critical(self, 'Submission Failed', 'Submission Failed With Error: ' + errorMsg)
        Log( 'Submission Failed With Error: ' + errorMsg )
        QDialog.reject(self)
if __name__ == "__main__":
    # Launcher: reads render options written by the Cinema 4D plugin into
    # current_options.txt (one value per line), pre-fills the dialog and runs it.
    if sys.platform == 'win32':
        os.chdir("c:\\blur\\absubmit\\cinema4d\\")
    app = QApplication(sys.argv)
    initConfig("../absubmit.ini","cinema4dsubmit.log")
    # Per-user settings live on the network share when available.
    if sys.platform == 'win32':
        cp = "h:/public/" + getUserName() + "/Blur";
        if not QDir( cp ).exists():
            cp = "C:/Documents and Settings/" + getUserName();
        initUserConfig( cp + "/c4dsubmit.ini" );
    else:
        initUserConfig( QDir.homePath() + "/.c4dsubmit" );
    initStone(sys.argv)
    blurqt_loader()
    dialog = Cinema4DRenderDialog()
    opts = []
    try:
        opts = open("current_options.txt").read().split('\n')
    except:
        pass
    Log( "Passed options: \n" + '\n'.join(opts) )
    # Filename
    if len(opts) >= 1:
        dialog.mFileNameEdit.setText(opts[0])
        dialog.mJobNameEdit.setText(QFileInfo(opts[0]).completeBaseName())
        # NOTE(review): opts[1] is the frame-start field per the block below,
        # yet it is used here to derive the project path — looks like it
        # should be Path(opts[0]). TODO confirm against the plugin's writer.
        path = Path(opts[1])
        if path.level() >= 1:
            p = Project.recordByName( path[1] )
            if p.isRecord():
                dialog.mProjectCombo.setProject( p )
    # Framestart/FrameEnd
    if len(opts) >= 3:
        dialog.mFrameStartSpin.setValue(int(float(opts[1])))
        dialog.mFrameEndSpin.setValue(int(float(opts[2])))
    # Format
    if len(opts) >= 4:
        if opts[3] != 'NONE':
            dialog.Format = opts[3]
            Log( "Format: " + dialog.Format )
    # Output Path
    if len(opts) >= 5:
        if opts[4] != 'NONE':
            dialog.OutputPath = opts[4]
            Log( "Output Path:" + dialog.OutputPath )
    # Multi-pass format
    if len(opts) >= 6:
        dialog.MultipassFormat = opts[5]
        Log( "Multipass Format: " + dialog.MultipassFormat )
    # Multi-pass filename
    if len(opts) >= 7 and len(opts[6]):
        dialog.MultipassOutputPath = opts[6]
        Log( "Multipass output path: " + dialog.MultipassOutputPath )
    # If multi-pass, comma separated list of pass names
    if len(opts) >= 8 and len(opts[7]):
        for pn in opts[7].split(','):
            if len(pn):
                dialog.PassNames.append(pn)
        Log( "Pass Names: " + ','.join(dialog.PassNames) )
    # If no outputs are set, give a message box and quite
    if not dialog.OutputPath and not dialog.MultipassOutputPath:
        QMessageBox.critical( None, 'No outputs defined', 'Cannot submit render because no render output files were defined.' )
    else:
        #if len(sys.argv) > 3:
        #	dialog.mOutputPathCombo.addItems( sys.argv[3:] )
        dialog.show()
        app.exec_()
    shutdown()
|
gpl-2.0
|
sean-/ansible
|
test/units/parsing/vault/test_vault.py
|
87
|
5672
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import shutil
import time
import tempfile
import six
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(unittest.TestCase):
    """Tests for ansible.parsing.vault.VaultLib.

    Crypto-dependent tests raise SkipTest unless PyCrypto >= 2.6.1 provides
    Counter, PBKDF2 and AES (see the HAS_* flags set at import time).
    """

    def test_methods_exist(self):
        v = VaultLib('ansible')
        slots = ['is_encrypted',
                 'encrypt',
                 'decrypt',
                 '_add_header',
                 '_split_header',]
        for slot in slots:
            assert hasattr(v, slot), "VaultLib is missing the %s method" % slot

    def test_is_encrypted(self):
        """Detection is header-based: only $ANSIBLE_VAULT-prefixed text counts."""
        v = VaultLib(None)
        assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
        data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
        assert v.is_encrypted(data), "encryption check on headered text failed"

    def test_add_header(self):
        """_add_header must prepend '$ANSIBLE_VAULT;<version>;<cipher>'."""
        v = VaultLib('ansible')
        v.cipher_name = "TEST"
        sensitive_data = "ansible"
        data = v._add_header(sensitive_data)
        lines = data.split(b'\n')
        assert len(lines) > 1, "failed to properly add header"
        header = to_unicode(lines[0])
        assert header.endswith(';TEST'), "header does end with cipher name"
        header_parts = header.split(';')
        assert len(header_parts) == 3, "header has the wrong number of parts"
        assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
        assert header_parts[1] == v.version, "header version is incorrect"
        assert header_parts[2] == 'TEST', "header does end with cipher name"

    def test_split_header(self):
        """_split_header strips the header line and records cipher/version."""
        v = VaultLib('ansible')
        data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
        rdata = v._split_header(data)
        lines = rdata.split(b'\n')
        assert lines[0] == b"ansible"
        assert v.cipher_name == 'TEST', "cipher name was not set"
        assert v.version == "9.9"

    def test_encrypt_decrypt_aes(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = u'AES'
        enc_data = v.encrypt("foobar")
        dec_data = v.decrypt(enc_data)
        assert enc_data != "foobar", "encryption failed"
        assert dec_data == "foobar", "decryption failed"

    def test_encrypt_decrypt_aes256(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = 'AES256'
        enc_data = v.encrypt("foobar")
        dec_data = v.decrypt(enc_data)
        assert enc_data != "foobar", "encryption failed"
        assert dec_data == "foobar", "decryption failed"

    def test_encrypt_encrypted(self):
        """Encrypting already-headered data must raise AnsibleError."""
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = 'AES'
        data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
        error_hit = False
        try:
            enc_data = v.encrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert error_hit, "No error was thrown when trying to encrypt data with a header"

    def test_decrypt_decrypted(self):
        """Decrypting plaintext (no header) must raise AnsibleError."""
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        data = "ansible"
        error_hit = False
        try:
            dec_data = v.decrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert error_hit, "No error was thrown when trying to decrypt data without a header"

    def test_cipher_not_set(self):
        # not setting the cipher should default to AES256
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        data = "ansible"
        error_hit = False
        try:
            enc_data = v.encrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
        assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
|
gpl-3.0
|
sivel/ansible
|
test/integration/targets/module_utils_urls/library/test_peercert.py
|
29
|
2346
|
#!/usr/bin/python
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Module documentation consumed by ansible-doc. Fixed: the module name was
# misspelled "test_perrcert", which did not match the file name
# test_peercert.py and broke documentation lookup.
DOCUMENTATION = r'''
---
module: test_peercert
short_description: Test getting the peer certificate of a HTTP response
description: Test getting the peer certificate of a HTTP response.
options:
  url:
    description: The endpoint to get the peer cert for
    required: true
    type: str
author:
- Ansible Project
'''

EXAMPLES = r'''
#
'''

RETURN = r'''
#
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import getpeercert, Request
def get_x509_shorthand(name, value):
    """Return ``SHORT=value`` for a long-form X.509 attribute *name*.

    Raises KeyError for attribute names outside the supported set, exactly
    like a plain dict lookup.
    """
    shorthands = {
        'countryName': 'C',
        'stateOrProvinceName': 'ST',
        'localityName': 'L',
        'organizationName': 'O',
        'commonName': 'CN',
        'organizationalUnitName': 'OU',
    }
    return '{0}={1}'.format(shorthands[name], value)
def main():
    """Ansible module entry point: fetch a URL, report its peer certificate.

    Returns the certificate both as a processed dict (issuer/subject
    flattened to "C=..,ST=.." strings) and as base64-encoded DER bytes.
    """
    module = AnsibleModule(
        argument_spec=dict(
            url=dict(type='str', required=True),
        ),
        supports_check_mode=True,
    )

    result = {
        'changed': False,
        'cert': None,
        'raw_cert': None,
    }

    response = Request().get(module.params['url'])
    try:
        cert = getpeercert(response)
        b_cert = getpeercert(response, binary_form=True)
    finally:
        response.close()

    if cert:
        processed = {
            'issuer': '',
            'not_after': cert.get('notAfter', None),
            'not_before': cert.get('notBefore', None),
            'serial_number': cert.get('serialNumber', None),
            'subject': '',
            'version': cert.get('version', None),
        }
        # Flatten each RDN sequence into a comma-joined shorthand string.
        for field in ['issuer', 'subject']:
            parts = [get_x509_shorthand(rdn[0][0], rdn[0][1])
                     for rdn in cert.get(field, [])]
            processed[field] = ",".join(parts)
        result['cert'] = processed

    if b_cert:
        result['raw_cert'] = to_text(base64.b64encode(b_cert))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
gpl-3.0
|
lammps/lammps
|
tools/i-pi/ipi/utils/softexit.py
|
41
|
2190
|
"""Utility functions for killing the wrapper softly.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http.//www.gnu.org/licenses/>.
Classes:
Softexit: Concise class to manage cleaning up in case of an emergency exit.
"""
import traceback, sys
from ipi.utils.messages import verbosity, warning
__all__ = ['Softexit', 'softexit']
class Softexit(object):
    """Manage a clean emergency shutdown of the simulation.

    Keeps a registry of cleanup callbacks (socket shutdown, RESTART file
    output, ...) that are invoked, in registration order, when the run is
    stopped early -- e.g. on SIGTERM or when an EXIT file appears in the
    working directory.

    Attributes:
        flist: The registered cleanup callables.
    """

    def __init__(self):
        """Start with an empty list of cleanup callbacks."""
        self.flist = []

    def register(self, func):
        """Schedule *func* to run when a soft exit is triggered.

        Args:
            func: A zero-argument callable performing part of the cleanup.
        """
        self.flist.append(func)

    def trigger(self, message=""):
        """Run every registered cleanup callback, then terminate the process.

        Args:
            message: Optional text explaining why the exit was requested;
                printed as a low-verbosity warning when non-empty.
        """
        if message != "":
            warning("Soft exit has been requested with message: '" + message + "'. Cleaning up.", verbosity.low)
        for callback in self.flist:
            callback()
        sys.exit()


# Module-level singleton shared by the rest of the wrapper.
softexit = Softexit()
|
gpl-2.0
|
Wilbeibi/rethinkdb
|
external/v8_3.30.33.16/build/gyp/tools/pretty_sln.py
|
806
|
5092
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
    """Print *project* after all of its dependencies, depth-first.

    Appends each printed project to *built* so it is only emitted once.
    """
    # if all dependencies are done, we can build it, otherwise we try to build the
    # dependency.
    # This is not infinite-recursion proof.
    # NOTE(review): a dependency cycle in the .sln would recurse forever.
    for dep in deps[project]:
        if dep not in built:
            BuildProject(dep, built, projects, deps)
    print project
    built.append(project)
def ParseSolution(solution_file):
    """Parse a Visual Studio .sln file.

    Returns:
      (projects, dependencies): *projects* maps a project name (with any
      "_gyp" suffix stripped) to [stripped path, clsid, original path];
      *dependencies* maps a project name to its sorted dependency names.
    """
    # All projects, their clsid and paths.
    projects = dict()

    # A list of dependencies associated with a project.
    dependencies = dict()

    # Regular expressions that matches the SLN format.
    # The first line of a project definition.
    begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                                '}"\) = "(.*)", "(.*)", "(.*)"$'))
    # The last line of a project definition.
    end_project = re.compile('^EndProject$')
    # The first line of a dependency list.
    begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
    # The last line of a dependency list.
    end_dep = re.compile('EndProjectSection$')
    # A line describing a dependency.
    dep_line = re.compile(' *({.*}) = ({.*})$')

    in_deps = False
    # NOTE(review): the file handle is never closed; harmless for this
    # short-lived diff tool, but worth knowing.
    solution = open(solution_file)
    for line in solution:
        results = begin_project.search(line)
        if results:
            # Hack to remove icu because the diff is too different.
            if results.group(1).find('icu') != -1:
                continue
            # We remove "_gyp" from the names because it helps to diff them.
            current_project = results.group(1).replace('_gyp', '')
            projects[current_project] = [results.group(2).replace('_gyp', ''),
                                         results.group(3),
                                         results.group(2)]
            dependencies[current_project] = []
            continue

        results = end_project.search(line)
        if results:
            current_project = None
            continue

        results = begin_dep.search(line)
        if results:
            in_deps = True
            continue

        results = end_dep.search(line)
        if results:
            in_deps = False
            continue

        results = dep_line.search(line)
        if results and in_deps and current_project:
            dependencies[current_project].append(results.group(1))
            continue

    # Change all dependencies clsid to name instead.
    for project in dependencies:
        # For each dependencies in this project
        new_dep_array = []
        for dep in dependencies[project]:
            # Look for the project name matching this cldis
            for project_info in projects:
                if projects[project_info][1] == dep:
                    new_dep_array.append(project_info)
        dependencies[project] = sorted(new_dep_array)

    return (projects, dependencies)
def PrintDependencies(projects, deps):
    """Print each project with its path and its list of dependencies."""
    print "---------------------------------------"
    print "Dependencies for all projects"
    print "---------------------------------------"
    print "-- --"

    for (project, dep_list) in sorted(deps.items()):
        print "Project : %s" % project
        print "Path : %s" % projects[project][0]
        if dep_list:
            for dep in dep_list:
                print " - %s" % dep
        print ""

    print "-- --"
def PrintBuildOrder(projects, deps):
    """Print one possible dependency-respecting build order."""
    print "---------------------------------------"
    print "Build order "
    print "---------------------------------------"
    print "-- --"

    built = []
    for (project, _) in sorted(deps.items()):
        if project not in built:
            BuildProject(project, built, projects, deps)

    print "-- --"
def PrintVCProj(projects):
    """Pretty-print every project's .vcproj via the pretty_vcproj tool."""
    for project in projects:
        print "-------------------------------------"
        print "-------------------------------------"
        print project
        print project
        print project
        print "-------------------------------------"
        print "-------------------------------------"

        # Resolve the .vcproj path relative to the .sln's directory
        # (projects[project][2] is the original, unstripped path).
        project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                    projects[project][2]))

        pretty = pretty_vcproj
        argv = [ '',
                 project_path,
                 '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
               ]
        # Forward any extra command-line replacements to pretty_vcproj.
        argv.extend(sys.argv[3:])
        pretty.main(argv)
def main():
    """Entry point: pretty-print the .sln file named in argv[1]."""
    # check if we have exactly 1 parameter.
    if len(sys.argv) < 2:
        print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
        return 1

    (projects, deps) = ParseSolution(sys.argv[1])
    PrintDependencies(projects, deps)
    PrintBuildOrder(projects, deps)

    # --recursive additionally pretty-prints every referenced .vcproj.
    if '--recursive' in sys.argv:
        PrintVCProj(projects)
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
agpl-3.0
|
alcobar/asuswrt-merlin
|
release/src/router/samba-3.5.8/source4/heimdal/lib/wind/UnicodeData.py
|
22
|
2225
|
#!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id: UnicodeData.py,v 1.1.1.1 2011/06/10 09:34:43 andrew Exp $
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
def read(filename):
    """Parse a UnicodeData.txt-style file into a dict.

    Each non-comment line has the form ``code;field1;field2;...`` where
    *code* is a hexadecimal code point. The returned dict maps the integer
    code point to the list of remaining fields (any trailing newline on
    the last field is preserved, matching the original behavior).

    Raises:
        Exception: if the same code point appears twice.
    """
    ret = {}
    # 'with' guarantees the handle is closed even if parsing raises;
    # the original leaked the file object on a duplicate-key error.
    with open(filename, 'r') as ud:
        for line in ud:
            # Strip trailing '#' comments; skip lines that become empty.
            line = re.sub('#.*$', '', line)
            if line == "\n":
                continue
            fields = line.split(';')
            key = int(fields[0], 0x10)  # code point is hexadecimal
            if key in ret:
                raise Exception('Duplicate key in UnicodeData')
            ret[key] = fields[1:]
    return ret
|
gpl-2.0
|
jay-lau/magnum
|
magnum/tests/functional/api/v1/models/baypatch_model.py
|
1
|
2317
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from magnum.tests.functional.common import models
class BayPatchData(models.BaseModel):
    """Data that encapsulates baypatch attributes"""
    # No fields of its own: BaseModel's generic dict<->model conversion is
    # sufficient for the free-form JSON-patch documents this represents.
    pass
class BayPatchEntity(models.EntityModel):
    """Entity Model that represents a single instance of BayPatchData"""
    # Key under which a single baypatch document appears in API payloads.
    ENTITY_NAME = 'baypatch'
    # Data class this entity wraps.
    MODEL_TYPE = BayPatchData
class BayPatchCollection(models.CollectionModel):
    """Collection Model that represents a list of BayPatchData objects."""

    MODEL_TYPE = BayPatchData
    COLLECTION_NAME = 'baypatchlist'

    def to_json(self):
        """Serialize the collection to a JSON array string.

        Each BayPatchData stored under COLLECTION_NAME is converted to a
        dict individually, because COLLECTION_NAME holds model objects
        rather than plain dicts, and the result is dumped as one list.

        :returns: json object
        """
        items = getattr(self, BayPatchCollection.COLLECTION_NAME)
        return json.dumps([item.to_dict() for item in items])

    @classmethod
    def from_dict(cls, data):
        """Build a BayPatchCollection from a list of patch dicts.

        Example of dict data::

            [{
                "path": "/name",
                "value": "myname",
                "op": "replace"
            }]

        :param data: dict of patch data
        :returns: json object
        """
        model = cls()
        setattr(model, cls.COLLECTION_NAME,
                [cls.MODEL_TYPE.from_dict(item) for item in data])
        return model
|
apache-2.0
|
benoitsteiner/tensorflow
|
tensorflow/contrib/data/python/kernel_tests/sequence_dataset_op_test.py
|
39
|
8034
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SequenceDatasetTest(test.TestCase):
  """Tests for the repeat(), take() and skip() dataset transformations."""

  def testRepeatTensorDataset(self):
    """Test a dataset that repeats its input multiple times."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    # This placeholder can be fed when dataset-definition subgraph
    # runs (i.e. `init_op` below) to configure the number of
    # repetitions used in a particular iterator.
    count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (dataset_ops.Dataset.from_tensors(components)
                .repeat(count_placeholder).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    # Repeating must preserve each component's static shape.
    self.assertEqual([c.shape for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Test a finite repetition.
      sess.run(init_op, feed_dict={count_placeholder: 3})
      for _ in range(3):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test a different finite repetition.
      sess.run(init_op, feed_dict={count_placeholder: 7})
      for _ in range(7):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test an empty repetition.
      sess.run(init_op, feed_dict={count_placeholder: 0})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test an infinite repetition.
      # NOTE(mrry): There's not a good way to test that the sequence
      # actually is infinite.
      sess.run(init_op, feed_dict={count_placeholder: -1})
      for _ in range(17):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)

  def testTakeTensorDataset(self):
    """take(count) yields at most `count` leading elements."""
    components = (np.arange(10),)
    count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .take(count_placeholder).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    # from_tensor_slices drops the leading (slicing) dimension.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Take fewer than input size
      sess.run(init_op, feed_dict={count_placeholder: 4})
      for i in range(4):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Take more than input size
      sess.run(init_op, feed_dict={count_placeholder: 25})
      for i in range(10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Take all of input
      sess.run(init_op, feed_dict={count_placeholder: -1})
      for i in range(10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Take nothing
      sess.run(init_op, feed_dict={count_placeholder: 0})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testSkipTensorDataset(self):
    """skip(count) drops the first `count` elements (all, for count=-1)."""
    components = (np.arange(10),)
    count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .skip(count_placeholder).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Skip fewer than input size, we should skip
      # the first 4 elements and then read the rest.
      sess.run(init_op, feed_dict={count_placeholder: 4})
      for i in range(4, 10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Skip more than input size: get nothing.
      sess.run(init_op, feed_dict={count_placeholder: 25})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Skip exactly input size.
      sess.run(init_op, feed_dict={count_placeholder: 10})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Set -1 for 'count': skip the entire dataset.
      sess.run(init_op, feed_dict={count_placeholder: -1})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Skip nothing
      sess.run(init_op, feed_dict={count_placeholder: 0})
      for i in range(0, 10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testRepeatRepeatTensorDataset(self):
    """Test the composition of repeat datasets."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    inner_count = array_ops.placeholder(dtypes.int64, shape=[])
    outer_count = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (dataset_ops.Dataset.from_tensors(components).repeat(inner_count)
                .repeat(outer_count).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([c.shape for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Nested repeats multiply: 7 inner x 14 outer = 98 elements.
      sess.run(init_op, feed_dict={inner_count: 7, outer_count: 14})
      for _ in range(7 * 14):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testRepeatEmptyDataset(self):
    """Test that repeating an empty dataset does not hang."""
    # repeat(10).skip(10) yields an empty dataset; repeat(-1) of it must
    # raise rather than loop forever.
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10)
                .repeat(-1).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      with self.assertRaisesRegexp(
          errors.OutOfRangeError,
          "Attempted to repeat an empty dataset infinitely."):
        sess.run(get_next)


if __name__ == "__main__":
  test.main()
|
apache-2.0
|
tanmaykm/edx-platform
|
common/test/acceptance/tests/lms/test_programs.py
|
13
|
6048
|
"""Acceptance tests for LMS-hosted Programs pages"""
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.catalog import CatalogFixture, CatalogConfigMixin
from common.test.acceptance.fixtures.programs import ProgramsFixture, ProgramsConfigMixin
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.tests.helpers import UniqueCourseTest
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.programs import ProgramListingPage, ProgramDetailsPage
from openedx.core.djangoapps.catalog.tests import factories as catalog_factories
from openedx.core.djangoapps.programs.tests import factories as program_factories
class ProgramPageBase(ProgramsConfigMixin, CatalogConfigMixin, UniqueCourseTest):
    """Base class used for program listing page tests."""

    def setUp(self):
        super(ProgramPageBase, self).setUp()

        # The programs feature must be switched on for the pages to render.
        self.set_programs_api_configuration(is_enabled=True)

        # Three arbitrary catalog programs, plus a course run matching this
        # test's unique course id.
        self.programs = [catalog_factories.Program() for __ in range(3)]
        self.course_run = catalog_factories.CourseRun(key=self.course_id)
        self.stub_catalog_api()

    def create_program(self, program_id=None, course_id=None):
        """DRY helper for creating test program data."""
        course_id = course_id if course_id else self.course_id

        run_mode = program_factories.RunMode(course_key=course_id)
        course_code = program_factories.CourseCode(run_modes=[run_mode])
        org = program_factories.Organization(key=self.course_info['org'])

        # A program_id is only supplied when a test must target the details
        # page for a specific program; listings use auto-assigned ids.
        if program_id:
            program = program_factories.Program(
                id=program_id,
                status='active',
                organizations=[org],
                course_codes=[course_code]
            )
        else:
            program = program_factories.Program(
                status='active',
                organizations=[org],
                course_codes=[course_code]
            )

        return program

    def stub_programs_api(self, programs, is_list=True):
        """Stub out the programs API with fake data."""
        ProgramsFixture().install_programs(programs, is_list=is_list)

    def stub_catalog_api(self):
        """Stub out the catalog API's program and course run endpoints."""
        self.set_catalog_configuration(is_enabled=True)
        CatalogFixture().install_programs(self.programs)
        CatalogFixture().install_course_run(self.course_run)

    def auth(self, enroll=True):
        """Authenticate, enrolling the user in the configured course if requested."""
        CourseFixture(**self.course_info).install()
        course_id = self.course_id if enroll else None
        AutoAuthPage(self.browser, course_id=course_id).visit()
class ProgramListingPageTest(ProgramPageBase):
    """Verify user-facing behavior of the program listing page."""

    def setUp(self):
        super(ProgramListingPageTest, self).setUp()
        self.listing_page = ProgramListingPage(self.browser)

    def test_no_enrollments(self):
        """Verify that no cards appear when the user has no enrollments."""
        program = self.create_program()
        self.stub_programs_api([program])

        # Authenticated, but not enrolled in any course.
        self.auth(enroll=False)
        self.listing_page.visit()

        self.assertTrue(self.listing_page.is_sidebar_present)
        self.assertFalse(self.listing_page.are_cards_present)

    def test_no_programs(self):
        """
        Verify that no cards appear when the user has enrollments
        but none are included in an active program.
        """
        # Point the program at a different run of the same course so the
        # user's enrollment does not match it.
        course_id = self.course_id.replace(
            self.course_info['run'],
            'other_run'
        )
        program = self.create_program(course_id=course_id)
        self.stub_programs_api([program])

        self.auth()
        self.listing_page.visit()

        self.assertTrue(self.listing_page.is_sidebar_present)
        self.assertFalse(self.listing_page.are_cards_present)

    def test_enrollments_and_programs(self):
        """
        Verify that cards appear when the user has enrollments
        which are included in at least one active program.
        """
        program = self.create_program()
        self.stub_programs_api([program])

        self.auth()
        self.listing_page.visit()

        self.assertTrue(self.listing_page.is_sidebar_present)
        self.assertTrue(self.listing_page.are_cards_present)
@attr('a11y')
class ProgramListingPageA11yTest(ProgramPageBase):
    """Test program listing page accessibility."""

    def setUp(self):
        super(ProgramListingPageA11yTest, self).setUp()
        self.listing_page = ProgramListingPage(self.browser)

        program = self.create_program()
        self.stub_programs_api([program])

    def test_empty_a11y(self):
        """Test a11y of the page's empty state."""
        self.auth(enroll=False)
        self.listing_page.visit()

        # Sanity-check the expected empty state before auditing it.
        self.assertTrue(self.listing_page.is_sidebar_present)
        self.assertFalse(self.listing_page.are_cards_present)
        self.listing_page.a11y_audit.check_for_accessibility_errors()

    def test_cards_a11y(self):
        """Test a11y when program cards are present."""
        self.auth()
        self.listing_page.visit()

        self.assertTrue(self.listing_page.is_sidebar_present)
        self.assertTrue(self.listing_page.are_cards_present)
        self.listing_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
class ProgramDetailsPageA11yTest(ProgramPageBase):
    """Test program details page accessibility."""

    def setUp(self):
        super(ProgramDetailsPageA11yTest, self).setUp()
        self.details_page = ProgramDetailsPage(self.browser)

        # The stubbed program must carry the id the details page requests.
        program = self.create_program(program_id=self.details_page.program_id)
        self.stub_programs_api([program], is_list=False)

    def test_a11y(self):
        """Test the page's a11y compliance."""
        self.auth()
        self.details_page.visit()
        self.details_page.a11y_audit.check_for_accessibility_errors()
|
agpl-3.0
|
frankvdp/django
|
django/conf/locale/zh_Hans/formats.py
|
140
|
1745
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
TIME_FORMAT = 'H:i'  # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月'  # 2016年9月
MONTH_DAY_FORMAT = 'm月j日'  # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1  # 星期一 (Monday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
# NOTE(review): '%n' and '%j' are not valid strptime date directives, so the
# CJK-styled input formats below can never match; kept unchanged here to
# preserve behavior -- confirm intent before replacing them with '%m'/'%d'.
DATE_INPUT_FORMATS = [
    '%Y/%m/%d',  # '2016/09/05'
    '%Y-%m-%d',  # '2016-09-05'
    '%Y年%n月%j日',  # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
    '%H:%M',  # '20:45'
    '%H:%M:%S',  # '20:45:29'
    '%H:%M:%S.%f',  # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
    '%Y/%m/%d %H:%M',  # '2016/09/05 20:45'
    '%Y-%m-%d %H:%M',  # '2016-09-05 20:45'
    '%Y年%n月%j日 %H:%M',  # '2016年9月5日 14:45'
    '%Y/%m/%d %H:%M:%S',  # '2016/09/05 20:45:29'
    '%Y-%m-%d %H:%M:%S',  # '2016-09-05 20:45:29'
    '%Y年%n月%j日 %H:%M:%S',  # '2016年9月5日 20:45:29'
    '%Y/%m/%d %H:%M:%S.%f',  # '2016/09/05 20:45:29.000200'
    '%Y-%m-%d %H:%M:%S.%f',  # '2016-09-05 20:45:29.000200'
    # Fixed: the time portion previously read '%H:%n:%S.%f'; '%n' is not a
    # time directive and every sibling format uses '%M' for the minute.
    '%Y年%n月%j日 %H:%M:%S.%f',  # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
# Chinese numerals group digits by myriads (10^4), not thousands.
NUMBER_GROUPING = 4
|
bsd-3-clause
|
TallonRain/horsefaxbot
|
horsefax/telegram/connections/polling.py
|
1
|
1484
|
import requests
import threading
import time
import traceback
from . import TelegramConnection, MessageHandler
class LongPollingConnection(TelegramConnection):
    """Telegram connection that fetches updates via long-polling getUpdates.

    Runs the polling loop on a background thread; each received update is
    passed to the handler (presumably stored by TelegramConnection.__init__
    as ``self.handler`` -- confirm against the base class).
    """

    def __init__(self, token: str, handler: MessageHandler) -> None:
        super().__init__(token, handler)
        self._connected = False
        # Highest update_id processed so far; the next poll asks for ids
        # strictly above it, which acknowledges earlier updates.
        self.latest_update = 0
        self.thread = threading.Thread(target=self.run)

    def connect(self):
        # Start the polling loop; run() flips _connected to True itself.
        self.thread.start()

    @property
    def connected(self) -> bool:
        return self._connected

    def disconnect(self):
        # The loop only checks this flag once per poll, so shutdown can take
        # up to one long-poll cycle (~60s) to become effective.
        self._connected = False

    def run(self):
        self._connected = True
        while self.connected:
            try:
                # Server holds the request up to 60s; the client timeout is
                # larger so the HTTP layer outlasts the server's long poll.
                updates = self.request("getUpdates", json={"offset": self.latest_update + 1, "timeout": 60}, timeout=70)
            except requests.RequestException as e:
                print(e)
                time.sleep(10)  # back off before retrying after a network error
                continue
            if not updates:
                continue
            # Process updates in id order regardless of arrival order.
            updates = sorted(updates, key=lambda x: x['update_id'])
            for update in updates:
                # doing this per message ensures that we don't drop messages if we crash out.
                self.latest_update = update['update_id']
                try:
                    self.handler(update)
                except Exception as e:
                    # A failing handler must not kill the polling loop.
                    print(f"Something went terribly wrong processing update {update['update_id']}:")
                    traceback.print_exc()
|
mit
|
v-iam/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/express_route_circuit_sku.py
|
2
|
1546
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitSku(Model):
    """Contains SKU in an ExpressRouteCircuit.

    :param name: The name of the SKU.
    :type name: str
    :param tier: The tier of the SKU. Possible values are 'Standard' and
     'Premium'. Possible values include: 'Standard', 'Premium'
    :type tier: str or :class:`ExpressRouteCircuitSkuTier
     <azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitSkuTier>`
    :param family: The family of the SKU. Possible values are:
     'UnlimitedData' and 'MeteredData'. Possible values include:
     'UnlimitedData', 'MeteredData'
    :type family: str or :class:`ExpressRouteCircuitSkuFamily
     <azure.mgmt.network.v2017_03_01.models.ExpressRouteCircuitSkuFamily>`
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    # NOTE: this file is AutoRest-generated; manual edits will be lost on
    # regeneration.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
    }

    def __init__(self, name=None, tier=None, family=None):
        """Initialize the SKU; every field is optional and defaults to None."""
        self.name = name
        self.tier = tier
        self.family = family
|
mit
|
MQQiang/kbengine
|
kbe/src/lib/python/Lib/http/cookiejar.py
|
63
|
75791
|
r"""HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
import copy
import datetime
import re
import time
import urllib.parse, urllib.request
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import http.client # only for the default HTTP port
from calendar import timegm
debug = False   # set to True to enable debugging via the logging module
logger = None   # lazily created by _debug(); a logging.Logger once used
def _debug(*args):
    """Forward *args* to the module logger, but only when debugging is on.

    The logger is created lazily on first use so that importing this module
    does not touch the logging machinery at all in the common (non-debug)
    case.
    """
    global logger
    if not debug:
        return
    if not logger:
        import logging
        logger = logging.getLogger("http.cookiejar")
    return logger.debug(*args)
DEFAULT_HTTP_PORT = str(http.client.HTTP_PORT)  # "80", as a string
# Error text used by FileCookieJar load/save helpers later in the file.
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
                         "instance initialised with one)")
def _warn_unhandled_exception():
    """Emit a UserWarning carrying the current traceback.

    Called from the few catch-all ``except`` blocks in this module, which
    exist to tolerate badly-formed input: anything that lands there is
    surfaced as a warning rather than silently swallowed.
    """
    import io, traceback, warnings
    buf = io.StringIO()
    traceback.print_exc(None, buf)
    warnings.warn("http.cookiejar bug!\n%s" % buf.getvalue(), stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
          "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
# Lower-cased month names, used for case-insensitive month lookup in the
# date parsers.  A comprehension also avoids leaking the loop variable
# "month" into the module namespace, which the old append loop did.
MONTHS_LOWER = [month.lower() for month in MONTHS]
def time2isoz(t=None):
    """Return a string representing time in seconds since epoch, t.

    If called without an argument, the current time is used.  The format is
    "YYYY-MM-DD hh:mm:ssZ", representing Universal Time (UTC, aka GMT), e.g.
    1994-11-24 08:49:37Z
    """
    dt = (datetime.datetime.utcnow() if t is None
          else datetime.datetime.utcfromtimestamp(t))
    return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def time2netscape(t=None):
    """Return a string representing time in seconds since epoch, t.

    If called without an argument, the current time is used.  The format is
    the Netscape cookie expiry style:

    Wed, DD-Mon-YYYY HH:MM:SS GMT
    """
    dt = (datetime.datetime.utcnow() if t is None
          else datetime.datetime.utcfromtimestamp(t))
    return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
        DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1],
        dt.year, dt.hour, dt.minute, dt.second)
# Timezone names treated as zero offset from UTC.
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}

# Numeric timezone, e.g. "-0800", "+01:00", "+1".
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII)

def offset_from_tz_string(tz):
    """Return *tz*'s offset east of UTC in seconds, or None if unrecognized."""
    if tz in UTC_ZONES:
        return 0
    m = TIMEZONE_RE.search(tz)
    if not m:
        return None
    offset = 3600 * int(m.group(2))
    if m.group(3):
        offset += 60 * int(m.group(3))
    if m.group(1) == '-':
        offset = -offset
    return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
    """Assemble broken-out date/time strings into seconds since epoch.

    *mon* may be a month name (looked up case-insensitively in MONTHS_LOWER)
    or a number 1-12; missing clock fields default to 0; two-digit years are
    resolved to the century nearest the current date.  Returns None when the
    month or timezone string is unrecognized.
    """
    # translate month name to number
    # month numbers start with 1 (January)
    try:
        mon = MONTHS_LOWER.index(mon.lower())+1
    except ValueError:
        # maybe it's already a number
        try:
            imon = int(mon)
        except ValueError:
            return None
        if 1 <= imon <= 12:
            mon = imon
        else:
            return None
    # make sure clock elements are defined
    if hr is None: hr = 0
    if min is None: min = 0
    if sec is None: sec = 0
    yr = int(yr)
    day = int(day)
    hr = int(hr)
    min = int(min)
    sec = int(sec)
    if yr < 1000:
        # find "obvious" year: pick the century that puts yr within 50 years
        # of the current date
        cur_yr = time.localtime(time.time())[0]
        m = cur_yr % 100
        tmp = yr
        yr = yr + cur_yr - m
        m = m - tmp
        if abs(m) > 50:
            if m > 0: yr = yr + 100
            else: yr = yr - 100
    # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
    t = _timegm((yr, mon, day, hr, min, sec, tz))
    if t is not None:
        # adjust time using timezone string, to get absolute time since epoch
        if tz is None:
            tz = "UTC"
        tz = tz.upper()
        offset = offset_from_tz_string(tz)
        if offset is None:
            return None
        t = t - offset
    return t
# RFC 1123 date, e.g. "Wed, 09 Feb 1994 22:23:32 GMT".  Both fragments must
# be raw strings: the second one previously contained "\d" escapes in a plain
# string literal, which is a DeprecationWarning (SyntaxWarning on 3.12+).
STRICT_DATE_RE = re.compile(
    r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
    r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
# Optional leading weekday (full or abbreviated), stripped before loose parse.
WEEKDAY_RE = re.compile(
    r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
LOOSE_HTTP_DATE_RE = re.compile(
    r"""^
    (\d\d?)            # day
       (?:\s+|[-\/])
    (\w+)              # month
        (?:\s+|[-\/])
    (\d+)              # year
    (?:
          (?:\s+|:)    # separator before clock
       (\d\d?):(\d\d)  # hour:min
       (?::(\d\d))?    # optional seconds
    )?                 # optional clock
       \s*
    ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
       \s*
    (?:\(\w+\))?       # ASCII representation of timezone in parens.
       \s*$""", re.X | re.ASCII)
def http2time(text):
    """Returns time in seconds since epoch of time represented by a string.

    Return value is an integer.

    None is returned if the format of str is unrecognized, the time is outside
    the representable range, or the timezone string is not recognized.  If the
    string contains no timezone, UTC is assumed.

    The timezone in the string may be numerical (like "-0800" or "+0100") or a
    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
    timezone strings equivalent to UTC (zero offset) are known to the function.

    The function loosely parses the following formats:

    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)

    The parser ignores leading and trailing whitespace.  The time may be
    absent.

    If the year is given with only 2 digits, the function will select the
    century that makes the year closest to the current date.
    """
    # fast exit for strictly conforming string
    m = STRICT_DATE_RE.search(text)
    if m:
        g = m.groups()
        mon = MONTHS_LOWER.index(g[1].lower()) + 1
        tt = (int(g[2]), mon, int(g[0]),
              int(g[3]), int(g[4]), float(g[5]))
        return _timegm(tt)
    # No, we need some messy parsing...
    # clean up
    text = text.lstrip()
    text = WEEKDAY_RE.sub("", text, 1)  # Useless weekday
    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7
    # loose regexp parse
    m = LOOSE_HTTP_DATE_RE.search(text)
    if m is not None:
        day, mon, yr, hr, min, sec, tz = m.groups()
    else:
        return None  # bad format
    return _str2time(day, mon, yr, hr, min, sec, tz)
# ISO 8601-ish date, optionally followed by a clock and a timezone.  The
# pattern must be a raw string: it was previously a plain string literal full
# of "\d" escapes, which is a DeprecationWarning (SyntaxWarning on 3.12+).
ISO_DATE_RE = re.compile(
    r"""^
    (\d{4})              # year
       [-\/]?
    (\d\d?)              # numerical month
       [-\/]?
    (\d\d?)              # day
   (?:
         (?:\s+|[-:Tt])  # separator before clock
      (\d\d?):?(\d\d)    # hour:min
      (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
   )?                    # optional clock
      \s*
   ([-+]?\d\d?:?(:?\d\d)?
    |Z|z)?               # timezone  (Z is "zero meridian", i.e. GMT)
      \s*$""", re.X | re.ASCII)
def iso2time(text):
    """
    As for http2time, but parses the ISO 8601 formats:

    1994-02-03 14:15:29 -0100    -- ISO 8601 format
    1994-02-03 14:15:29          -- zone is optional
    1994-02-03                   -- only date
    1994-02-03T14:15:29          -- Use T as separator
    19940203T141529Z             -- ISO 8601 compact format
    19940203                     -- only date
    """
    # clean up
    text = text.lstrip()
    # tz is time zone specifier string
    day, mon, yr, hr, min, sec, tz = [None]*7
    # loose regexp parse
    m = ISO_DATE_RE.search(text)
    if m is not None:
        # XXX there's an extra bit of the timezone I'm ignoring here: is
        #   this the right thing to do?
        yr, mon, day, hr, min, sec, tz, _ = m.groups()
    else:
        return None  # bad format
    return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
    """Return unmatched part of re.Match object."""
    start, end = match.span(0)
    return match.string[:start]+match.string[end:]

# Anchored fragments consumed in turn by split_header_words below.
HEADER_TOKEN_RE =        re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE =        re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE =       re.compile(r"\\(.)")

def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.

    The function knows how to deal with ",", ";" and "=" as well as quoted
    values after "=".  A list of space separated tokens are parsed as if they
    were separated by ";".

    If the header_values passed as argument contains multiple values, then
    they are treated as if they were a single value separated by comma ",".

    This means that this function is useful for parsing header fields that
    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
    the requirement for tokens).

      headers           = #header
      header            = (token | parameter) *( [";"] (token | parameter))

      token             = 1*<any CHAR except CTLs or separators>
      separators        = "(" | ")" | "<" | ">" | "@"
                        | "," | ";" | ":" | "\" | <">
                        | "/" | "[" | "]" | "?" | "="
                        | "{" | "}" | SP | HT

      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
      qdtext            = <any TEXT except <">>
      quoted-pair       = "\" CHAR

      parameter         = attribute "=" value
      attribute         = token
      value             = token | quoted-string

    Each header is represented by a list of key/value pairs.  The value for a
    simple token (not part of a parameter) is None.  Syntactically incorrect
    headers will not necessarily be parsed as you would want.

    This is easier to describe with some examples:

    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]
    """
    assert not isinstance(header_values, str)
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        while text:
            m = HEADER_TOKEN_RE.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = HEADER_QUOTED_VALUE_RE.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
                else:
                    m = HEADER_VALUE_RE.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = value.rstrip()
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif text.lstrip().startswith(","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = text.lstrip()[1:]
                if pairs: result.append(pairs)
                pairs = []
            else:
                # skip junk.  The pattern must be a raw string: "\s" in a
                # plain literal is a deprecated invalid escape sequence.
                non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs: result.append(pairs)
    return result
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")

def join_header_words(lists):
    """Do the inverse (almost) of the conversion done by split_header_words.

    Takes a list of lists of (key, value) pairs and produces a single header
    value.  Attribute values are quoted if needed.

    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
    'text/plain; charset="iso-8859/1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
    'text/plain, charset="iso-8859/1"'
    """
    headers = []
    for pairs in lists:
        attrs = []
        for key, val in pairs:
            if val is not None:
                if not re.search(r"^\w+$", val):
                    # escape " and \, then quote the whole value
                    val = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", val)
                    val = '"%s"' % val
                key = "%s=%s" % (key, val)
            attrs.append(key)
        if attrs:
            headers.append("; ".join(attrs))
    return ", ".join(headers)
def strip_quotes(text):
    """Strip at most one leading and one trailing double-quote from *text*."""
    if text.startswith('"'):
        text = text[1:]
    return text[:-1] if text.endswith('"') else text
def parse_ns_headers(ns_headers):
    """Ad-hoc parser for Netscape protocol cookie-attributes.

    The old Netscape cookie format for Set-Cookie can for instance contain
    an unquoted "," in the expires field, so we have to use this ad-hoc
    parser instead of split_header_words.

    XXX This may not make the best possible effort to parse all the crap
    that Netscape Cookie headers contain.  Ronald Tschalar's HTTPClient
    parser is probably better, so could do worse than following that if
    this ever gives any trouble.

    Currently, this is also used for parsing RFC 2109 cookies.
    """
    known_attrs = ("expires", "domain", "path", "secure",
                   # RFC 2109 attrs (may turn up in Netscape cookies, too)
                   "version", "port", "max-age")

    result = []
    for ns_header in ns_headers:
        pairs = []
        version_set = False
        for ii, param in enumerate(re.split(r";\s*", ns_header)):
            param = param.rstrip()
            if param == "":
                continue
            if "=" not in param:
                k, v = param, None
            else:
                k, v = re.split(r"\s*=\s*", param, maxsplit=1)
                k = k.lstrip()
            if ii != 0:
                lc = k.lower()
                if lc in known_attrs:
                    k = lc
                if k == "version":
                    # This is an RFC 2109 cookie.  Guard against a bare
                    # "version" attribute with no value, which used to crash
                    # in strip_quotes(None).
                    if v is not None:
                        v = strip_quotes(v)
                        version_set = True
                if k == "expires":
                    # convert expires date to seconds since epoch; a bare
                    # "expires" with no value is kept as None rather than
                    # crashing.
                    if v is not None:
                        v = http2time(strip_quotes(v))  # None if invalid
            pairs.append((k, v))

        if pairs:
            if not version_set:
                pairs.append(("version", "0"))
            result.append(pairs)
    return result
# Matches a trailing ".<digits>", which is how this module spots dotted-quad
# IP addresses (see is_HDN / liberal_is_HDN; IPv6 is not handled).
IPV4_RE = re.compile(r"\.\d+$", re.ASCII)

def is_HDN(text):
    """Return True if text is a host domain name.

    An HDN is non-empty, is not an IP address, and has neither a leading nor
    a trailing dot.  (XXX which RFC defines HDN for RFC 2965 purposes?  And
    what about IPv6?  Check other IPV4_RE uses if this changes.)
    """
    if IPV4_RE.search(text):
        return False
    return bool(text) and text[0] != "." and text[-1] != "."

def domain_match(A, B):
    """Return True if domain A domain-matches domain B, according to RFC 2965.

    A and B may be host domain names or IP addresses.  Comparison is
    case-insensitive.  A domain-matches B if the strings compare equal, or if
    A has the form NB where N is non-empty and B starts with a dot (so
    x.y.com matches .Y.com but not Y.com).  The relation is not commutative.
    """
    # If A or B is an IP address, only the direct string-compare applies.
    a, b = A.lower(), B.lower()
    if a == b:
        return True
    if not is_HDN(a):
        return False
    i = a.rfind(b)
    if i <= 0:
        # a does not have the form NB, or N is the empty string
        return False
    if not b.startswith("."):
        return False
    return is_HDN(b[1:])

def liberal_is_HDN(text):
    """Return True if text is sort-of-like a host domain name.

    For accepting/blocking domains: anything that is not an IP address.
    """
    return not IPV4_RE.search(text)

def user_domain_match(A, B):
    """Loose domain-match for user block/allow lists.

    A and B may be host domain names or IP addresses.
    """
    a, b = A.lower(), B.lower()
    if not (liberal_is_HDN(a) and liberal_is_HDN(b)):
        # IP addresses must match exactly
        return a == b
    if b.startswith("."):
        return a.endswith(b)
    return a == b
cut_port_re = re.compile(r":\d+$", re.ASCII)

def request_host(request):
    """Return request-host, as defined by RFC 2965.

    Variation from RFC: the returned value is lowercased, for convenient
    comparison.
    """
    host = urllib.parse.urlparse(request.get_full_url())[1]
    if not host:
        host = request.get_header("Host", "")
    # remove port, if present
    return cut_port_re.sub("", host, 1).lower()

def eff_request_host(request):
    """Return a tuple (request-host, effective request-host name).

    As defined by RFC 2965, except both are lowercased.  A host name with no
    dots (and which is not an IP address) gets ".local" appended.
    """
    req_host = request_host(request)
    erhn = req_host
    if "." not in req_host and not IPV4_RE.search(req_host):
        erhn = req_host + ".local"
    return req_host, erhn
def request_path(request):
    """Return the escaped path component of the request-URI (RFC 2965)."""
    parts = urllib.parse.urlsplit(request.get_full_url())
    path = escape_path(parts.path)
    if not path.startswith("/"):
        # fix bad RFC 2396 absoluteURI
        path = "/" + path
    return path
def request_port(request):
    """Return the request's port as a string.

    Falls back to DEFAULT_HTTP_PORT when the host carries no explicit port;
    returns None if an explicit port is non-numeric.
    """
    host = request.host
    sep = host.find(':')
    if sep < 0:
        return DEFAULT_HTTP_PORT
    port = host[sep + 1:]
    try:
        int(port)
    except ValueError:
        _debug("nonnumeric port: '%s'", port)
        return None
    return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")

def uppercase_escaped_char(match):
    """Normalise a single %xx escape to upper case."""
    return "%%%s" % match.group(1).upper()

def escape_path(path):
    """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
    # There's no knowing what character encoding was used to create URLs
    # containing %-escapes, but since we have to pick one to escape invalid
    # path characters, we pick UTF-8, as recommended in the HTML 4.0
    # specification:
    # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
    # And here, kind of: draft-fielding-uri-rfc2396bis-03
    # (And in draft IRI specification: draft-duerst-iri-05)
    # (And here, for new URI schemes: RFC 2718)
    quoted = urllib.parse.quote(path, HTTP_PATH_SAFE)
    return ESCAPED_CHAR_RE.sub(uppercase_escaped_char, quoted)
def reach(h):
    """Return reach of host h, as defined by RFC 2965, section 1.

    The reach R of a host name H is defined as follows:

       *  If

          -  H is the host domain name of a host; and,

          -  H has the form A.B; and

          -  A has no embedded (that is, interior) dots; and

          -  B has at least one embedded dot, or B is the string "local".
          then the reach of H is .B.

       *  Otherwise, the reach of H is H.

    >>> reach("www.acme.com")
    '.acme.com'
    >>> reach("acme.com")
    'acme.com'
    >>> reach("acme.local")
    '.local'
    """
    dot = h.find(".")
    if dot >= 0:
        b = h[dot + 1:]  # everything after the first label
        if is_HDN(h) and (b.find(".") >= 0 or b == "local"):
            return "." + b
    return h
def is_third_party(request):
    """Return True if the request is to a third-party host.

    RFC 2965, section 3.3.6: an unverifiable transaction is to a third-party
    host if its request-host U does not domain-match the reach R of the
    request-host O in the origin transaction.
    """
    return not domain_match(request_host(request),
                            reach(request.origin_req_host))
class Cookie:
    """HTTP Cookie.

    This class represents both Netscape and RFC 2965 cookies.

    This is deliberately a very simple class.  It just holds attributes.  It's
    possible to construct Cookie instances that don't comply with the cookie
    standards.  CookieJar.make_cookies is the factory function for Cookie
    objects -- it deals with cookie parsing, supplying defaults, and
    normalising to the representation used in this class.  CookiePolicy is
    responsible for checking them to see whether they should be accepted from
    and returned to the server.

    Note that the port may be present in the headers, but unspecified ("Port"
    rather than "Port=80", for example); if this is the case, port is None.
    """

    def __init__(self, version, name, value,
                 port, port_specified,
                 domain, domain_specified, domain_initial_dot,
                 path, path_specified,
                 secure,
                 expires,
                 discard,
                 comment,
                 comment_url,
                 rest,
                 rfc2109=False,
                 ):
        # Coerce the numeric attributes up front; a port of None with
        # port_specified True is contradictory, so reject it.
        if version is not None: version = int(version)
        if expires is not None: expires = int(expires)
        if port is None and port_specified is True:
            raise ValueError("if port is None, port_specified must be false")

        self.version = version
        self.name = name
        self.value = value
        self.port = port
        self.port_specified = port_specified
        # normalise case, as per RFC 2965 section 3.3.3
        self.domain = domain.lower()
        self.domain_specified = domain_specified
        # Sigh.  We need to know whether the domain given in the
        # cookie-attribute had an initial dot, in order to follow RFC 2965
        # (as clarified in draft errata).  Needed for the returned $Domain
        # value.
        self.domain_initial_dot = domain_initial_dot
        self.path = path
        self.path_specified = path_specified
        self.secure = secure
        self.expires = expires
        self.discard = discard
        self.comment = comment
        self.comment_url = comment_url
        self.rfc2109 = rfc2109

        # Nonstandard attributes live in a private copy, so later mutation of
        # the caller's dict cannot affect this cookie.
        self._rest = copy.copy(rest)

    def has_nonstandard_attr(self, name):
        """Return True if the cookie carries nonstandard attribute *name*."""
        return name in self._rest
    def get_nonstandard_attr(self, name, default=None):
        """Return the value of nonstandard attribute *name*, or *default*."""
        return self._rest.get(name, default)
    def set_nonstandard_attr(self, name, value):
        """Set nonstandard attribute *name* to *value*."""
        self._rest[name] = value

    def is_expired(self, now=None):
        """Return True if the cookie has expired as of *now* (default: current time)."""
        if now is None: now = time.time()
        if (self.expires is not None) and (self.expires <= now):
            return True
        return False

    def __str__(self):
        if self.port is None: p = ""
        else: p = ":"+self.port
        limit = self.domain + p + self.path
        if self.value is not None:
            namevalue = "%s=%s" % (self.name, self.value)
        else:
            namevalue = self.name
        return "<Cookie %s for %s>" % (namevalue, limit)

    def __repr__(self):
        args = []
        for name in ("version", "name", "value",
                     "port", "port_specified",
                     "domain", "domain_specified", "domain_initial_dot",
                     "path", "path_specified",
                     "secure", "expires", "discard", "comment", "comment_url",
                     ):
            attr = getattr(self, name)
            args.append("%s=%s" % (name, repr(attr)))
        args.append("rest=%s" % repr(self._rest))
        args.append("rfc2109=%s" % repr(self.rfc2109))
        return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
    """Decide which cookies get accepted from and returned to a server.

    May also modify cookies, though this is probably a bad idea.

    The subclass DefaultCookiePolicy defines the standard rules for Netscape
    and RFC 2965 cookies -- override that if you want a customised policy.
    """

    def set_ok(self, cookie, request):
        """Return true if (and only if) cookie should be accepted from server.

        Currently, pre-expired cookies never get this far -- the CookieJar
        class deletes such cookies itself.
        """
        raise NotImplementedError()

    def return_ok(self, cookie, request):
        """Return true if (and only if) cookie should be returned to server."""
        raise NotImplementedError()

    def domain_return_ok(self, domain, request):
        """Return false if no cookie with this domain should be returned."""
        return True

    def path_return_ok(self, path, request):
        """Return false if no cookie with this path should be returned."""
        return True
class DefaultCookiePolicy(CookiePolicy):
    """Implements the standard rules for accepting and returning cookies."""

    # Flags for strict_ns_domain, combinable with bitwise-or: how strictly
    # Netscape (version 0) cookie domains are checked.
    DomainStrictNoDots = 1      # host prefix may not contain a dot (set_ok_domain)
    DomainStrictNonDomain = 2   # unspecified domain must equal request host (return_ok_domain)
    DomainRFC2965Match = 4      # require full RFC 2965 domain-match (set_ok_domain)

    DomainLiberal = 0
    DomainStrict = DomainStrictNoDots|DomainStrictNonDomain

    def __init__(self,
                 blocked_domains=None, allowed_domains=None,
                 netscape=True, rfc2965=False,
                 rfc2109_as_netscape=None,
                 hide_cookie2=False,
                 strict_domain=False,
                 strict_rfc2965_unverifiable=True,
                 strict_ns_unverifiable=False,
                 strict_ns_domain=DomainLiberal,
                 strict_ns_set_initial_dollar=False,
                 strict_ns_set_path=False,
                 ):
        """Constructor arguments should be passed as keyword arguments only."""
        self.netscape = netscape
        self.rfc2965 = rfc2965
        self.rfc2109_as_netscape = rfc2109_as_netscape
        self.hide_cookie2 = hide_cookie2
        self.strict_domain = strict_domain
        self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
        self.strict_ns_unverifiable = strict_ns_unverifiable
        self.strict_ns_domain = strict_ns_domain
        self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
        self.strict_ns_set_path = strict_ns_set_path

        if blocked_domains is not None:
            self._blocked_domains = tuple(blocked_domains)
        else:
            self._blocked_domains = ()

        if allowed_domains is not None:
            allowed_domains = tuple(allowed_domains)
        self._allowed_domains = allowed_domains

    def blocked_domains(self):
        """Return the sequence of blocked domains (as a tuple)."""
        return self._blocked_domains
    def set_blocked_domains(self, blocked_domains):
        """Set the sequence of blocked domains."""
        self._blocked_domains = tuple(blocked_domains)

    def is_blocked(self, domain):
        """Return True if *domain* matches an entry in the block-list."""
        for blocked_domain in self._blocked_domains:
            if user_domain_match(domain, blocked_domain):
                return True
        return False

    def allowed_domains(self):
        """Return None, or the sequence of allowed domains (as a tuple)."""
        return self._allowed_domains
    def set_allowed_domains(self, allowed_domains):
        """Set the sequence of allowed domains, or None."""
        if allowed_domains is not None:
            allowed_domains = tuple(allowed_domains)
        self._allowed_domains = allowed_domains

    def is_not_allowed(self, domain):
        """Return True if an allow-list exists and *domain* is not on it."""
        if self._allowed_domains is None:
            return False
        for allowed_domain in self._allowed_domains:
            if user_domain_match(domain, allowed_domain):
                return False
        return True

    def set_ok(self, cookie, request):
        """
        If you override .set_ok(), be sure to call this method.  If it returns
        false, so should your subclass (assuming your subclass wants to be more
        strict about which cookies to accept).
        """
        _debug(" - checking cookie %s=%s", cookie.name, cookie.value)

        assert cookie.name is not None

        # Dispatch to each per-aspect check in turn; all must pass.
        for n in "version", "verifiability", "name", "path", "domain", "port":
            fn_name = "set_ok_"+n
            fn = getattr(self, fn_name)
            if not fn(cookie, request):
                return False

        return True

    def set_ok_version(self, cookie, request):
        """Accept only cookies of a protocol version this policy has enabled."""
        if cookie.version is None:
            # Version is always set to 0 by parse_ns_headers if it's a Netscape
            # cookie, so this must be an invalid RFC 2965 cookie.
            _debug("   Set-Cookie2 without version attribute (%s=%s)",
                   cookie.name, cookie.value)
            return False
        if cookie.version > 0 and not self.rfc2965:
            _debug("   RFC 2965 cookies are switched off")
            return False
        elif cookie.version == 0 and not self.netscape:
            _debug("   Netscape cookies are switched off")
            return False
        return True

    def set_ok_verifiability(self, cookie, request):
        """Reject third-party cookies from unverifiable transactions if strict."""
        if request.unverifiable and is_third_party(request):
            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
                _debug("   third-party RFC 2965 cookie during "
                             "unverifiable transaction")
                return False
            elif cookie.version == 0 and self.strict_ns_unverifiable:
                _debug("   third-party Netscape cookie during "
                             "unverifiable transaction")
                return False
        return True

    def set_ok_name(self, cookie, request):
        """Optionally reject Netscape cookie names starting with '$'."""
        # Try and stop servers setting V0 cookies designed to hack other
        # servers that know both V0 and V1 protocols.
        if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
            cookie.name.startswith("$")):
            _debug("   illegal name (starts with '$'): '%s'", cookie.name)
            return False
        return True

    def set_ok_path(self, cookie, request):
        """Require the cookie path to be a prefix of the request path."""
        if cookie.path_specified:
            req_path = request_path(request)
            if ((cookie.version > 0 or
                 (cookie.version == 0 and self.strict_ns_set_path)) and
                not req_path.startswith(cookie.path)):
                _debug("   path attribute %s is not a prefix of request "
                       "path %s", cookie.path, req_path)
                return False
        return True

    def set_ok_domain(self, cookie, request):
        """Check the cookie's domain against block/allow lists and the
        (version-dependent) domain-matching rules for the request host."""
        if self.is_blocked(cookie.domain):
            _debug("   domain %s is in user block-list", cookie.domain)
            return False
        if self.is_not_allowed(cookie.domain):
            _debug("   domain %s is not in user allow-list", cookie.domain)
            return False
        if cookie.domain_specified:
            req_host, erhn = eff_request_host(request)
            domain = cookie.domain
            if self.strict_domain and (domain.count(".") >= 2):
                # XXX This should probably be compared with the Konqueror
                # (kcookiejar.cpp) and Mozilla implementations, but it's a
                # losing battle.
                i = domain.rfind(".")
                j = domain.rfind(".", 0, i)
                if j == 0:  # domain like .foo.bar
                    tld = domain[i+1:]
                    sld = domain[j+1:i]
                    if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
                       "gov", "mil", "int", "aero", "biz", "cat", "coop",
                       "info", "jobs", "mobi", "museum", "name", "pro",
                       "travel", "eu") and len(tld) == 2:
                        # domain like .co.uk
                        _debug("   country-code second level domain %s", domain)
                        return False
            if domain.startswith("."):
                undotted_domain = domain[1:]
            else:
                undotted_domain = domain
            embedded_dots = (undotted_domain.find(".") >= 0)
            if not embedded_dots and domain != ".local":
                _debug("   non-local domain %s contains no embedded dot",
                       domain)
                return False
            if cookie.version == 0:
                if (not erhn.endswith(domain) and
                    (not erhn.startswith(".") and
                     not ("."+erhn).endswith(domain))):
                    _debug("   effective request-host %s (even with added "
                           "initial dot) does not end with %s",
                           erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainRFC2965Match)):
                if not domain_match(erhn, domain):
                    _debug("   effective request-host %s does not domain-match "
                           "%s", erhn, domain)
                    return False
            if (cookie.version > 0 or
                (self.strict_ns_domain & self.DomainStrictNoDots)):
                host_prefix = req_host[:-len(domain)]
                if (host_prefix.find(".") >= 0 and
                    not IPV4_RE.search(req_host)):
                    _debug("   host prefix %s for domain %s contains a dot",
                           host_prefix, domain)
                    return False
        return True

    def set_ok_port(self, cookie, request):
        """If the cookie restricts ports, the request port must be listed."""
        if cookie.port_specified:
            req_port = request_port(request)
            if req_port is None:
                req_port = "80"
            else:
                req_port = str(req_port)
            for p in cookie.port.split(","):
                try:
                    int(p)
                except ValueError:
                    _debug("   bad port %s (not numeric)", p)
                    return False
                if p == req_port:
                    break
            else:
                _debug("   request port (%s) not found in %s",
                       req_port, cookie.port)
                return False
        return True

    def return_ok(self, cookie, request):
        """
        If you override .return_ok(), be sure to call this method.  If it
        returns false, so should your subclass (assuming your subclass wants to
        be more strict about which cookies to return).
        """
        # Path has already been checked by .path_return_ok(), and domain
        # blocking done by .domain_return_ok().
        _debug(" - checking cookie %s=%s", cookie.name, cookie.value)

        for n in "version", "verifiability", "secure", "expires", "port", "domain":
            fn_name = "return_ok_"+n
            fn = getattr(self, fn_name)
            if not fn(cookie, request):
                return False
        return True

    def return_ok_version(self, cookie, request):
        """Return only cookies of a protocol version this policy has enabled."""
        if cookie.version > 0 and not self.rfc2965:
            _debug("   RFC 2965 cookies are switched off")
            return False
        elif cookie.version == 0 and not self.netscape:
            _debug("   Netscape cookies are switched off")
            return False
        return True

    def return_ok_verifiability(self, cookie, request):
        """Withhold third-party cookies from unverifiable transactions if strict."""
        if request.unverifiable and is_third_party(request):
            if cookie.version > 0 and self.strict_rfc2965_unverifiable:
                _debug("   third-party RFC 2965 cookie during unverifiable "
                       "transaction")
                return False
            elif cookie.version == 0 and self.strict_ns_unverifiable:
                _debug("   third-party Netscape cookie during unverifiable "
                       "transaction")
                return False
        return True

    def return_ok_secure(self, cookie, request):
        """Send secure cookies only over https requests."""
        if cookie.secure and request.type != "https":
            _debug("   secure cookie with non-secure request")
            return False
        return True

    def return_ok_expires(self, cookie, request):
        """Withhold expired cookies (relative to self._now, set by CookieJar)."""
        if cookie.is_expired(self._now):
            _debug("   cookie expired")
            return False
        return True

    def return_ok_port(self, cookie, request):
        """If the cookie restricts ports, the request port must be listed."""
        if cookie.port:
            req_port = request_port(request)
            if req_port is None:
                req_port = "80"
            for p in cookie.port.split(","):
                if p == req_port:
                    break
            else:
                _debug("   request port %s does not match cookie port %s",
                       req_port, cookie.port)
                return False
        return True

    def return_ok_domain(self, cookie, request):
        """Apply the version-dependent domain-matching rules for returning."""
        req_host, erhn = eff_request_host(request)
        domain = cookie.domain

        # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
        if (cookie.version == 0 and
            (self.strict_ns_domain & self.DomainStrictNonDomain) and
            not cookie.domain_specified and domain != erhn):
            _debug("   cookie with unspecified domain does not string-compare "
                   "equal to request domain")
            return False

        if cookie.version > 0 and not domain_match(erhn, domain):
            _debug("   effective request-host name %s does not domain-match "
                   "RFC 2965 cookie domain %s", erhn, domain)
            return False
        if cookie.version == 0 and not ("."+erhn).endswith(domain):
            _debug("   request-host %s does not match Netscape cookie domain "
                   "%s", req_host, domain)
            return False
        return True

    def domain_return_ok(self, domain, request):
        """Liberal per-domain pre-filter.  This is here as an optimization to
        avoid having to load lots of MSIE cookie files unless necessary."""
        req_host, erhn = eff_request_host(request)
        if not req_host.startswith("."):
            req_host = "."+req_host
        if not erhn.startswith("."):
            erhn = "."+erhn
        if not (req_host.endswith(domain) or erhn.endswith(domain)):
            #_debug("   request domain %s does not match cookie domain %s",
            #       req_host, domain)
            return False

        if self.is_blocked(domain):
            _debug("   domain %s is in user block-list", domain)
            return False
        if self.is_not_allowed(domain):
            _debug("   domain %s is not in user allow-list", domain)
            return False

        return True

    def path_return_ok(self, path, request):
        """Per-path pre-filter: cookie path must prefix the request path."""
        _debug("- checking cookie path=%s", path)
        req_path = request_path(request)
        if not req_path.startswith(path):
            _debug("  %s does not path-match %s", req_path, path)
            return False
        return True
def vals_sorted_by_key(adict):
keys = sorted(adict.keys())
return map(adict.get, keys)
def deepvalues(mapping):
    """Iterates over nested mapping, depth-first, in sorted order by key."""
    for key in sorted(mapping.keys()):
        obj = mapping[key]
        # anything that quacks like a mapping (has .items) is recursed into;
        # everything else is a leaf and is yielded directly
        if hasattr(obj, "items"):
            yield from deepvalues(obj)
        else:
            yield obj
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
# (Sentinel: the class object itself is used, never instantiated; tested
# with "is"/"is not".)
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try
urllib.request.build_opener(HTTPCookieProcessor).open(url).
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII)
    def __init__(self, policy=None):
        """Create a CookieJar; *policy* defaults to DefaultCookiePolicy()."""
        if policy is None:
            policy = DefaultCookiePolicy()
        self._policy = policy
        # guards every mutation of the nested domain -> path -> name mapping
        self._cookies_lock = _threading.RLock()
        self._cookies = {}
    def set_policy(self, policy):
        """Replace the CookiePolicy instance used by this jar."""
        self._policy = policy
    def _cookies_for_domain(self, domain, request):
        """Return the cookies stored under *domain* that policy allows to be
        returned for *request* (may be empty)."""
        cookies = []
        # cheap whole-domain rejection first, then per-path, then per-cookie
        if not self._policy.domain_return_ok(domain, request):
            return []
        _debug("Checking %s for cookies to return", domain)
        cookies_by_path = self._cookies[domain]
        for path in cookies_by_path.keys():
            if not self._policy.path_return_ok(path, request):
                continue
            cookies_by_name = cookies_by_path[path]
            for cookie in cookies_by_name.values():
                if not self._policy.return_ok(cookie, request):
                    _debug("   not returning cookie")
                    continue
                _debug("   it's a match")
                cookies.append(cookie)
        return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
cookies.sort(key=lambda a: len(a.path), reverse=True)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
_debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
if rfc2109_as_ns is None:
rfc2109_as_ns = not self._policy.rfc2965
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_ns:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object."""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
ns_hdrs = headers.get_all("Set-Cookie", [])
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except Exception:
_warn_unhandled_exception()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except Exception:
_warn_unhandled_exception()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return key not in lookup
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so."""
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release()
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
"""
self._cookies_lock.acquire()
try:
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
    def __iter__(self):
        # Iterate over every contained Cookie object, depth-first across
        # domain -> path -> name, in sorted key order (see deepvalues()).
        return deepvalues(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
# Raised by FileCookieJar subclasses when a cookie file is malformed.
# derives from OSError for backwards-compatibility with Python 2.4.0
class LoadError(OSError): pass
class FileCookieJar(CookieJar):
    """CookieJar that can be loaded from and saved to a file."""
    def __init__(self, filename=None, delayload=False, policy=None):
        """
        Cookies are NOT loaded from the named file until either the .load() or
        .revert() method is called.
        """
        CookieJar.__init__(self, policy)
        if filename is not None:
            try:
                filename+""
            except TypeError:
                # was a bare "except:", which also swallowed SystemExit and
                # KeyboardInterrupt; only a failed str concatenation (i.e. a
                # non-string-like filename) should map to ValueError
                raise ValueError("filename must be string-like")
        self.filename = filename
        self.delayload = bool(delayload)
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Save cookies to a file.  Subclasses must implement this."""
        raise NotImplementedError()
    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        with open(filename) as f:
            self._really_load(f, filename, ignore_discard, ignore_expires)
    def revert(self, filename=None,
               ignore_discard=False, ignore_expires=False):
        """Clear all cookies and reload cookies from a saved file.

        Raises LoadError (or OSError) if reversion is not successful; the
        object's state will not be altered if this happens.
        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        self._cookies_lock.acquire()
        try:
            # snapshot current cookies so a failed load can be rolled back
            old_state = copy.deepcopy(self._cookies)
            self._cookies = {}
            try:
                self.load(filename, ignore_discard, ignore_expires)
            except OSError:
                self._cookies = old_state
                raise
        finally:
            self._cookies_lock.release()
def lwp_cookie_str(cookie):
    """Return string representation of Cookie in the LWP cookie file format.

    Actually, the format is extended a bit -- see module docstring.
    """
    # Start with name=value, then append attributes in a fixed order;
    # attributes paired with None are rendered as bare flags.
    h = [(cookie.name, cookie.value),
         ("path", cookie.path),
         ("domain", cookie.domain)]
    if cookie.port is not None: h.append(("port", cookie.port))
    if cookie.path_specified: h.append(("path_spec", None))
    if cookie.port_specified: h.append(("port_spec", None))
    if cookie.domain_initial_dot: h.append(("domain_dot", None))
    if cookie.secure: h.append(("secure", None))
    if cookie.expires: h.append(("expires",
                               time2isoz(float(cookie.expires))))
    if cookie.discard: h.append(("discard", None))
    if cookie.comment: h.append(("comment", cookie.comment))
    if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
    # unknown cookie-attributes are preserved; sorted for deterministic output
    keys = sorted(cookie._rest.keys())
    for k in keys:
        h.append((k, str(cookie._rest[k])))
    h.append(("version", str(cookie.version)))
    return join_header_words([h])
class LWPCookieJar(FileCookieJar):
"""
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl libary, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
as_lwp_str(ignore_discard=True, ignore_expired=True)
"""
    def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
        """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.

        ignore_discard and ignore_expires: see docstring for FileCookieJar.save
        """
        now = time.time()
        r = []
        for cookie in self:
            # note the inverted sense: a *false* flag means "skip these"
            if not ignore_discard and cookie.discard:
                continue
            if not ignore_expires and cookie.is_expired(now):
                continue
            r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
        # trailing "" gives the string a final newline
        return "\n".join(r+[""])
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Write cookies to *filename* as "Set-Cookie3" (LWP) lines."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        with open(filename, "w") as f:
            # There really isn't an LWP Cookies 2.0 format, but this indicates
            # that there is extra information in here (domain_dot and
            # port_spec) while still being compatible with libwww-perl, I hope.
            f.write("#LWP-Cookies-2.0\n")
            f.write(self.as_lwp_str(ignore_discard, ignore_expires))
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not self.magic_re.search(magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except OSError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
class MozillaCookieJar(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = re.compile("#( Netscape)? HTTP Cookie File")
header = """\
# Netscape HTTP Cookie File
# http://curl.haxx.se/rfc/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not self.magic_re.search(magic):
f.close()
raise LoadError(
"%r does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith(("#", "$")) or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except OSError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Netscape format cookies file %r: %r" %
(filename, line))
    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Write cookies to *filename* in Netscape "cookies.txt" format."""
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)
        with open(filename, "w") as f:
            f.write(self.header)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                # boolean columns are literal "TRUE"/"FALSE" strings
                if cookie.secure: secure = "TRUE"
                else: secure = "FALSE"
                if cookie.domain.startswith("."): initial_dot = "TRUE"
                else: initial_dot = "FALSE"
                if cookie.expires is not None:
                    expires = str(cookie.expires)
                else:
                    # session cookie: empty expiry column
                    expires = ""
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ""
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    "\t".join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value])+
                    "\n")
|
lgpl-3.0
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/skimage/segmentation/tests/test_clear_border.py
|
4
|
4369
|
import numpy as np
from skimage.segmentation import clear_border
from skimage._shared.testing import assert_array_equal, assert_
def test_clear_border():
    """clear_border removes connected components touching the 2-D border."""
    image = np.array(
        [[0, 0, 0, 0, 0, 0, 0, 1, 0],
         [0, 0, 0, 0, 1, 0, 0, 0, 0],
         [1, 0, 0, 1, 0, 1, 0, 0, 0],
         [0, 0, 1, 1, 1, 1, 1, 0, 0],
         [0, 1, 1, 1, 1, 1, 1, 1, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0]])
    # test default case
    result = clear_border(image.copy())
    ref = image.copy()
    # only the two isolated pixels on the left and top edges are cleared
    ref[2, 0] = 0
    ref[0, -2] = 0
    assert_array_equal(result, ref)
    # test buffer
    result = clear_border(image.copy(), 1)
    # with a 1-pixel buffer every object touches the (enlarged) border
    assert_array_equal(result, np.zeros(result.shape))
    # test background value
    result = clear_border(image.copy(), buffer_size=1, bgval=2)
    assert_array_equal(result, 2 * np.ones_like(image))
def test_clear_border_3d():
image = np.array([
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
])
# test default case
result = clear_border(image.copy())
ref = image.copy()
ref[0, 3, 0] = 0
assert_array_equal(result, ref)
# test buffer
result = clear_border(image.copy(), 1)
assert_array_equal(result, np.zeros(result.shape))
# test background value
result = clear_border(image.copy(), buffer_size=1, bgval=2)
assert_array_equal(result, 2 * np.ones_like(image))
def test_clear_border_non_binary():
    """Labeled (non-binary) input: only border-touching labels are zeroed."""
    image = np.array([[1, 2, 3, 1, 2],
                      [3, 3, 5, 4, 2],
                      [3, 4, 5, 4, 2],
                      [3, 3, 2, 1, 2]])
    result = clear_border(image)
    expected = np.array([[0, 0, 0, 0, 0],
                         [0, 0, 5, 4, 0],
                         [0, 4, 5, 4, 0],
                         [0, 0, 0, 0, 0]])
    assert_array_equal(result, expected)
    # the input must not be modified in place by default
    assert_(not np.all(image == result))
def test_clear_border_non_binary_3d():
image3d = np.array(
[[[1, 2, 3, 1, 2],
[3, 3, 3, 4, 2],
[3, 4, 3, 4, 2],
[3, 3, 2, 1, 2]],
[[1, 2, 3, 1, 2],
[3, 3, 5, 4, 2],
[3, 4, 5, 4, 2],
[3, 3, 2, 1, 2]],
[[1, 2, 3, 1, 2],
[3, 3, 3, 4, 2],
[3, 4, 3, 4, 2],
[3, 3, 2, 1, 2]],
])
result = clear_border(image3d)
expected = np.array(
[[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 5, 0, 0],
[0, 0, 5, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
])
assert_array_equal(result, expected)
assert_(not np.all(image3d == result))
def test_clear_border_non_binary_inplace():
image = np.array([[1, 2, 3, 1, 2],
[3, 3, 5, 4, 2],
[3, 4, 5, 4, 2],
[3, 3, 2, 1, 2]])
result = clear_border(image, in_place=True)
expected = np.array([[0, 0, 0, 0, 0],
[0, 0, 5, 4, 0],
[0, 4, 5, 4, 0],
[0, 0, 0, 0, 0]])
assert_array_equal(result, expected)
assert_array_equal(image, result)
def test_clear_border_non_binary_inplace_3d():
image3d = np.array(
[[[1, 2, 3, 1, 2],
[3, 3, 3, 4, 2],
[3, 4, 3, 4, 2],
[3, 3, 2, 1, 2]],
[[1, 2, 3, 1, 2],
[3, 3, 5, 4, 2],
[3, 4, 5, 4, 2],
[3, 3, 2, 1, 2]],
[[1, 2, 3, 1, 2],
[3, 3, 3, 4, 2],
[3, 4, 3, 4, 2],
[3, 3, 2, 1, 2]],
])
result = clear_border(image3d, in_place=True)
expected = np.array(
[[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 5, 0, 0],
[0, 0, 5, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
])
assert_array_equal(result, expected)
assert_array_equal(image3d, result)
|
gpl-3.0
|
adobe/chromium
|
third_party/mesa/MesaLib/src/mapi/glapi/gen/gl_table.py
|
33
|
7098
|
#!/usr/bin/python2
# (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML
import license
import sys, getopt
class PrintGlTable(gl_XML.gl_print_base):
def __init__(self, es=False):
gl_XML.gl_print_base.__init__(self)
self.es = es
self.header_tag = '_GLAPI_TABLE_H_'
self.name = "gl_table.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004""", "BRIAN PAUL, IBM")
return
def printBody(self, api):
for f in api.functionIterateByOffset():
arg_string = f.get_parameter_string()
print ' %s (GLAPIENTRYP %s)(%s); /* %d */' % (f.return_type, f.name, arg_string, f.offset)
def printRealHeader(self):
print '#ifndef GLAPIENTRYP'
print '# ifndef GLAPIENTRY'
print '# define GLAPIENTRY'
print '# endif'
print ''
print '# define GLAPIENTRYP GLAPIENTRY *'
print '#endif'
print ''
print ''
print 'struct _glapi_table'
print '{'
return
def printRealFooter(self):
print '};'
return
class PrintRemapTable(gl_XML.gl_print_base):
def __init__(self, es=False):
gl_XML.gl_print_base.__init__(self)
self.es = es
self.header_tag = '_GLAPI_DISPATCH_H_'
self.name = "gl_table.py (from Mesa)"
self.license = license.bsd_license_template % ("(C) Copyright IBM Corporation 2005", "IBM")
return
def printRealHeader(self):
print """
/* this file should not be included directly in mesa */
/**
* \\file glapidispatch.h
* Macros for handling GL dispatch tables.
*
* For each known GL function, there are 3 macros in this file. The first
* macro is named CALL_FuncName and is used to call that GL function using
* the specified dispatch table. The other 2 macros, called GET_FuncName
* can SET_FuncName, are used to get and set the dispatch pointer for the
* named function in the specified dispatch table.
*/
"""
return
def printBody(self, api):
print '#define CALL_by_offset(disp, cast, offset, parameters) \\'
print ' (*(cast (GET_by_offset(disp, offset)))) parameters'
print '#define GET_by_offset(disp, offset) \\'
print ' (offset >= 0) ? (((_glapi_proc *)(disp))[offset]) : NULL'
print '#define SET_by_offset(disp, offset, fn) \\'
print ' do { \\'
print ' if ( (offset) < 0 ) { \\'
print ' /* fprintf( stderr, "[%s:%u] SET_by_offset(%p, %d, %s)!\\n", */ \\'
print ' /* __func__, __LINE__, disp, offset, # fn); */ \\'
print ' /* abort(); */ \\'
print ' } \\'
print ' else { \\'
print ' ( (_glapi_proc *) (disp) )[offset] = (_glapi_proc) fn; \\'
print ' } \\'
print ' } while(0)'
print ''
functions = []
abi_functions = []
alias_functions = []
count = 0
for f in api.functionIterateByOffset():
if not f.is_abi():
functions.append( [f, count] )
count += 1
else:
abi_functions.append( f )
if self.es:
# remember functions with aliases
if len(f.entry_points) > 1:
alias_functions.append(f)
for f in abi_functions:
print '#define CALL_%s(disp, parameters) (*((disp)->%s)) parameters' % (f.name, f.name)
print '#define GET_%s(disp) ((disp)->%s)' % (f.name, f.name)
print '#define SET_%s(disp, fn) ((disp)->%s = fn)' % (f.name, f.name)
print ''
print '#if !defined(_GLAPI_USE_REMAP_TABLE)'
print ''
for [f, index] in functions:
print '#define CALL_%s(disp, parameters) (*((disp)->%s)) parameters' % (f.name, f.name)
print '#define GET_%s(disp) ((disp)->%s)' % (f.name, f.name)
print '#define SET_%s(disp, fn) ((disp)->%s = fn)' % (f.name, f.name)
print ''
print '#else'
print ''
print '#define driDispatchRemapTable_size %u' % (count)
print 'extern int driDispatchRemapTable[ driDispatchRemapTable_size ];'
print ''
for [f, index] in functions:
print '#define %s_remap_index %u' % (f.name, index)
print ''
for [f, index] in functions:
arg_string = gl_XML.create_parameter_string( f.parameters, 0 )
cast = '%s (GLAPIENTRYP)(%s)' % (f.return_type, arg_string)
print '#define CALL_%s(disp, parameters) CALL_by_offset(disp, (%s), driDispatchRemapTable[%s_remap_index], parameters)' % (f.name, cast, f.name)
print '#define GET_%s(disp) GET_by_offset(disp, driDispatchRemapTable[%s_remap_index])' % (f.name, f.name)
print '#define SET_%s(disp, fn) SET_by_offset(disp, driDispatchRemapTable[%s_remap_index], fn)' % (f.name, f.name)
print ''
print '#endif /* !defined(_GLAPI_USE_REMAP_TABLE) */'
if alias_functions:
print ''
print '/* define aliases for compatibility */'
for f in alias_functions:
for name in f.entry_points:
if name != f.name:
print '#define CALL_%s(disp, parameters) CALL_%s(disp, parameters)' % (name, f.name)
print '#define GET_%s(disp) GET_%s(disp)' % (name, f.name)
print '#define SET_%s(disp, fn) SET_%s(disp, fn)' % (name, f.name)
print ''
print '#if defined(_GLAPI_USE_REMAP_TABLE)'
for f in alias_functions:
for name in f.entry_points:
if name != f.name:
print '#define %s_remap_index %s_remap_index' % (name, f.name)
print '#endif /* defined(_GLAPI_USE_REMAP_TABLE) */'
print ''
return
def show_usage():
    """Print command-line usage for this generator and exit with status 1."""
    # Parenthesized single-argument print is valid and produces identical
    # output under both Python 2 (statement form) and Python 3 (function
    # call); the original bare `print "..."` was a SyntaxError on Python 3.
    print("Usage: %s [-f input_file_name] [-m mode] [-c]" % sys.argv[0])
    print("    -m mode   Mode can be 'table' or 'remap_table'.")
    print("    -c        Enable compatibility with OpenGL ES.")
    sys.exit(1)
# Command-line driver: parse options, pick a printer class, and emit the
# dispatch-table header generated from the GL API XML description.
if __name__ == '__main__':
    file_name = "gl_API.xml"

    # -f <file>  input XML, -m <mode>  output flavour, -c  OpenGL ES compat.
    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:m:c")
    except Exception,e:
        # Any getopt failure (unknown flag, missing argument) falls back to
        # printing usage and exiting.
        show_usage()

    mode = "table"
    es = False
    for (arg,val) in args:
        if arg == "-f":
            file_name = val
        elif arg == "-m":
            mode = val
        elif arg == "-c":
            es = True

    # Select the generator; any unrecognised mode is a usage error.
    if mode == "table":
        printer = PrintGlTable(es)
    elif mode == "remap_table":
        printer = PrintRemapTable(es)
    else:
        show_usage()

    api = gl_XML.parse_GL_API( file_name )
    printer.Print( api )
|
bsd-3-clause
|
wanderine/nipype
|
nipype/interfaces/afni/tests/test_auto_Bandpass.py
|
9
|
1844
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.afni.preprocess import Bandpass
def test_Bandpass_inputs():
    # AUTO-GENERATED by tools/checkspecs.py: asserts that every trait of
    # Bandpass.input_spec still carries exactly the metadata listed here.
    # Do not hand-edit the map; regenerate it instead.
    input_map = dict(args=dict(argstr='%s',
    ),
    automask=dict(argstr='-automask',
    ),
    blur=dict(argstr='-blur %f',
    ),
    despike=dict(argstr='-despike',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    highpass=dict(argstr='%f',
    mandatory=True,
    position=-3,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    copyfile=False,
    mandatory=True,
    position=-1,
    ),
    localPV=dict(argstr='-localPV %f',
    ),
    lowpass=dict(argstr='%f',
    mandatory=True,
    position=-2,
    ),
    mask=dict(argstr='-mask %s',
    position=2,
    ),
    nfft=dict(argstr='-nfft %d',
    ),
    no_detrend=dict(argstr='-nodetrend',
    ),
    normalize=dict(argstr='-norm',
    ),
    notrans=dict(argstr='-notrans',
    ),
    orthogonalize_dset=dict(argstr='-dsort %s',
    ),
    orthogonalize_file=dict(argstr='-ort %s',
    ),
    out_file=dict(argstr='-prefix %s',
    genfile=True,
    name_source='in_file',
    name_template='%s_bp',
    position=1,
    ),
    outputtype=dict(),
    terminal_output=dict(nohash=True,
    ),
    tr=dict(argstr='-dt %f',
    ),
    )
    inputs = Bandpass.input_spec()

    # Nose-style generator test: each yielded tuple is one assertion.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Bandpass_outputs():
    # AUTO-GENERATED by tools/checkspecs.py: asserts the output spec of
    # Bandpass exposes exactly one trait, out_file, with no extra metadata.
    output_map = dict(out_file=dict(),
    )
    outputs = Bandpass.output_spec()

    # Nose-style generator test: each yielded tuple is one assertion.
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
rossburton/yocto-autobuilder
|
lib/python2.7/site-packages/buildbot_slave-0.8.8-py2.7.egg/buildslave/test/fake/slavebuilder.py
|
4
|
1411
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import pprint
class FakeSlaveBuilder:
    """
    Simulates a SlaveBuilder, but just records the updates from sendUpdate
    in its updates attribute. Call show() to get a pretty-printed string
    showing the updates. Set debug to True to show updates as they happen.
    """
    # Class-level default; set to True (on the class or an instance) to echo
    # every update to stdout as it is recorded.
    debug = False

    def __init__(self, usePTY=False, basedir="/slavebuilder/basedir"):
        self.updates = []                  # every dict passed to sendUpdate, in order
        self.basedir = basedir
        self.usePTY = usePTY
        self.unicode_encoding = 'utf-8'

    def sendUpdate(self, data):
        """Record *data* (and optionally echo it when debug is set)."""
        if self.debug:
            # Single %-formatted argument keeps this line valid -- and
            # printing the same text -- under both Python 2 and Python 3.
            # The original `print "FakeSlaveBuilder.sendUpdate", data` was
            # Python-2-only syntax.
            print("FakeSlaveBuilder.sendUpdate %s" % (data,))
        self.updates.append(data)

    def show(self):
        """Return all recorded updates as one pretty-printed string."""
        return pprint.pformat(self.updates)
|
gpl-2.0
|
an7oine/WinVHS
|
Cygwin/lib/python2.7/xml/sax/handler.py
|
230
|
13921
|
"""
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id$
"""
version = '2.0beta'  # version of the Python SAX API implemented by this module

#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # Default behaviour treats even recoverable errors as fatal.
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # Parenthesized single-argument print behaves identically on
        # Python 2 and 3; the original bare `print exception` was a
        # SyntaxError on Python 3.
        print(exception)
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Receiver for logical document content events.

    This is the main SAX callback interface and the one most important
    to applications; the order of events mirrors the order of the
    information in the document.  Every handler here is a no-op stub
    meant to be overridden by subclasses."""

    def __init__(self):
        # Locator supplied by the parser via setDocumentLocator, if any.
        self._locator = None

    def setDocumentLocator(self, locator):
        """Store the *locator* the parser supplies for pinpointing the
        origin of document events.

        Parsers are strongly encouraged (though not required) to supply
        one, and must do so before invoking any other method of this
        interface.  The locator reports the end position of the current
        event only while that event is being delivered; applications
        should not consult it at any other time.  Its precision is
        suited to error reporting, not to search-engine use."""
        self._locator = locator

    def startDocument(self):
        """Handle the beginning of a document.

        Invoked exactly once, before any other content or DTDHandler
        event (setDocumentLocator excepted)."""

    def endDocument(self):
        """Handle the end of a document.

        Invoked exactly once, as the final event of the parse, after
        input is exhausted or parsing was abandoned on a fatal error."""

    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI namespace mapping.

        Normal namespace processing does not need this event: with the
        http://xml.org/sax/features/namespaces feature on (the default)
        the reader substitutes prefixes automatically.  It exists so
        applications can expand prefixes appearing in character data or
        attribute values themselves.  start/endPrefixMapping pairs are
        not guaranteed to nest properly relative to each other; each
        start arrives before, and each end after, the corresponding
        startElement/endElement, but their mutual order is unspecified."""

    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.

        Always delivered after the corresponding endElement; ordering
        among endPrefixMapping events is otherwise unspecified.  See
        startPrefixMapping."""

    def startElement(self, name, attrs):
        """Signal the start of an element in non-namespace mode.

        *name* is the raw XML 1.0 element-type name; *attrs* is an
        Attributes instance holding the element's attributes."""

    def endElement(self, name):
        """Signal the end of an element in non-namespace mode.

        *name* is the element-type name, as in startElement."""

    def startElementNS(self, name, qname, attrs):
        """Signal the start of an element in namespace mode.

        *name* is a (uri, localname) tuple -- uri is None when the
        element has no namespace -- *qname* the raw XML 1.0 name from
        the source document, and *attrs* an Attributes instance with
        the element's attributes."""

    def endElementNS(self, name, qname):
        """Signal the end of an element in namespace mode.

        *name* is the element-type tuple, as in startElementNS."""

    def characters(self, content):
        """Handle a chunk of character data.

        Parsers may deliver contiguous character data as one chunk or
        several; all characters of a single event come from the same
        external entity so the Locator stays meaningful."""

    def ignorableWhitespace(self, whitespace):
        """Handle ignorable whitespace in element content.

        Validating parsers must report each chunk of ignorable
        whitespace (XML 1.0 rec., section 2.10); non-validating parsers
        may do so if they process content models.  Chunking rules match
        those of characters()."""

    def processingInstruction(self, target, data):
        """Handle a processing instruction.

        Delivered once per PI, which may occur before or after the main
        document element.  XML declarations (XML 1.0, 2.8) and text
        declarations (XML 1.0, 4.3.1) are never reported this way."""

    def skippedEntity(self, name):
        """Handle a skipped entity.

        Delivered once per skipped entity.  Non-validating processors
        may skip entities whose declarations they have not seen (e.g.
        declared in an external DTD subset); all processors may skip
        external entities, subject to the external-general-entities and
        external-parameter-entities features."""
# ===== DTDHandler =====
class DTDHandler:
    """Receiver for DTD events.

    Covers only the DTD events required for basic parsing -- unparsed
    entities and notations.  Both handlers are no-op stubs meant to be
    overridden."""

    def notationDecl(self, name, publicId, systemId):
        """Handle a notation declaration event."""

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        """Handle an unparsed entity declaration event."""
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving external entities.

    Register an implementation with your parser and it will be asked to
    resolve every external entity.  Note that DefaultHandler implements
    this interface with the default behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Return the source to read the entity from -- either a system
        identifier string or an InputSource.

        The default implementation simply hands *systemId* back
        unchanged."""
        return systemId
#============================================================================
#
# CORE FEATURES
#
#============================================================================

feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write

feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write

feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write

feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write

feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write

feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write

# Convenience list of every feature name defined above.
all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]

#============================================================================
#
# CORE PROPERTIES
#
#============================================================================

property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write

property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write

property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write

property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only

property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection).
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None
#

# Convenience list of every property name defined above.
all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
|
gpl-3.0
|
glenn-edgar/local_controller_3
|
io_control_py3/click_controller_class_py3.py
|
1
|
8632
|
#
#
# File: io_controller_class.py
#
#
#
#
#
import struct
import os
import sys
import time
import select
import socket
import json
import redis
class Click_Controller_Base_Class(object):
    """Base driver for a "Click" PLC reached over Modbus.

    Builds symbolic-name -> Modbus-address maps for the PLC's registers
    (DS/DF), control relays (C), system relays (SC) and the X/Y
    input/output point banks, and registers the common valve/counter
    operations in ``m_tags`` so callers can dispatch them by name.
    """

    def __init__(self, instrument, click_io=None, m_tags=None):
        """
        instrument -- Modbus instrument exposing read/write_bits,
                      write_registers, read_registers and read_floats.
        click_io   -- ordered list of Y-output symbols, indexed by
                      (valve number - 1).
        m_tags     -- optional dict of named operations; it is extended
                      in place with this class's standard operations.
        """
        # None sentinels replace the original mutable defaults ([], {}),
        # which were created once and shared -- and mutated -- across every
        # instance constructed with the defaults.
        if click_io is None:
            click_io = []
        if m_tags is None:
            m_tags = {}

        self.instrument = instrument
        self.click_reg_address = {}
        self.click_bit_address = {}
        self.click_io = click_io

        m_tags["turn_on_valves"] = self.turn_on_valves
        m_tags["turn_off_valves"] = self.turn_off_valves
        m_tags["load_duration_counters"] = self.load_duration_counters
        m_tags["clear_duration_counters"] = self.clear_duration_counters
        m_tags["read_mode_switch"] = self.read_mode_switch
        m_tags["read_mode"] = self.read_mode
        m_tags["read_wd_flag"] = self.read_wd_flag
        m_tags["write_wd_flag"] = self.write_wd_flag
        m_tags["read_input_bit"] = self.read_input_bit
        self.m_tags = m_tags

        # Floating-point registers DF1..DF500 occupy two words each,
        # starting at 0x7000.
        for n in range(500):
            self.click_reg_address["DF" + str(n + 1)] = 0x7000 + n * 2
        # Single-word data registers DS1..DS1000 start at address 0.
        for n in range(1000):
            self.click_reg_address["DS" + str(n + 1)] = n
        # Control relays C1..C256 start at 0x4000.
        for n in range(256):
            self.click_bit_address["C" + str(n + 1)] = 0x4000 + n
        # X (input) and Y (output) points: nine banks (0-8) spaced 0x20
        # addresses apart; Y banks are offset by 0x2000 from the X banks.
        # NOTE(review): suffix "11" is absent from the original table, so
        # point 11 of every bank is unaddressable -- preserved as-is;
        # confirm against the PLC's documented point numbering.
        suffixes = ["01", "02", "03", "04", "05", "06", "07", "08",
                    "09", "10", "12", "13", "14", "15", "16"]
        for offset, suffix in enumerate(suffixes):
            for bank in range(9):
                base = 0x20 * bank + offset
                self.click_bit_address["X" + str(bank) + suffix] = base
                self.click_bit_address["Y" + str(bank) + suffix] = 0x2000 + base
        # System control relays SC1..SC998 start at 0xF000.
        for n in range(1, 999):
            self.click_bit_address["SC" + str(n)] = 0xF000 + n - 1

    def disable_all_sprinklers(self, modbus_address, input_list):
        """Pulse relay C1 off-on-off.  input_list is unused; the parameter
        is kept for dispatch-signature compatibility."""
        write_bit = self.click_bit_address["C1"]
        self.instrument.write_bits(modbus_address, write_bit, [0])
        self.instrument.write_bits(modbus_address, write_bit, [1])
        self.instrument.write_bits(modbus_address, write_bit, [0])

    def turn_on_valves(self, modbus_address, input_list):
        """Set the output bit for each 1-based valve number in input_list."""
        for valve in input_list:
            bit_symbol = self.click_io[valve - 1]
            bit_address = self.click_bit_address[bit_symbol]
            self.instrument.write_bits(modbus_address, bit_address, [1])

    def turn_off_valves(self, modbus_address, input_list):
        """Clear the output bit for each 1-based valve number in input_list."""
        for valve in input_list:
            bit_symbol = self.click_io[valve - 1]
            bit_address = self.click_bit_address[bit_symbol]
            self.instrument.write_bits(modbus_address, bit_address, [0])

    def load_duration_counters(self, modbus_address, input_list):
        """Write input_list[0] (a duration) to DS2, then drive C2 low
        then high to latch it."""
        duration = input_list[0]
        write_bit = self.click_bit_address["C2"]
        write_register = self.click_reg_address["DS2"]
        self.instrument.write_registers(modbus_address, write_register, [duration])
        self.instrument.write_bits(modbus_address, write_bit, [0])
        self.instrument.write_bits(modbus_address, write_bit, [1])

    def clear_duration_counters(self, modbus_address):
        """Zero DS2 and drive C2 low."""
        write_bit = self.click_bit_address["C2"]
        write_register = self.click_reg_address["DS2"]
        self.instrument.write_registers(modbus_address, write_register, [0])
        self.instrument.write_bits(modbus_address, write_bit, [0])

    def read_input_bit(self, modbus_address, input_list):
        """Read one discrete bit; input_list[0] is its symbol (e.g. "X001")."""
        read_bit = self.click_bit_address[input_list[0]]
        return self.instrument.read_bits(modbus_address, read_bit, 1)[0]

    def read_mode_switch(self, modbus_address):
        """Read system relay SC11 (mode switch, per the method name)."""
        read_bit = self.click_bit_address["SC11"]
        return self.instrument.read_bits(modbus_address, read_bit, 1)[0]

    def read_mode(self, modbus_address):
        """Read system relay SC10 (run mode, per the method name)."""
        read_bit = self.click_bit_address["SC10"]
        return self.instrument.read_bits(modbus_address, read_bit, 1)[0]

    def read_wd_flag(self, modbus_address):
        """Read watchdog flag relay C200."""
        read_bit = self.click_bit_address["C200"]
        return self.instrument.read_bits(modbus_address, read_bit, 1)[0]

    def write_wd_flag(self, modbus_address):
        """Set watchdog flag relay C200."""
        write_bit = self.click_bit_address["C200"]
        self.instrument.write_bits(modbus_address, write_bit, [1])

    def measure_counter(self, modbus_address, io_dict):
        """Latch and read a counter.

        io_dict["latch_bit"] names the relay to pulse; the rising edge
        latches the counter into io_dict["read_register"], which is then
        read back after a short settle delay.
        """
        counter_register = io_dict["read_register"]
        latch_bit = io_dict["latch_bit"]
        write_bit = self.click_bit_address[latch_bit]
        # These three writes create a rising pulse on the latch relay.
        self.instrument.write_bits(modbus_address, write_bit, [0])
        self.instrument.write_bits(modbus_address, write_bit, [1])
        self.instrument.write_bits(modbus_address, write_bit, [0])
        time.sleep(.1)  # allow a PLC scan to latch before reading back
        read_register = self.click_reg_address[counter_register]
        counter_value = self.instrument.read_registers(modbus_address, read_register, 1)[0]
        return counter_value
class Click_Controller_Base_Class_44(Click_Controller_Base_Class):
    """44-output Click controller: base I/O map plus analog and counter reads."""

    def __init__(self, instrument):
        # Valve number (1-44) -> output symbol, in wiring order.
        click_io = [
            "Y001", "Y002", "Y003", "Y004",                                   # 1-4
            "Y101", "Y102", "Y103", "Y104", "Y105", "Y106", "Y107", "Y108",   # 5-12
            "Y201", "Y202", "Y203", "Y204", "Y205", "Y206", "Y207", "Y208",   # 13-20
            "Y301", "Y302", "Y303", "Y304", "Y305", "Y306", "Y307", "Y308",   # 21-28
            "Y401", "Y402", "Y403", "Y404", "Y405", "Y406", "Y407", "Y408",   # 29-36
            "Y501", "Y502", "Y503", "Y504",                                   # 37-40
            "Y601", "Y602", "Y603", "Y604"                                    # 41-44
        ]
        m_tags = {}
        m_tags["measure_analog"] = self.measure_analog
        m_tags["measure_counter"] = self.measure_counter
        super().__init__(instrument, click_io, m_tags=m_tags)

    def measure_analog(self, modbus_address, list_input):
        """Read one float from the register named by list_input[0] and
        scale it by list_input[1].

        The original code forced ``str(list_input[0])`` and then guarded
        on ``isinstance(read_register, str)`` -- the guard was always
        true (and its false branch would have raised NameError on the
        unbound result), so it has been removed.
        """
        read_register = str(list_input[0])
        conversion_factor = list_input[1]
        register = self.click_reg_address[read_register]
        value = self.instrument.read_floats(modbus_address, register, 1)
        return value[0] * conversion_factor
class Click_Controller_Base_Class_22(Click_Controller_Base_Class):
    """22-output Click controller variant (base operations only)."""

    def __init__(self, instrument):
        # Valve number (1-22) -> output symbol: six points on bank Y0,
        # then the full eight-point banks Y1 and Y2.
        click_io = (["Y00%d" % point for point in range(1, 7)]
                    + ["Y10%d" % point for point in range(1, 9)]
                    + ["Y20%d" % point for point in range(1, 9)])
        super().__init__(instrument, click_io)
# Manual smoke test: needs a reachable Modbus instrument.  Builds both
# controller variants, dumps their dispatch tables, then reads, sets and
# re-reads the watchdog flag of the PLC at Modbus address 100.
if __name__ == "__main__":
    from .new_instrument_py3 import Modbus_Instrument
    instrument = Modbus_Instrument()
    x = Click_Controller_Base_Class_44( instrument )
    y = Click_Controller_Base_Class_22( instrument )
    print (x.m_tags)
    print (y.m_tags)
    print(x.read_wd_flag( 100 ))
    x.write_wd_flag(100)
    print(x.read_wd_flag( 100 ))
|
mit
|
eggplantbren/ExperimentalNS
|
TwoScalars/DNest/postprocess.py
|
1
|
7100
|
# Copyright (c) 2009, 2010, 2011, 2012 Brendon J. Brewer.
#
# This file is part of DNest3.
#
# DNest3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DNest3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DNest3. If not, see <http://www.gnu.org/licenses/>.
import copy
import numpy as np
import matplotlib.pyplot as plt
def logsumexp(values):
    """Return log(sum(exp(values))) computed without overflow.

    Shifts everything by the maximum element so the largest exponent
    evaluated is exp(0)."""
    shift = np.max(values)
    return np.log(np.sum(np.exp(values - shift))) + shift
def logdiffexp(x1, x2):
    """Return log(exp(x1) - exp(x2)) for x1 > x2, computed without overflow.

    Shifts by x1 so only exp(x2 - x1) <= 1 is ever evaluated.  Uses
    np.log1p(-e) instead of the original np.log(1 - e) for better
    floating-point precision when x2 is close to x1 (e near 1 is still
    lossy, but small e is now exact to machine precision)."""
    return np.log1p(-np.exp(x2 - x1)) + x1
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
                cut=0., save=True, zoom_in=True):
    """Post-process DNest output into evidence, information and a posterior.

    temperature     -- divide log-likelihoods by this before weighting.
    numResampleLogX -- number of stochastic resamplings of the log(X)
                       assignment (1 = deterministic assignment).
    plot            -- draw the diagnostic matplotlib figures.
    loaded          -- optional pre-loaded (levels, sample_info, sample)
                       arrays; when empty, read from the text files.
                       NOTE(review): a mutable default list, but it is
                       never mutated here, so it is harmless.
    cut             -- fraction of initial samples to discard as burn-in.
    save            -- write weights.txt and posterior_sample.txt.
    zoom_in         -- restrict the log(L) plot's y-range to the top 90%.

    Returns [logz_estimate, H_estimate, logx_samples, flattened logp].

    NOTE(review): uses xrange and plt.hold, i.e. Python 2 and an old
    matplotlib -- porting to Python 3 / current matplotlib would need
    range() and explicit axis clearing.  TODO confirm target environment.
    """
    if len(loaded) == 0:
        levels = np.atleast_2d(np.loadtxt("levels.txt"))
        sample_info = np.atleast_2d(np.loadtxt("sample_info.txt"))
        sample = np.atleast_2d(np.loadtxt("sample.txt"))
        #if(sample.shape[0] == 1):
        #    sample = sample.T
    else:
        levels, sample_info, sample = loaded[0], loaded[1], loaded[2]

    # Discard the burn-in fraction from the front of both arrays.
    sample = sample[int(cut*sample.shape[0]):, :]
    sample_info = sample_info[int(cut*sample_info.shape[0]):, :]

    # The two files are written independently; trim to the common length.
    if sample.shape[0] != sample_info.shape[0]:
        print('# Size mismatch. Truncating...')
        lowest = np.min([sample.shape[0], sample_info.shape[0]])
        sample = sample[0:lowest, :]
        sample_info = sample_info[0:lowest, :]

    # Diagnostic figures: level occupation, compression and MH acceptance.
    if plot:
        if numResampleLogX > 1:
            plt.ion()
        plt.figure(1)
        plt.plot(sample_info[:,0])
        plt.xlabel("Iteration")
        plt.ylabel("Level")
        if numResampleLogX > 1:
            plt.draw()

        plt.figure(2)
        plt.subplot(2,1,1)
        plt.plot(np.diff(levels[:,0]))
        plt.ylabel("Compression")
        plt.xlabel("Level")
        xlim = plt.gca().get_xlim()
        # Level spacings should hover around -1 nat of compression.
        plt.axhline(-1., color='r')
        plt.ylim(ymax=0.05)
        if numResampleLogX > 1:
            plt.draw()
        plt.subplot(2,1,2)
        good = np.nonzero(levels[:,4] > 0)[0]
        plt.plot(levels[good,3]/levels[good,4])
        plt.xlim(xlim)
        plt.ylim([0., 1.])
        plt.xlabel("Level")
        plt.ylabel("MH Acceptance")
        if numResampleLogX > 1:
            plt.draw()

    # Convert to lists of tuples
    logl_levels = [(levels[i,1], levels[i, 2]) for i in xrange(0, levels.shape[0])] # logl, tiebreaker
    logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in xrange(0, sample.shape[0])] # logl, tiebreaker, id
    logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
    logz_estimates = np.zeros((numResampleLogX, 1))
    H_estimates = np.zeros((numResampleLogX, 1))

    # Find sandwiching level for each sample: the deepest level whose
    # (logl, tiebreaker) threshold the sample still exceeds.
    sandwich = sample_info[:,0].copy().astype('int')
    sandwich *= 0
    for i in xrange(0, sample.shape[0]):
        while sandwich[i] < levels.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
            sandwich[i] += 1

    for z in xrange(0, numResampleLogX):
        # For each level
        for i in range(0, levels.shape[0]):
            # Find the samples sandwiched by this level
            which = np.nonzero(sandwich == i)[0]
            logl_samples_thisLevel = [] # (logl, tieBreaker, ID)
            for j in xrange(0, len(which)):
                logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
            logl_samples_thisLevel = sorted(logl_samples_thisLevel)
            N = len(logl_samples_thisLevel)

            # Generate intermediate logx values: uniform in X between this
            # level's logx (logx_max) and the next level's (logx_min).
            logx_max = levels[i, 0]
            if i == levels.shape[0]-1:
                logx_min = -1E300
            else:
                logx_min = levels[i+1, 0]
            Umin = np.exp(logx_min - logx_max)

            # Deterministic evenly-spaced assignment for a single pass;
            # random draws when resampling to propagate logx uncertainty.
            if N == 0 or numResampleLogX > 1:
                U = Umin + (1. - Umin)*np.random.rand(len(which))
            else:
                U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
            logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]

            for j in xrange(0, which.size):
                logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]

                # Prior-mass weight of this sample: half the X-interval
                # between its neighbours (trapezoidal assignment).
                if j != which.size - 1:
                    left = logx_samples_thisLevel[j+1]
                elif i == levels.shape[0]-1:
                    left = -1E300
                else:
                    left = levels[i+1][0]

                if j != 0:
                    right = logx_samples_thisLevel[j-1]
                else:
                    right = levels[i][0]

                logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)

        logl = sample_info[:,1]/temperature

        # Normalise prior weights, combine with likelihood, and derive the
        # evidence (logZ) and information (H) for this resampling.
        logp_samples[:,z] = logp_samples[:,z] - logsumexp(logp_samples[:,z])
        logP_samples[:,z] = logp_samples[:,z] + logl
        logz_estimates[z] = logsumexp(logP_samples[:,z])
        logP_samples[:,z] -= logz_estimates[z]
        P_samples[:,z] = np.exp(logP_samples[:,z])
        H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:,z]*logl)

        if plot:
            plt.figure(3)
            if z == 0:
                plt.subplot(2,1,1)
                plt.plot(logx_samples[:,z], sample_info[:,1], 'b.', label='Samples')
                plt.hold(True)
                plt.plot(levels[1:,0], levels[1:,1], 'r.', label='Levels')
                plt.legend(numpoints=1, loc='lower left')
                plt.ylabel('log(L)')
                plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
                # Use all plotted logl values to set ylim
                combined_logl = np.hstack([sample_info[:,1], levels[1:, 1]])
                combined_logl = np.sort(combined_logl)
                lower = combined_logl[int(0.1*combined_logl.size)]
                upper = combined_logl[-1]
                diff = upper - lower
                lower -= 0.05*diff
                upper += 0.05*diff
                if zoom_in:
                    plt.ylim([lower, upper])

            if numResampleLogX > 1:
                plt.draw()
            xlim = plt.gca().get_xlim()

        if plot:
            plt.subplot(2,1,2)
            plt.hold(False)
            plt.plot(logx_samples[:,z], P_samples[:,z], 'b.')
            plt.ylabel('Posterior Weights')
            plt.xlabel('log(X)')
            plt.xlim(xlim)
            if numResampleLogX > 1:
                plt.draw()

    # Average weights over resamplings and summarise the estimates.
    P_samples = np.mean(P_samples, 1)
    P_samples = P_samples/np.sum(P_samples)
    logz_estimate = np.mean(logz_estimates)
    logz_error = np.std(logz_estimates)
    H_estimate = np.mean(H_estimates)
    H_error = np.std(H_estimates)
    # Effective sample size from the entropy of the normalised weights;
    # the 1E-300 guards log(0) for zero-weight samples.
    ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))

    print("log(Z) = " + str(logz_estimate) + " +- " + str(logz_error))
    print("Information = " + str(H_estimate) + " +- " + str(H_error) + " nats.")
    print("Effective sample size = " + str(ESS))

    # Resample to uniform weight
    N = int(ESS)
    posterior_sample = np.zeros((N, sample.shape[1]))
    w = P_samples
    w = w/np.max(w)
    if save:
        np.savetxt('weights.txt', w) # Save weights
    # Rejection sampling: accept a random sample with probability w[which].
    for i in xrange(0, N):
        while True:
            which = np.random.randint(sample.shape[0])
            if np.random.rand() <= w[which]:
                break
        posterior_sample[i,:] = sample[which,:]
    if save:
        np.savetxt("posterior_sample.txt", posterior_sample)
    if plot:
        if numResampleLogX > 1:
            plt.ioff()
        plt.show()

    return [logz_estimate, H_estimate, logx_samples, logp_samples.flatten()]
|
gpl-3.0
|
raymondxyang/tensorflow
|
tensorflow/python/saved_model/loader.py
|
127
|
2726
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loader functionality for SavedModel with hermetic, language-neutral exports.
Load and restore capability for a SavedModel, which may include multiple meta
graph defs. Each SavedModel is associated with a single checkpoint. Each meta
graph def is saved with one or more tags, which are used to identify the exact
meta graph def to load.
The `load` operation requires the session in which to restore the graph
definition and variables, the tags used to identify the meta graph def to
load and the location of the SavedModel.
Upon a load, the subset of variables and assets supplied as part of the specific
meta graph def, will be restored into the supplied session. The values of the
variables though will correspond to the saved values from the first meta graph
added to the SavedModel using `add_meta_graph_and_variables(...)` in
`builder.py`.
Typical usage:
```python
...
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph_and_variables(sess,
["foo-tag"],
signature_def_map=foo_signatures,
assets_collection=foo_assets)
...
with tf.Session(graph=tf.Graph()) as sess:
...
builder.add_meta_graph(["bar-tag", "baz-tag"],
assets_collection=bar_baz_assets)
...
builder.save()
...
with tf.Session(graph=tf.Graph()) as sess:
tf.saved_model.loader.load(sess, ["foo-tag"], export_dir)
...
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
# Re-exported so callers can use tf.saved_model.loader.load(...) and
# tf.saved_model.loader.maybe_saved_model_directory(...) directly.
from tensorflow.python.saved_model.loader_impl import load
from tensorflow.python.saved_model.loader_impl import maybe_saved_model_directory
# pylint: enable=unused-import

from tensorflow.python.util.all_util import remove_undocumented

# Public API of this module; every other name is stripped from the
# namespace by remove_undocumented() below.
_allowed_symbols = [
    "load",
    "maybe_saved_model_directory",
]

remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
111t8e/h2o-2
|
src/test/python/take_one_test.py
|
9
|
1096
|
import unittest
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import after_class
from proboscis import before_class
from proboscis import SkipTest
from proboscis import test
import random
import types
import unittest
import sys, pprint
sys.path.extend(['.','..','py'])
@test(groups=["acceptance"])
class TestModelManagement(object):
    """Minimal proboscis smoke test: three trivially-passing chained tests.

    The ``depends_on`` arguments reference the previously decorated test
    functions, so proboscis schedules testA -> testB -> testC in order.
    """

    @test(groups=['acceptance'])
    def testA(self):
        # First link in the dependency chain.
        self.a = "a"
        assert_equal(self.a, "a")

    @test(groups=["acceptance"], depends_on=[testA])
    def testB(self):
        # Only runs after testA has passed.
        self.b = "b"
        assert_equal ( self.b, "b" )

    @test(groups=["acceptance"], depends_on=[testB])
    def testC(self):
        # Only runs after testB has passed.
        self.c = "c"
        assert_equal ( self.c, "c" )
## ----------------- proboscis boiler plate hook -------------------------
# no reason to modify anything below
def run_tests():
    """Discover and execute the proboscis test plan, then exit with its status."""
    from proboscis import TestProgram

    program = TestProgram()
    program.run_and_exit()


if __name__ == '__main__':
    run_tests()
|
apache-2.0
|
vgupta6/Project-2
|
tests/unit_tests/modules/s3/s3gis/TrueCodePaths.py
|
20
|
13547
|
import unittest
class TrueCodePaths(unittest.TestCase):
    """Exercise S3 GIS ``show_map`` with (almost) every feature switched on.

    NOTE(review): this test runs inside a web2py request environment, so
    ``request``, ``session``, ``current`` and ``s3base`` are injected
    globals rather than imports -- confirm against the test runner.
    """

    def setUp(self):
        # Fake a lat/lon/zoom request and grant role id 1 (admin) so the
        # map-admin code paths are exercised; the old roles are kept for
        # restoration in tearDown.
        vars = request.vars
        vars["lat"] = 0
        vars["lon"] = 0
        vars["zoom"] = 1
        self.old_s3roles = list(session.s3.roles)
        session.s3.roles.append(1)

    def tearDown(self):
        # Undo the request/session mutations made in setUp.
        vars = request.vars
        del vars["lat"]
        del vars["lon"]
        del vars["zoom"]
        session.s3.roles = self.old_s3roles

    def check(test, scripts):
        """Render a fully-featured map and assert the expected JS fragments
        and script tags appear verbatim in the generated output.

        NOTE(review): the first parameter is the instance (conventionally
        ``self``); it is simply named ``test`` here.
        """
        # JavaScript fragments that show_map() must emit, byte-for-byte.
        expected = [
            "S3.public_url = 'http://127.0.0.1:8000';",
            "S3.gis.mapAdmin = true;",
            "S3.gis.window = true;",
            "S3.gis.windowHide = true;",
            "S3.gis.west_collapsed = true;",
            "S3.gis.map_height = 123;",
            "S3.gis.map_width = 123;",
            "S3.gis.zoom = 1;",
            "S3.gis.lat, S3.gis.lon;",
            "S3.gis.bottom_left = new OpenLayers.LonLat(-10.000000, -10.000000);",
            "S3.gis.top_right = new OpenLayers.LonLat(10.000000, 10.000000);",
            "S3.gis.projection = '900913';",
            "S3.gis.units = 'm';",
            "S3.gis.maxResolution = 156543.033900;",
            "S3.gis.maxExtent = new OpenLayers.Bounds(-20037508, -20037508, 20037508, 20037508.34);",
            "S3.gis.numZoomLevels = 22;",
            "S3.gis.max_w = 30;",
            "S3.gis.max_h = 35;",
            "S3.gis.mouse_position = 'mgrs';",
            "S3.gis.wms_browser_name = 'Test WMS browser';",
            "S3.gis.wms_browser_url = 'test%3A//test_WMS_URL';",
            "S3.gis.draw_feature = 'inactive';",
            "S3.gis.draw_polygon = 'inactive';",
            "S3.gis.marker_default = 'gis_marker.image.marker_red.png';",
            "S3.gis.marker_default_height = 34;",
            "S3.gis.marker_default_width = 20;",
            "S3.i18n.gis_legend = 'Legend';",
            "S3.i18n.gis_search = 'Search Geonames';",
            "S3.i18n.gis_search_no_internet = 'Geonames.org search requires Internet connectivity!';",
            "S3.i18n.gis_requires_login = 'Requires Login';",
            "S3.i18n.gis_base_layers = 'Base Layers';",
            "S3.i18n.gis_overlays = 'Overlays';",
            "S3.i18n.gis_layers = 'Layers';",
            "S3.i18n.gis_draft_layer = 'Draft Features';",
            "S3.i18n.gis_cluster_multiple = 'There are multiple records at this location';",
            "S3.i18n.gis_loading = 'Loading';",
            "S3.i18n.gis_length_message = 'The length is';",
            "S3.i18n.gis_area_message = 'The area is';",
            "S3.i18n.gis_length_tooltip = 'Measure Length: Click the points along the path & end with a double-click';",
            "S3.i18n.gis_area_tooltip = 'Measure Area: Click the points around the polygon & end with a double-click';",
            "S3.i18n.gis_zoomfull = 'Zoom to maximum map extent';",
            "S3.i18n.gis_zoomout = 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle';",
            "S3.i18n.gis_zoomin = 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle';",
            "S3.i18n.gis_pan = 'Pan Map: keep the left mouse button pressed and drag the map';",
            "S3.i18n.gis_navPrevious = 'Previous View';",
            "S3.i18n.gis_navNext = 'Next View';",
            "S3.i18n.gis_geoLocate = 'Zoom to Current Location';",
            "S3.i18n.gis_draw_feature = 'Add Point';",
            "S3.i18n.gis_draw_polygon = 'Add Polygon';",
            "S3.i18n.gis_save = 'Save: Default Lat, Lon & Zoom for the Viewport';",
            "S3.i18n.gis_potlatch = 'Edit the OpenStreetMap data for this area';",
            "S3.i18n.gis_current_location = 'Current Location';",
            # Multi-line fragment: the whole print-tool setup block must be
            # present verbatim (whitespace included).
            """if (typeof(printCapabilities) != 'undefined') {
// info.json from script headers OK
printProvider = new GeoExt.data.PrintProvider({
//method: 'POST',
//url: 'test_print_script_url/',
method: 'GET', // 'POST' recommended for production use
capabilities: printCapabilities, // from the info.json returned from the script headers
customParams: {
mapTitle: 'Test Map Title',
subTitle: 'Printed from Sahana Eden',
creator: ''
}
});
// Our print page. Stores scale, center and rotation and gives us a page
// extent feature that we can add to a layer.
printPage = new GeoExt.data.PrintPage({
printProvider: printProvider
});
//var printExtent = new GeoExt.plugins.PrintExtent({
// printProvider: printProvider
//});
// A layer to display the print page extent
//var pageLayer = new OpenLayers.Layer.Vector('Print Extent');
//pageLayer.addFeatures(printPage.feature);
//pageLayer.setVisibility(false);
//map.addLayer(pageLayer);
//var pageControl = new OpenLayers.Control.TransformFeature();
//map.addControl(pageControl);
//map.setOptions({
// eventListeners: {
// recenter/resize page extent after pan/zoom
// 'moveend': function() {
// printPage.fit(mapPanel, true);
// }
// }
//});
// The form with fields controlling the print output
S3.gis.printFormPanel = new Ext.form.FormPanel({
title: 'Print Map',
rootVisible: false,
split: true,
autoScroll: true,
collapsible: true,
collapsed: true,
collapseMode: 'mini',
lines: false,
bodyStyle: 'padding:5px',
labelAlign: 'top',
defaults: {anchor: '100%%'},
listeners: {
'expand': function() {
//if (null == mapPanel.map.getLayersByName('Print Extent')[0]) {
// mapPanel.map.addLayer(pageLayer);
//}
if (null == mapPanel.plugins[0]) {
//map.addLayer(pageLayer);
//pageControl.activate();
//mapPanel.plugins = [ new GeoExt.plugins.PrintExtent({
// printProvider: printProvider,
// map: map,
// layer: pageLayer,
// control: pageControl
//}) ];
//mapPanel.plugins[0].addPage();
}
},
'collapse': function() {
//mapPanel.map.removeLayer(pageLayer);
//if (null != mapPanel.plugins[0]) {
// map.removeLayer(pageLayer);
// mapPanel.plugins[0].removePage(mapPanel.plugins[0].pages[0]);
// mapPanel.plugins = [];
//}
}
},
items: [{
xtype: 'textarea',
name: 'comment',
value: '',
fieldLabel: 'Comment',
plugins: new GeoExt.plugins.PrintPageField({
printPage: printPage
})
}, {
xtype: 'combo',
store: printProvider.layouts,
displayField: 'name',
fieldLabel: 'Layout',
typeAhead: true,
mode: 'local',
triggerAction: 'all',
plugins: new GeoExt.plugins.PrintProviderField({
printProvider: printProvider
})
}, {
xtype: 'combo',
store: printProvider.dpis,
displayField: 'name',
fieldLabel: 'Resolution',
tpl: '<tpl for="."><div class="x-combo-list-item">{name} dpi</div></tpl>',
typeAhead: true,
mode: 'local',
triggerAction: 'all',
plugins: new GeoExt.plugins.PrintProviderField({
printProvider: printProvider
}),
// the plugin will work even if we modify a combo value
setValue: function(v) {
v = parseInt(v) + ' dpi';
Ext.form.ComboBox.prototype.setValue.apply(this, arguments);
}
//}, {
// xtype: 'combo',
// store: printProvider.scales,
// displayField: 'name',
// fieldLabel: 'Scale',
// typeAhead: true,
// mode: 'local',
// triggerAction: 'all',
// plugins: new GeoExt.plugins.PrintPageField({
// printPage: printPage
// })
//}, {
// xtype: 'textfield',
// name: 'rotation',
// fieldLabel: 'Rotation',
// plugins: new GeoExt.plugins.PrintPageField({
// printPage: printPage
// })
}],
buttons: [{
text: 'Create PDF',
handler: function() {
// the PrintExtent plugin is the mapPanel's 1st plugin
//mapPanel.plugins[0].print();
// convenient way to fit the print page to the visible map area
printPage.fit(mapPanel, true);
// print the page, including the legend, where available
if (null == legendPanel) {
printProvider.print(mapPanel, printPage);
} else {
printProvider.print(mapPanel, printPage, {legend: legendPanel});
}
}
}]
});
} else {
// Display error diagnostic
S3.gis.printFormPanel = new Ext.Panel ({
title: 'Print Map',
rootVisible: false,
split: true,
autoScroll: true,
collapsible: true,
collapsed: true,
collapseMode: 'mini',
lines: false,
bodyStyle: 'padding:5px',
labelAlign: 'top',
defaults: {anchor: '100%'},
html: 'Printing disabled since server not accessible: <BR />test_print_script_url/'
});
}"""
        ]
        # Render a map with essentially every optional feature enabled.
        test_gis = s3base.GIS()
        actual_output = str(
            test_gis.show_map(
                projection = 900913,
                height = 123,
                width = 123,
                bbox = dict(
                    max_lat= 10,
                    min_lat= -10,
                    max_lon= 10,
                    min_lon= -10
                ),
                legend = "Test",
                add_feature = True,
                add_polygon = True,
                window = True,
                closable = True,
                mouse_position = "mgrs",
                wms_browser = {
                    "name": "Test WMS browser",
                    "url": "test://test_WMS_URL"
                },
                print_tool = {
                    "url": "test_print_script_url/",
                    "subTitle": "Tested from TestS3GIS",
                    # looks like a bug: "mapTitle" vs "title"
                    "title": "Test print tool",
                    "mapTitle": "Test Map Title"
                },
                collapsed = True,
                window_hide = True,
                catalogue_toolbar = True,
                toolbar = True,
                search = True,
                catalogue_layers = True,
                zoom = 1,
            )
        )
        for expected_line in expected:
            assert expected_line in actual_output
        # Every requested script tag must appear, with the application name
        # substituted into its path.
        substitutions = dict(application_name = request.application)
        for script in scripts:
            script_string = "<script src=\"%s\" type=\"text/javascript\"></script>" % (
                script % substitutions
            )
            assert script_string in actual_output

    def test_true_code_paths_with_debug(self):
        # Debug mode pulls in the un-minified helper scripts.
        current.session.s3.debug = True
        self.check(
            scripts = (
                "/%(application_name)s/static/scripts/gis/usng2.js",
                "/%(application_name)s/static/scripts/gis/MP.js",
                "/%(application_name)s/static/test_print_script_url/info.json?var=printCapabilities",
            )
        )

    def test_true_code_paths(self):
        "Basic map with true code paths turned on"
        # Non-debug mode serves the minified MGRS script instead.
        current.session.s3.debug = False
        self.check(
            scripts = (
                "/%(application_name)s/static/test_print_script_url/info.json?var=printCapabilities",
                "/%(application_name)s/static/scripts/gis/MGRS.min.js",
            )
        )
|
mit
|
gvangool/django-extensions
|
django_extensions/management/commands/drop_test_database.py
|
25
|
5810
|
import logging
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from six.moves import configparser, input
from django_extensions.management.utils import signalcommand
try:
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
except ImportError:
# Django < 1.7
from django.db.backends.creation import TEST_DATABASE_PREFIX
class Command(BaseCommand):
    """Management command that drops this project's *test* database.

    Supports sqlite3/spatialite (file unlink), MySQL, and
    PostgreSQL/PostGIS (``DROP DATABASE IF EXISTS``).
    """

    option_list = BaseCommand.option_list + (
        make_option('--noinput', action='store_false',
                    dest='interactive', default=True,
                    help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('-U', '--user', action='store',
                    dest='user', default=None,
                    help='Use another user for the database then defined in settings.py'),
        make_option('-P', '--password', action='store',
                    dest='password', default=None,
                    help='Use another password for the database then defined in settings.py'),
        make_option('-D', '--dbname', action='store',
                    dest='dbname', default=None,
                    help='Use another database name then defined in settings.py'),
        make_option('-R', '--router', action='store',
                    dest='router', default='default',
                    help='Use this router-database other then defined in settings.py'),
    )
    help = "Drops test database for this project."

    @signalcommand
    def handle(self, *args, **options):
        """
        Drop test database for this project.
        """
        if args:
            raise CommandError("reset_db takes no arguments")
        router = options.get('router')
        dbinfo = settings.DATABASES.get(router)
        if dbinfo is None:
            raise CommandError("Unknown database router %s" % router)
        # e.g. 'django.db.backends.mysql' -> 'mysql'
        engine = dbinfo.get('ENGINE').split('.')[-1]
        user = password = database_name = ''
        if engine == 'mysql':
            # MySQL may keep credentials in an option file (my.cnf style).
            read_default_file = dbinfo.get('OPTIONS', {}).get('read_default_file')
            if read_default_file:
                config = configparser.ConfigParser()
                config.read(read_default_file)
                user = config.get('client', 'user')
                password = config.get('client', 'password')
                database_name = config.get('client', 'database')
        # Precedence: command-line switches > settings > my.cnf values.
        user = options.get('user') or dbinfo.get('USER') or user
        password = options.get('password') or dbinfo.get('PASSWORD') or password
        try:
            # Django >= 1.7 style explicitly-named test database.
            database_name = dbinfo['TEST']['NAME']
        except KeyError:
            database_name = None
        if database_name is None:
            # Fall back to the conventional 'test_' + <name> database name.
            database_name = TEST_DATABASE_PREFIX + (options.get('dbname') or dbinfo.get('NAME'))
        if database_name is None or database_name == '':
            raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
        database_host = dbinfo.get('HOST')
        database_port = dbinfo.get('PORT')
        verbosity = int(options.get('verbosity', 1))
        # Destructive operation: require explicit confirmation unless
        # --noinput was given.
        if options.get('interactive'):
            confirm = input("""
You have requested to drop the test database.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
        else:
            confirm = 'yes'
        if confirm != 'yes':
            print("Reset cancelled.")
            return
        if engine in ('sqlite3', 'spatialite'):
            import os
            try:
                logging.info("Unlinking %s database" % engine)
                if os.path.isfile(database_name):
                    os.unlink(database_name)
            except OSError:
                # Best effort: a missing or locked file is not fatal.
                pass
        elif engine in ('mysql',):
            import MySQLdb as Database
            kwargs = {
                'user': user,
                'passwd': password,
            }
            # A leading '/' denotes a unix socket path, not a hostname.
            if database_host.startswith('/'):
                kwargs['unix_socket'] = database_host
            else:
                kwargs['host'] = database_host
            if database_port:
                kwargs['port'] = int(database_port)
            connection = Database.connect(**kwargs)
            drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
            logging.info('Executing: "' + drop_query + '"')
            connection.query(drop_query)
        elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'):
            if engine == 'postgresql':
                import psycopg as Database  # NOQA
            elif engine in ('postgresql_psycopg2', 'postgis'):
                import psycopg2 as Database  # NOQA
            # Connect to template1 because the target database is about to
            # be dropped and cannot be the active connection.
            conn_string = "dbname=template1"
            if user:
                conn_string += " user=%s" % user
            if password:
                conn_string += " password='%s'" % password
            if database_host:
                conn_string += " host=%s" % database_host
            if database_port:
                conn_string += " port=%s" % database_port
            connection = Database.connect(conn_string)
            connection.set_isolation_level(0)  # 0 = autocommit; DROP DATABASE cannot run inside a transaction
            cursor = connection.cursor()
            drop_query = "DROP DATABASE IF EXISTS \"%s\";" % database_name
            logging.info('Executing: "' + drop_query + '"')
            try:
                cursor.execute(drop_query)
            except Database.ProgrammingError as e:
                logging.exception("Error: %s" % str(e))
        else:
            raise CommandError("Unknown database engine %s" % engine)
        if verbosity >= 2 or options.get('interactive'):
            print("Reset successful.")
|
mit
|
atrick/swift
|
utils/split_file.py
|
65
|
1410
|
#!/usr/bin/env python
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import argparse
import os
import re
import sys

# Annotation that switches output to a new file: "// BEGIN file1.swift"
_BEGIN_MARKER = re.compile(r'^//\s*BEGIN\s+([^\s]+)\s*$')

parser = argparse.ArgumentParser(
    description="""
Take the file at <path> and write it to multiple files, switching to a new file
every time an annotation of the form "// BEGIN file1.swift" is encountered. If
<dir> is specified, place the files in <dir>; otherwise, put them in the
current directory.
""")
parser.add_argument(
    "-o", dest="out_dir", default=".", metavar="<dir>",
    help="directory path where the output files are placed in. "
         "(defaults to current directory)")
parser.add_argument(
    "input", type=argparse.FileType("r"), nargs="?", default=sys.stdin,
    metavar="<path>",
    help="input file. (defaults to stdin)")
args = parser.parse_args()

current_out = None
for line in args.input:
    marker = _BEGIN_MARKER.match(line)
    if marker is not None:
        # A new BEGIN annotation: finish the previous file, open the next.
        if current_out:
            current_out.close()
        current_out = open(os.path.join(args.out_dir, marker.group(1)), 'w')
    elif current_out:
        # Lines before the first BEGIN annotation are intentionally dropped.
        current_out.write(line)
args.input.close()
if current_out:
    current_out.close()
|
apache-2.0
|
caot/intellij-community
|
python/edu/interactive-learning-python/resources/com/jetbrains/python/edu/user_tester.py
|
41
|
1825
|
import sys
import imp
import os
import subprocess
USER_TESTS = "userTests"
TEST_FAILED = "FAILED"
TEST_PASSED = "PASSED"
INPUT = "input"
OUTPUT = "output"
def get_index(logical_name, full_name):
    """Return the integer suffix of *full_name* following prefix *logical_name*.

    E.g. ``get_index("input", "input12") == 12``.  Returns ``-1`` when the
    prefix does not match or the remainder is not a non-empty decimal number.

    Bug fix: the original read only the FIRST character after the prefix,
    so "input12" yielded 1 (and was then paired with "output1"), and a
    non-digit suffix such as "inputs" raised ValueError instead of being
    skipped.
    """
    if not full_name.startswith(logical_name):
        return -1
    suffix = full_name[len(logical_name):]
    # Reject empty or non-numeric suffixes instead of crashing.
    if not suffix.isdigit():
        return -1
    return int(suffix)
def process_user_tests(file_path):
    """Collect ``(input_path, output_path, index)`` triples for the user tests.

    Loads the solution module at *file_path* (import side effect preserved
    from the original), then pairs every "inputN" file in the sibling
    userTests directory with its matching "outputN" file.  The result is
    sorted by test index.
    """
    imp.load_source('user_file', file_path)
    tests_dir = os.path.abspath(os.path.join(file_path, os.pardir, USER_TESTS))
    entries = os.listdir(tests_dir)
    collected = []
    for entry in entries:
        idx = get_index(INPUT, entry)
        if idx == -1:
            continue  # not an input file
        expected_name = OUTPUT + str(idx)
        if expected_name not in entries:
            continue  # input without a matching expected-output file
        collected.append((
            os.path.abspath(os.path.join(tests_dir, entry)),
            os.path.abspath(os.path.join(tests_dir, expected_name)),
            idx,
        ))
    return sorted(collected, key=lambda triple: triple[2])
def run_user_test(python, executable_path):
    """Run every user test against *executable_path* using interpreter *python*.

    For each (input, output, index) triple the student's program is invoked
    as ``python executable_path input`` and its stdout is compared verbatim
    against the stored expected output.  (Python 2 syntax: print statements.)
    """
    user_tests = process_user_tests(executable_path)
    for test in user_tests:
        # NOTE: `input` here shadows the builtin; kept as in the original.
        input, output, index = test
        test_output = subprocess.check_output([python, executable_path, input])
        expected_output = open(output).read()
        test_status = TEST_PASSED if test_output == expected_output else TEST_FAILED
        print "TEST" + str(index) + " " + test_status
        print "OUTPUT:"
        print test_output + "\n"
        if test_status == TEST_FAILED:
            # Show the diff material only on failure.
            print "EXPECTED OUTPUT:"
            print expected_output + "\n"
if __name__ == "__main__":
    # argv: [script, interpreter-to-use, path-of-solution-to-test]
    interpreter = sys.argv[1]
    solution_path = sys.argv[2]
    run_user_test(interpreter, solution_path)
|
apache-2.0
|
koolventure/replicator
|
dnaEncoder.py
|
1
|
1184
|
#Software to encode binary information to DNA nucleotides
import binascii
import re
class DNAEncoder:
    """Encode text into DNA nucleotides via 2-bit chunks.

    Mapping: ``00 -> A``, ``11 -> T``, ``01 -> G``, ``10 -> C``.
    """

    # 2-bit chunk -> nucleotide letter (replaces the original elif chain).
    _NUCLEOTIDES = {'00': 'A', '11': 'T', '01': 'G', '10': 'C'}

    def __init__(self):
        self.binaryArray = []      # list of 2-character bit strings
        self.nucleotideArray = []  # list of single-letter nucleotide codes

    # This section of code is used for the express purpose of encoding
    # binary information.
    def normalize(self, stringIn):
        """Split *stringIn*'s encoded bits into 2-bit chunks; store and return them.

        Bug fix: the original used ``bin()`` output directly, which trims
        leading zero bits, so any input whose first byte is small (e.g.
        ``"\\x01a"``) produced a short, misaligned bit string and corrupt
        nucleotides.  The bit string is now zero-padded to the full byte
        width, and an empty input yields an empty list (the original
        produced a spurious '00' chunk).
        """
        data = stringIn.encode()
        if not data:
            bits = ''
        else:
            # Pad to 8 bits per byte so leading zeros are preserved.
            bits = bin(int.from_bytes(data, 'big'))[2:].zfill(len(data) * 8)
        self.binaryArray = [bits[i:i + 2] for i in range(0, len(bits), 2)]
        return self.binaryArray

    # Converts to nucleotides
    def convert(self):
        """Append nucleotides for the current binaryArray; return the array.

        Note: as in the original, repeated calls keep appending to
        ``nucleotideArray`` rather than replacing it.
        """
        for chunk in self.binaryArray:
            nucleotide = self._NUCLEOTIDES.get(chunk)
            if nucleotide is None:
                print("Error")
            else:
                self.nucleotideArray.append(nucleotide)
        return self.nucleotideArray

    def getBinaryArray(self):
        """Return the most recently computed list of 2-bit chunks."""
        return self.binaryArray

    def getNucleotideArray(self):
        """Return the accumulated nucleotide list."""
        return self.nucleotideArray
|
mit
|
nickpack/reportlab
|
tests/test_platypus_tables.py
|
1
|
38444
|
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id: test_platypus_tables.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__='Test script for reportlab.tables'
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
# Route generated PDFs to the shared test-output directory.
setOutDir(__name__)
import os,unittest
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.graphics.charts.linecharts import HorizontalLineChart
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin
from reportlab.graphics.charts.barcharts import VerticalBarChart

# Shared sample stylesheet used by every builder in this module.
styleSheet = getSampleStyleSheet()
def getTable():
    """Build the small quarterly-sales demo table used by the style tests."""
    rows = (
        ('', 'North', 'South', 'East', 'West'),
        ('Quarter 1', 100, 200, 300, 400),
        ('Quarter 2', 100, 400, 600, 800),
        ('Total', 300, 600, 900, '1,200'),
    )
    col_widths = (72, 36, 36, 36, 36)
    row_heights = (24, 16, 16, 18)
    return Table(rows, col_widths, row_heights)
def makeStyles():
    """Build five table styles with cumulatively increasing decoration.

    Style 0 carries alignment (plus an HREF) only; each later style also
    receives every rule whose start index it reaches (grid, header
    underline, footer overline), and the final style additionally gets a
    grey LINEBELOW variant.
    """
    base_commands = [
        ('ALIGN', (1, 1), (-1, -1), 'RIGHT'),
        ('ALIGN', (0, 0), (-1, 0), 'CENTRE'),
        ('HREF', (0, 0), (0, 0), 'www.google.com'),
    ]
    styles = [TableStyle(list(base_commands)) for _ in range(5)]

    # (first style index the rule applies to, rule arguments)
    cumulative_rules = [
        (1, ('GRID', (0, 0), (-1, -1), 0.25, colors.black)),
        (2, ('LINEBELOW', (0, 0), (-1, 0), 2, colors.black)),
        (3, ('LINEABOVE', (0, -1), (-1, -1), 2, colors.black)),
    ]
    for start, rule in cumulative_rules:
        for style in styles[start:]:
            style.add(*rule)

    styles[-1].add('LINEBELOW', (1, -1), (-1, -1), 2, (0.5, 0.5, 0.5))
    return styles
def run():
    """Build test_platypus_tables.pdf: alignment checks plus the five demo styles."""
    doc = SimpleDocTemplate(outputfile('test_platypus_tables.pdf'), pagesize=(8.5*inch, 11*inch), showBoundary=1)
    lst = []
    from reportlab import Version
    styNormal = styleSheet['Normal']
    # Pink background makes the Paragraph cells visually distinguishable.
    styBackground = ParagraphStyle('background', parent=styNormal, backColor=colors.pink)
    styH1 = styleSheet['Heading1']
    lst.append(Paragraph("First, a test of how tables align their content...", styH1))
    lst.append(Paragraph("""Generated with version %s""" % Version,
               styNormal))
    lst.append(Paragraph("""In release 2.3, cells with plain text positioned their
               text differently to cells with Paragraphs using the
               same font.  Hopefully now they are back on the same baseline""",
               styNormal))
    ts1 = TableStyle([
        ('ALIGN', (0,0), (-1,0), 'RIGHT'),
        ('BACKGROUND', (0,0), (-1,0), colors.lightgrey),
        ('VALIGN', (0,0), (-1,-1), 'TOP'),
        ('GRID', (0,0), (-1,-1), 0.25, colors.black),
        ])
    # Mixed plain-text and Paragraph cells: baselines must line up.
    t1 = Table([
        ('plain text','plain text','shortpara','plain text', 'long para'),
        ('Text','more text', Paragraph('Is this para level?', styBackground), 'Back to text', Paragraph('Short para again', styBackground)),
        ('Text',
         'more text',
         Paragraph('Is this level?', styBackground),
         'This is plain\ntext with line breaks\nto compare against\nthe para on right',
         Paragraph('Long paragraph we expect to wrap over several lines accurately', styBackground)),
        ])
    t1.setStyle(ts1)
    lst.append(t1)
    lst.append(Spacer(0,10))
    lst.append(Paragraph("Now we make a table with just one cell containing a string...note how the text sits low", styNormal))
    tsGrid = TableStyle([
        ('GRID', (0,0), (-1,-1), 0.25, colors.black),
        ])
    lst.append(Table([['One cell of plain text']], style=tsGrid, colWidths=[200]))
    lst.append(Spacer(0,10))
    lst.append(Paragraph("Now we make a table with just one cell containing a para...should be same position.  Note that the overall bounding box is an approximation and lies - it always did.", styNormal))
    lst.append(Table([[Paragraph('One cell containing a paragraph. ÄÉ∫', styBackground)]], style=tsGrid, colWidths=[200]))
    lst.append(Spacer(0,10))
    lst.append(Paragraph("Paragraphs jumped up post 2.1.  Ideally they should align the same.", styNormal))
    lst.append(Spacer(0,30))
    # Render the same demo table once per style from makeStyles().
    lst.append(Paragraph("Now for all the tests we had before.  See also the much longer test_platypus_tables_2.pdf, which for reasons unknown was split into a separate file generated by the same script", styNormal))
    styles = makeStyles()
    for style in styles:
        t = getTable()
        t.setStyle(style)
##        print '--------------'
##        for rowstyle in t._cellstyles:
##            for s in rowstyle:
##                print s.alignment
        lst.append(t)
        lst.append(Spacer(0,12))
    doc.build(lst)
class TableBarChart(_DrawingEditorMixin,Drawing):
    """Small vertical bar chart drawing (seasonal categories) for embedding in tables."""

    def __init__(self,width=400,height=200,*args,**kw):
        Drawing.__init__(self,width,height,*args,**kw)
        # NOTE(review): the constructor's width/height arguments are
        # immediately overridden with fixed dimensions below.
        self.width = 136
        self.height = 140
        self._add(self,VerticalBarChart(),name='chart',validate=None,desc=None)
        # Position the chart inside the drawing with small margins.
        self.chart.y = 20
        self.chart.width = self.width - 21
        self.chart.height = self.height - 24
        self.chart.categoryAxis.categoryNames = ['Spring','Summer','Autumn','Winter']
        self.chart.categoryAxis.labels.fontSize = 7
def old_tables_test():
from reportlab.lib.units import inch, cm
from reportlab.platypus.flowables import Image, PageBreak, Spacer, XBox
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.flowables import Preformatted
from reportlab.platypus.doctemplate import SimpleDocTemplate
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.tables import GRID_STYLE, BOX_STYLE, LABELED_GRID_STYLE, COLORED_GRID_STYLE, LIST_STYLE, LongTable
rowheights = (24, 16, 16, 16, 16)
rowheights2 = (24, 16, 16, 16, 30)
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
data2 = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats\nLarge', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
lst = []
lst.append(Paragraph("Tables", styleSheet['Heading1']))
lst.append(Paragraph(__doc__, styleSheet['BodyText']))
lst.append(Paragraph("The Tables (shown in different styles below) were created using the following code:", styleSheet['BodyText']))
lst.append(Preformatted("""
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
rowheights = (24, 16, 16, 16, 16)
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159,
888, '1,298', 832, 453, '1,344','2,843')
)
t = Table(data, colwidths, rowheights)
""", styleSheet['Code'], dedent=4))
lst.append(Paragraph("""
You can then give the Table a TableStyle object to control its format. The first TableStyle used was
created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
GRID_STYLE = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
lst.append(Paragraph("""
TableStyles are created by passing in a list of commands. There are two types of commands - line commands
and cell formatting commands. In all cases, the first three elements of a command are the command name,
the starting cell and the ending cell.
""", styleSheet['BodyText']))
lst.append(Paragraph("""
Line commands always follow this with the weight and color of the desired lines. Colors can be names,
or they can be specified as a (R,G,B) tuple, where R, G and B are floats and (0,0,0) is black. The line
command names are: GRID, BOX, OUTLINE, INNERGRID, LINEBELOW, LINEABOVE, LINEBEFORE
and LINEAFTER. BOX and OUTLINE are equivalent, and GRID is the equivalent of applying both BOX and
INNERGRID.
""", styleSheet['BodyText']))
lst.append(Paragraph("""
Cell formatting commands are:
""", styleSheet['BodyText']))
lst.append(Paragraph("""
FONT - takes fontname, fontsize and (optional) leading.
""", styleSheet['Definition']))
lst.append(Paragraph("""
TEXTCOLOR - takes a color name or (R,G,B) tuple.
""", styleSheet['Definition']))
lst.append(Paragraph("""
ALIGNMENT (or ALIGN) - takes one of LEFT, RIGHT, CENTRE (or CENTER) or DECIMAL.
""", styleSheet['Definition']))
lst.append(Paragraph("""
LEFTPADDING - defaults to 6.
""", styleSheet['Definition']))
lst.append(Paragraph("""
RIGHTPADDING - defaults to 6.
""", styleSheet['Definition']))
lst.append(Paragraph("""
BOTTOMPADDING - defaults to 3.
""", styleSheet['Definition']))
lst.append(Paragraph("""
A tablestyle is applied to a table by calling Table.setStyle(tablestyle).
""", styleSheet['BodyText']))
t = Table(data, colwidths, rowheights)
t.setStyle(GRID_STYLE)
lst.append(PageBreak())
lst.append(Paragraph("This is GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
t = Table(data, colwidths, rowheights)
t.setStyle(BOX_STYLE)
lst.append(Paragraph("This is BOX_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
BOX_STYLE = TableStyle(
[('BOX', (0,0), (-1,-1), 0.50, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
t.setStyle(LABELED_GRID_STYLE)
lst.append(Paragraph("This is LABELED_GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
t = Table(data2, colwidths, rowheights2)
t.setStyle(LABELED_GRID_STYLE)
lst.append(Paragraph("This is LABELED_GRID_STYLE ILLUSTRATES EXPLICIT LINE SPLITTING WITH NEWLINE (different heights and data)\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
LABELED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.black),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
lst.append(PageBreak())
t = Table(data, colwidths, rowheights)
t.setStyle(COLORED_GRID_STYLE)
lst.append(Paragraph("This is COLORED_GRID_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
COLORED_GRID_STYLE = TableStyle(
[('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 2, colors.red),
('LINEBELOW', (0,0), (-1,0), 2, colors.black),
('LINEAFTER', (0,0), (0,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
t.setStyle(LIST_STYLE)
lst.append(Paragraph("This is LIST_STYLE\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
LIST_STYLE = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
""", styleSheet['Code']))
t = Table(data, colwidths, rowheights)
ts = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 3, colors.green,'butt'),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.white,'butt'),
('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))]
)
t.setStyle(ts)
lst.append(Paragraph("This is a custom style\n", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
It was created as follows:
""", styleSheet['BodyText']))
lst.append(Preformatted("""
ts = TableStyle(
[('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 3, colors.green,'butt'),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.white,'butt'),
('ALIGN', (1,1), (-1,-1), 'RIGHT'),
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))]
)
""", styleSheet['Code']))
data = (
('', 'Jan\nCold', 'Feb\n', 'Mar\n','Apr\n','May\n', 'Jun\nHot', 'Jul\n', 'Aug\nThunder', 'Sep\n', 'Oct\n', 'Nov\n', 'Dec\n'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
c = list(colwidths)
c[0] = None
c[8] = None
t = Table(data, c, [None]+list(rowheights[1:]))
t.setStyle(LIST_STYLE)
lst.append(Paragraph("""
This is a LIST_STYLE table with the first rowheight set to None ie automatic.
The top row cells are split at a newline '\\n' character. The first and August
column widths were also set to None.
""", styleSheet['BodyText']))
lst.append(t)
lst.append(Paragraph("""
This demonstrates a number of features useful in financial statements. The first is decimal alignment;
with ALIGN=DECIMAL the numbers align on the points; and the points are aligned based on
the RIGHTPADDING, which is usually 3 points so you should set it higher. The second is multiple lines;
one can specify double or triple lines and control the separation if desired. Finally, the coloured
negative numbers were (we regret to say) done in the style; we don't have a way to conditionally
format numbers based on value yet.
""", styleSheet['BodyText']))
t = Table([[u'Corporate Assets','Amount'],
['Fixed Assets','1,234,567.89'],
['Company Vehicle','1,234.8901'],
['Petty Cash','42'],
[u'Intellectual Property\u00ae','(42,078,231.56)'],
['Overdraft','(12,345)'],
['Boardroom Flat Screen','60 inches'],
['Net Position','Deep Sh*t.Really']
],
[144,72])
ts = TableStyle(
[#first the top row
('ALIGN', (1,1), (-1,-1), 'CENTER'),
('LINEABOVE', (0,0), (-1,0), 1, colors.purple),
('LINEBELOW', (0,0), (-1,0), 1, colors.purple),
('FONT', (0,0), (-1,0), 'Times-Bold'),
#bottom row has a line above, and two lines below
('LINEABOVE', (0,-1), (-1,-1), 1, colors.purple), #last 2 are count, sep
('LINEBELOW', (0,-1), (-1,-1), 0.5, colors.purple, 1, None, None, 4,1),
('LINEBELOW', (0,-1), (-1,-1), 1, colors.red),
('FONT', (0,-1), (-1,-1), 'Times-Bold'),
#numbers column
('ALIGN', (1,1), (-1,-1), 'DECIMAL'),
('RIGHTPADDING', (1,1), (-1,-1), 36),
('TEXTCOLOR', (1,4), (1,4), colors.red),
#red cell
]
)
t.setStyle(ts)
lst.append(t)
lst.append(Spacer(36,36))
lst.append(Paragraph("""
The red numbers should be aligned LEFT & BOTTOM, the blue RIGHT & TOP
and the green CENTER & MIDDLE.
""", styleSheet['BodyText']))
XY = [['X00y', 'X01y', 'X02y', 'X03y', 'X04y'],
['X10y', 'X11y', 'X12y', 'X13y', 'X14y'],
['X20y', 'X21y', 'X22y', 'X23y', 'X24y'],
['X30y', 'X31y', 'X32y', 'X33y', 'X34y']]
t=Table(XY, 5*[0.6*inch], 4*[0.6*inch])
t.setStyle([('ALIGN',(1,1),(-2,-2),'LEFT'),
('TEXTCOLOR',(1,1),(-2,-2),colors.red),
('VALIGN',(0,0),(1,-1),'TOP'),
('ALIGN',(0,0),(1,-1),'RIGHT'),
('TEXTCOLOR',(0,0),(1,-1),colors.blue),
('ALIGN',(0,-1),(-1,-1),'CENTER'),
('VALIGN',(0,-1),(-1,-1),'MIDDLE'),
('TEXTCOLOR',(0,-1),(-1,-1),colors.green),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(t)
data = [('alignment', 'align\012alignment'),
('bulletColor', 'bulletcolor\012bcolor'),
('bulletFontName', 'bfont\012bulletfontname'),
('bulletFontSize', 'bfontsize\012bulletfontsize'),
('bulletIndent', 'bindent\012bulletindent'),
('firstLineIndent', 'findent\012firstlineindent'),
('fontName', 'face\012fontname\012font'),
('fontSize', 'size\012fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent\012lindent'),
('rightIndent', 'rightindent\012rindent'),
('spaceAfter', 'spaceafter\012spacea'),
('spaceBefore', 'spacebefore\012spaceb'),
('textColor', 'fg\012textcolor\012color')]
t = Table(data)
t.setStyle([
('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
lst.append(t)
t = Table([ ('Attribute', 'Synonyms'),
('alignment', 'align, alignment'),
('bulletColor', 'bulletcolor, bcolor'),
('bulletFontName', 'bfont, bulletfontname'),
('bulletFontSize', 'bfontsize, bulletfontsize'),
('bulletIndent', 'bindent, bulletindent'),
('firstLineIndent', 'findent, firstlineindent'),
('fontName', 'face, fontname, font'),
('fontSize', 'size, fontsize'),
('leading', 'leading'),
('leftIndent', 'leftindent, lindent'),
('rightIndent', 'rightindent, rindent'),
('spaceAfter', 'spaceafter, spacea'),
('spaceBefore', 'spacebefore, spaceb'),
('textColor', 'fg, textcolor, color')])
t.repeatRows = 1
t.setStyle([
('FONT',(0,0),(-1,1),'Times-Bold',10,12),
('FONT',(0,1),(-1,-1),'Courier',8,8),
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('BACKGROUND', (0, 0), (-1, 0), colors.green),
('BACKGROUND', (0, 1), (-1, -1), colors.pink),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('ALIGN', (0, 1), (0, -1), 'LEFT'),
('ALIGN', (-1, 1), (-1, -1), 'RIGHT'),
('FONT', (0, 0), (-1, 0), 'Times-Bold', 12),
('ALIGN', (1, 1), (1, -1), 'CENTER'),
])
lst.append(t)
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 5,6),
('GRID', (0,0), (-1,-1), 0.25, colors.blue),]))
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 10,12),
('GRID', (0,0), (-1,-1), 0.25, colors.black),]))
lst.append(Table(XY,
style=[ ('FONT',(0,0),(-1,-1),'Times-Roman', 20,24),
('GRID', (0,0), (-1,-1), 0.25, colors.red),]))
lst.append(PageBreak())
data= [['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
])
lst.append(Paragraph("Illustrating splits: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(PageBreak())
data= [['00', '01', '02', '03', '04'],
['', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '', '33', '34']]
sty=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('SPAN',(2,2),(2,3)),
]
t=Table(data,style=sty)
lst.append(Paragraph("Illustrating splits with spans: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
data= [['00', '01', '02', '03', '04'],
['', '11', '12', '13', ''],
['20', '21', '22', '23', '24'],
['30', '31', '', '33', ''],
['40', '41', '', '43', '44']]
sty=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('BOX',(0,0),(-1,-1),2,colors.black),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('SPAN',(0,0),(0,1)),
('BACKGROUND',(-2,1),(-1,1),colors.palegreen),
('SPAN',(-2,1),(-1,1)),
('BACKGROUND',(-2,3),(-1,3),colors.yellow),
('SPAN',(-2,3),(-1,3)),
('BACKGROUND', (2, 3), (2, 4), colors.orange),
('SPAN',(2,3),(2,4)),
]
t=Table(data,style=sty,repeatRows=2)
lst.append(Paragraph("Illustrating splits with spans and repeatRows: nosplit", styleSheet['BodyText']))
lst.append(t)
lst.append(Spacer(0,6))
if 1:
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,30)", styleSheet['BodyText']))
for s in t.split(4*inch,30):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,36)", styleSheet['BodyText']))
for s in t.split(4*inch,36):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(Paragraph("Illustrating splits with spans and repeatRows: split(4in,56)", styleSheet['BodyText']))
lst.append(Spacer(0,6))
for s in t.split(4*inch,56):
lst.append(s)
lst.append(Spacer(0,6))
lst.append(PageBreak())
from reportlab.lib.testutils import testsFolder
I = Image(os.path.join(os.path.dirname(testsFolder),'tools','pythonpoint','demos','leftlogo.gif'))
I.drawHeight = 1.25*inch*I.drawHeight / I.drawWidth
I.drawWidth = 1.25*inch
#I.drawWidth = 9.25*inch #uncomment to see better messaging
P = Paragraph("<para align=center spaceb=3>The <b>ReportLab Left <font color=red>Logo</font></b> Image</para>", styleSheet["BodyText"])
B = TableBarChart()
BP = Paragraph("<para align=center spaceb=3>A bar chart in a cell.</para>", styleSheet["BodyText"])
data= [['A', 'B', 'C', Paragraph("<b>A pa<font color=red>r</font>a<i>graph</i></b><super><font color=yellow>1</font></super>",styleSheet["BodyText"]), 'D'],
['00', '01', '02', [I,P], '04'],
['10', '11', '12', [I,P], '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34'],
['40', '41', '42', [B,BP], '44']]
t=Table(data,style=[('GRID',(1,1),(-2,-2),1,colors.green),
('BOX',(0,0),(1,-1),2,colors.red),
('LINEABOVE',(1,2),(-2,2),1,colors.blue),
('LINEBEFORE',(2,1),(2,-2),1,colors.pink),
('BACKGROUND', (0, 0), (0, 1), colors.pink),
('BACKGROUND', (1, 1), (1, 2), colors.lavender),
('BACKGROUND', (2, 2), (2, 3), colors.orange),
('BOX',(0,0),(-1,-1),2,colors.black),
('GRID',(0,0),(-1,-1),0.5,colors.black),
('VALIGN',(3,0),(3,0),'BOTTOM'),
('BACKGROUND',(3,0),(3,0),colors.limegreen),
('BACKGROUND',(3,1),(3,1),colors.khaki),
('ALIGN',(3,1),(3,1),'CENTER'),
('BACKGROUND',(3,2),(3,2),colors.beige),
('ALIGN',(3,2),(3,2),'LEFT'),
])
t._argW[3]=1.5*inch
lst.append(t)
# now for an attempt at column spanning.
lst.append(PageBreak())
data= [['A', 'BBBBB', 'C', 'D', 'E'],
['00', '01', '02', '03', '04'],
['10', '11', '12', '13', '14'],
['20', '21', '22', '23', '24'],
['30', '31', '32', '33', '34']]
sty = [
('ALIGN',(0,0),(-1,-1),'CENTER'),
('VALIGN',(0,0),(-1,-1),'TOP'),
('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
#span 'BBBB' across middle 3 cells in top row
('SPAN',(1,0),(3,0)),
#now color the first cell in this range only,
#i.e. the one we want to have spanned. Hopefuly
#the range of 3 will come out khaki.
('BACKGROUND',(1,0),(1,0),colors.khaki),
('SPAN',(0,2),(-1,2)),
#span 'AAA'down entire left column
('SPAN',(0,0), (0, 1)),
('BACKGROUND',(0,0),(0,0),colors.cyan),
('LINEBELOW', (0,'splitlast'), (-1,'splitlast'), 1, colors.white,'butt'),
]
t=Table(data,style=sty, colWidths = [20] * 5, rowHeights = [20]*5)
lst.append(t)
# now for an attempt at percentage widths
lst.append(Spacer(18,18))
lst.append(Paragraph("This table has colWidths=5*['14%']!", styleSheet['BodyText']))
t=Table(data,style=sty, colWidths = ['14%'] * 5, rowHeights = [20]*5)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph("This table has colWidths=['14%','10%','19%','22%','*']!", styleSheet['BodyText']))
t=Table(data,style=sty, colWidths = ['14%','10%','19%','22%','*'], rowHeights = [20]*5)
lst.append(t)
# Mike's test example
lst.append(Spacer(18,18))
lst.append(Paragraph('Mike\'s Spanning Example', styleSheet['Heading1']))
data= [[Paragraph('World Domination: The First Five Years', styleSheet['BodyText']), ''],
[Paragraph('World <font color="green">Domination</font>: The First Five Years', styleSheet['BodyText']),''],
[Paragraph('World Domination: The First Five Years', styleSheet['BodyText']), ''],
]
t=Table(data, style=[('SPAN',(0,0),(1,0)),('SPAN',(0,1),(1,1)),('SPAN',(0,2),(1,2)),], colWidths = [3*cm,8*cm], rowHeights = [None]*3)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph('Mike\'s Non-spanning Example', styleSheet['Heading1']))
data= [[Paragraph('World Domination: The First Five Years', styleSheet['BodyText'])],
[Paragraph('World <font color="magenta">Domination</font>: The First Five Years', styleSheet['BodyText'])],
[Paragraph('World Domination: The First Five Years', styleSheet['BodyText'])],
]
t=Table(data, style=[], colWidths = [11*cm], rowHeights = [None]*3)
lst.append(t)
lst.append(Spacer(18,18))
lst.append(Paragraph('xpre example', styleSheet['Heading1']))
data= [ [
XPreformatted('Account Details', styleSheet['Heading3']),
'', XPreformatted('Client Details', styleSheet['Heading3']),
], #end of row 0
]
t=Table(data, style=[], colWidths = [80,230.0,80], rowHeights = [None]*1)
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph('Trying colour cycling in background', styleSheet['Heading1']))
lst.append(Paragraph("This should alternate pale blue and uncolored by row", styleSheet['BodyText']))
data= [['001', '01', '02', '03', '04', '05'],
['002', '01', '02', '03', '04', '05'],
['003', '01', '02', '03', '04', '05'],
['004', '01', '02', '03', '04', '05'],
['005', '01', '02', '03', '04', '05'],
['006', '01', '02', '03', '04', '05'],
['007', '01', '02', '03', '04', '05'],
['008', '01', '02', '03', '04', '05'],
['009', '01', '02', '03', '04', '05'],
['010', '01', '02', '03', '04', '05'],
]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('ROWBACKGROUNDS', (0, 0), (-1, -1), (0xD0D0FF, None)),
])
lst.append(t)
lst.append(Spacer(0,6))
lst.append(Paragraph("And this should pale blue, pale pink and None by column", styleSheet['BodyText']))
data= [['001', '01', '02', '03', '04', '05'],
['002', '01', '02', '03', '04', '05'],
['003', '01', '02', '03', '04', '05'],
['004', '01', '02', '03', '04', '05'],
['005', '01', '02', '03', '04', '05'],
['006', '01', '02', '03', '04', '05'],
['007', '01', '02', '03', '04', '05'],
['008', '01', '02', '03', '04', '05'],
['009', '01', '02', '03', '04', '05'],
['010', '01', '02', '03', '04', '05'],
]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('COLBACKGROUNDS', (0, 0), (-1, -1), (0xD0D0FF, 0xFFD0D0, None)),
])
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("This spanning example illustrates automatic removal of grids and lines in spanned cells!", styleSheet['BodyText']))
lst.append(Spacer(0,6))
data= [['Top\nLeft', '', '02', '03', '04', '05', '06', '07'],
['', '', '12', 'Span (3,1) (6,2)', '','','','17'],
['20', '21', '22', '', '','','','27'],
['30', '31', '32', '33', '34','35','36','37'],
['40', 'In The\nMiddle', '', '', '44','45','46','47'],
['50', '', '', '', '54','55','56','57'],
['60', '', '', '','64', '65', 'Bottom\nRight', ''],
['70', '71', '72', '73','74', '75', '', '']]
t=Table(data,style=[
('GRID',(0,0),(-1,-1),0.5,colors.grey),
('BACKGROUND',(0,0),(1,1),colors.palegreen),
('SPAN',(0,0),(1,1)),
('BACKGROUND',(-2,-2),(-1,-1), colors.pink),
('SPAN',(-2,-2),(-1,-1)),
('SPAN',(1,4),(3,6)),
('BACKGROUND',(1,4),(3,6), colors.lightblue),
('SPAN',(3,1),(6,2)),
('BACKGROUND',(3,1),(6,2), colors.peachpuff),
('VALIGN',(3,1),(6,2),'TOP'),
('LINEABOVE', (0,2),(-1,2), 1, colors.black, 0, None, None, 2, 2),
('LINEBEFORE', (3,0),(3,-1), 1, colors.black, 0, None, None, 2, 2),
])
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("und jetzt noch eine Tabelle mit 5000 Zeilen:", styleSheet['BodyText']))
sty = [ ('GRID',(0,0),(-1,-1),1,colors.green),
('BOX',(0,0),(-1,-1),2,colors.red),
]
data = [[str(i), Paragraph("xx "* (i%10), styleSheet["BodyText"]), Paragraph("blah "*(i%40), styleSheet["BodyText"])] for i in xrange(500)]
t=LongTable(data, style=sty, colWidths = [50,100,200])
lst.append(t)
#Yuan Hong's bug tester
lst.append(PageBreak())
lst.append(Paragraph('Yian Hong\'s Bug Case (should not blow up)', styleSheet['Heading2']))
data = ([['Col1', 'Col2', 'Col3', 'Col4', 'Col5']]+
[['01', Paragraph('This is cell one that contains a paragraph.', styleSheet['Normal']), '02', '03', '04']
for i in xrange(50)])
t = Table(data, ['20%']*5, repeatRows=1)
t.setStyle(TableStyle([
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
('SPAN', (0,50), (-2,50)),
]))
lst.append(t)
lst.append(PageBreak())
#Volker Haas' example extended
#the optimal row heights are the solution of an LP similar to
#
#Objective function
# min: 3*h0+3*h1+3*h2+2*h3;
#
#constraints
# h0>=12;
# h1>=12;
# h2>=12;
# h3>=12;
# h0+h1+h2>=48;
# h0+h1>=12;
# h2+h3>=60;
#
#the solution H=[12,12,24,36]
def makeTable(x,y):
    """Build the 6x4 demo table whose optimal row heights solve the LP above.

    x and y are the contents of the two variable cells (plain strings,
    Paragraph or XPreformatted in the callers) so different flowable types
    can be compared under identical span constraints.
    """
    return Table([
        ['00', '01', '02', '03', '04', '05\nline2\nline3\nline4'],
        ['', '11', '12', x, '',''],
        ['20', '21', y, '23', '24',''],
        ['30', '31', '', '33', '34','35'],
        ],
        style=[
        # zero padding so computed row heights come purely from cell content
        ('TOPPADDING',(0,0),(-1,-1),0),
        ('BOTTOMPADDING',(0,0),(-1,-1),0),
        ('RIGHTPADDING',(0,0),(-1,-1),0),
        ('LEFTPADDING',(0,0),(-1,-1),0),
        ('GRID',(0,0),(-1,-1),0.5,colors.grey),
        # vertical span down column 0, rows 0-1
        ('BACKGROUND', (0, 0), (0, 1), colors.pink),
        ('SPAN',(0,0),(0,1)),
        # vertical span holding y (column 2, rows 2-3)
        ('BACKGROUND', (2, 2), (2, 3), colors.orange),
        ('SPAN',(2,2),(2,3)),
        # horizontal span holding x, and the tall 3-row span for column 5
        ('SPAN',(3,1),(4,1)),
        ('SPAN',(5,0),(5,2)),
        ])
p_style= ParagraphStyle('Normal')
lst.append(makeTable(
Paragraph('This is a string',p_style),
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
lst.append(Spacer(10,10))
lst.append(makeTable(
XPreformatted('This is a string',p_style),
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
lst.append(Spacer(10,10))
lst.append(makeTable(
'This is a string',
'22\nblub\nasfd\nafd\nasdfs',
))
lst.append(Spacer(10,10))
lst.append(makeTable(
'This is a string',
Paragraph('22<br/>blub<br/>asfd<br/>afd<br/>asdfs', p_style)
))
SimpleDocTemplate(outputfile('test_platypus_tables_2.pdf'), showBoundary=1).build(lst)
class TablesTestCase(unittest.TestCase):
    """Exercise the table demo builders end to end."""

    def test0(self):
        """Build the main tables demo document."""
        run()

    def test1(self):
        """Build the legacy tables demo document."""
        old_tables_test()
def makeSuite():
    """Return the unittest suite covering this module's test cases."""
    return makeSuiteForClasses(TablesTestCase)
#noruntests
if __name__ == "__main__":
    # Run the suite directly and report where the output PDFs were written.
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
|
bsd-3-clause
|
sbesson/PyGithub
|
tests/PullRequest1375.py
|
3
|
2693
|
############################ Copyrights and license ############################
# #
# Copyright 2019 Olof-Joachim Frahm <olof@macrolet.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from . import Framework
class PullRequest1375(Framework.TestCase):
    """Regression test for replying to pull-request review comments."""

    def setUp(self):
        super().setUp()
        # First open PR on the fixture repository (replayed from recorded data).
        self.pr = self.g.get_repo("rsn491/PyGithub").get_pulls()[0]

    def testCreateReviewCommentReply(self):
        # A review comment that does not yet have any replies.
        root_comment_id = 373866377
        body_one = "Comment reply created by PyGithub"
        body_two = "Second comment reply created by PyGithub"

        reply_one = self.pr.create_review_comment_reply(root_comment_id, body_one)
        # Replying to a reply must still thread under the root comment.
        reply_two = self.pr.create_review_comment_reply(reply_one.id, body_two)

        # Both replies report the top-level comment as their parent.
        for reply in (reply_one, reply_two):
            self.assertEqual(reply.in_reply_to_id, root_comment_id)
        self.assertEqual(reply_one.body, body_one)
        self.assertEqual(reply_two.body, body_two)
|
lgpl-3.0
|
chihyaoma/Activity-Recognition-with-CNN-and-RNN
|
textOverlay/TextOverlay.py
|
1
|
4692
|
# Overlay per-video action-recognition predictions (TS-LSTM vs.
# Temporal-Inception) and their probabilities onto UCF-101 videos with OpenCV,
# writing the annotated frames into one demo clip.
#
# Contact: Chih-Yao Ma at cyma@gatech.edu
import numpy as np
import cv2
import random
import re

n = 3          # number of predictions stored per video in the label files
nVideo = 3754  # number of total videos

# Dimension of the input frames.
height = 240
width = 320

# Upscaling factor applied to every frame before drawing the overlay.
scale = 2

# Define the codec and create VideoWriter object for the annotated output.
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (width*scale, height*scale))

# Each label file holds n consecutive lines per video, formatted as
# "<relative/video/path> <label> <probability>" — TODO confirm against files.
with open('labels_rnn_20170328.txt') as rnn:
    linesRNN = rnn.readlines()
numPred = np.size(linesRNN)

with open('labels_tcnn_20170328.txt') as tcnn:
    linesTCNN = tcnn.readlines()

# How many randomly chosen videos to use as the demo.
numDemoVideos = 10
indVideo = random.sample(range(1, nVideo), numDemoVideos)

# Overlay layout is identical for every frame, so define it once here
# (the original recomputed all of this inside the per-frame loop).
font = cv2.FONT_HERSHEY_SIMPLEX
x_bar, y_bar = 50, 55   # origin of the probability bars
x_spacing = 300         # horizontal offset between the two model columns
y_spacing = 20          # vertical offset between the three predictions
x_pred, y_pred = 175, 40  # origin of the prediction labels
alpha = 0.5             # transparency of the dark backdrop rectangle

# Start processing each of the selected videos.
for number in indVideo:
    idx = (number-1)*n

    # Top-n prediction lines for this video from each model.
    videoInfoRNN = [linesRNN[idx + k].split() for k in range(n)]
    videoInfoTCNN = [linesTCNN[idx + k].split() for k in range(n)]

    # Bar lengths in pixels (probability * 100). Constant for the whole
    # video, so computed once per video instead of once per frame.
    lengthRNN = [float(info[2]) * 100 for info in videoInfoRNN]
    lengthTCNN = [float(info[2]) * 100 for info in videoInfoTCNN]

    # Open the video file; the first column of the label line is its path.
    fileName = '/media/chih-yao/ssd-data/ucf101/video/' + videoInfoRNN[0][0]
    cap = cv2.VideoCapture(fileName)

    # Ground truth is the class directory the video lives in.
    groundTruth = videoInfoRNN[0][0].split('/')[0]

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Upscale the frame, then blend a dark translucent backdrop so the
        # overlay stays readable regardless of the video content.
        frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
        overlay = frame.copy()
        cv2.rectangle(overlay, (30, 45), (600, 110), (20, 20, 20), -1)
        cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)

        # Probability bars: left column = TS-LSTM, right = Temporal-Inception.
        for row in range(n):
            y = y_bar + y_spacing * row
            frame = cv2.line(frame, (x_bar, y),
                             (x_bar + int(lengthRNN[row]), y), (255, 0, 255), 5)
            frame = cv2.line(frame, (x_bar + x_spacing, y),
                             (x_bar + x_spacing + int(lengthTCNN[row]), y),
                             (255, 0, 255), 5)

        # Ground truth in green at the top.
        cv2.putText(frame, groundTruth, (225, 20), font, 0.8, (0, 255, 0), 1,
                    cv2.LINE_AA)

        # Model names, then their top-n predicted labels beside the bars.
        cv2.putText(frame, 'TS-LSTM', (x_pred, y_pred), font, 0.5,
                    (0, 0, 255), 1, cv2.LINE_AA)
        cv2.putText(frame, 'Temporal-Inception', (x_pred + x_spacing, y_pred),
                    font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
        for row in range(n):
            y = y_pred + y_spacing * (row + 1)
            cv2.putText(frame, videoInfoRNN[row][1], (x_pred, y), font, 0.5,
                        (255, 255, 0), 1, cv2.LINE_AA)
            cv2.putText(frame, videoInfoTCNN[row][1], (x_pred + x_spacing, y),
                        font, 0.5, (255, 255, 0), 1, cv2.LINE_AA)

        # Write the processed frame and show a live preview; 'q' skips
        # the rest of the current video.
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the input video before moving on to the next one.
    cap.release()

cv2.destroyAllWindows()
# Release the output writer.
out.release()
|
mit
|
discosultan/quake-console
|
Samples/Sandbox/Lib/lib2to3/pgen2/token.py
|
353
|
1244
|
#! /usr/bin/env python
"""Token constants (from "token.h")."""

# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize

#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
COMMENT = 52
NL = 53
RARROW = 54
ERRORTOKEN = 55
N_TOKENS = 56
NT_OFFSET = 256
#--end constants--

# Reverse mapping: numeric token value -> token name.  Only the integer
# constants above are picked up (the module globals at this point hold no
# other ints); a comprehension avoids leaking loop variables into the
# module namespace as the original for-loop did.
tok_name = {value: name for name, value in globals().items()
            if isinstance(value, int)}


def ISTERMINAL(x):
    """Return True if *x* is the value of a terminal token."""
    return x < NT_OFFSET


def ISNONTERMINAL(x):
    """Return True if *x* is the value of a non-terminal symbol."""
    return x >= NT_OFFSET


def ISEOF(x):
    """Return True if *x* marks the end of input."""
    return x == ENDMARKER
|
mit
|
rmcgibbo/msmbuilder
|
msmbuilder/tests/test_vmhmm.py
|
2
|
3951
|
from __future__ import print_function, division
import numpy as np
from msmbuilder.hmm import VonMisesHMM
from msmbuilder.example_datasets import AlanineDipeptide
from msmbuilder.featurizer import DihedralFeaturizer
from scipy.stats.distributions import vonmises
from itertools import permutations
import random
def test_1():
    """Smoke-test a 4-state VonMisesHMM on the ALA2 dihedral data.

    Fits the model end to end and sanity-checks the implied timescales.
    """
    dataset = AlanineDipeptide().get()
    trajectories = dataset.trajectories
    topology = trajectories[0].topology
    # NOTE(review): `indices` is unused below; kept because it exercises
    # topology.select on the loaded topology.
    indices = topology.select('symbol C or symbol O or symbol N')

    featurizer = DihedralFeaturizer(['phi', 'psi'], trajectories[0][0])
    sequences = featurizer.transform(trajectories)

    hmm = VonMisesHMM(n_states=4, n_init=1)
    hmm.fit(sequences)

    # Bug fix: the original read `len(hmm.timescales_ == 3)`, which takes the
    # length of a boolean comparison array and therefore could never fail.
    # The intent is that a 4-state model implies 3 timescales.
    assert len(hmm.timescales_) == 3
    assert np.any(hmm.timescales_ > 50)
def circwrap(x):
    """Map an angle (or array of angles) onto the interval (-pi, pi)."""
    two_pi = 2 * np.pi
    # Shift by half a period before flooring so the result is centred on 0.
    return x - two_pi * np.floor(x / two_pi + 0.5)
def create_timeseries(means, kappas, transmat):
    """Sample a random 1-D timeseries from the given von Mises Markov model."""
    num_states = len(means)
    num_frames = 1000
    # Cumulative transition probabilities, one row per current state.
    cum_probs = np.cumsum(transmat, 1)
    state = random.randint(0, num_states - 1)
    X = np.empty((num_frames, 1))
    for frame in range(num_frames):
        # Inverse-CDF sampling of the next state from the current row.
        draw = random.random()
        state = (cum_probs[state] > draw).argmax()
        # Emit a von Mises sample for that state, wrapped onto (-pi, pi).
        X[frame, 0] = circwrap(vonmises.rvs(kappas[state], means[state]))
    return X
def validate_timeseries(means, kappas, transmat, model, meantol, kappatol, transmattol):
    """Validate that the fitted *model* matches the generating parameters.

    means/kappas/transmat describe the true model; meantol/kappatol/
    transmattol are per-parameter absolute tolerances.  Raises AssertionError
    if no state relabelling brings the fitted model within tolerance.
    """
    numStates = len(means)
    assert len(model.means_) == numStates
    # Transition matrix must be row-stochastic (entries in [0,1], rows sum to 1).
    assert (model.transmat_ >= 0.0).all()
    assert (model.transmat_ <= 1.0).all()
    # sum() over the transposed matrix yields the row sums of transmat_.
    totalProbability = sum(model.transmat_.T)
    assert (abs(totalProbability-1.0) < 1e-5).all()
    # The states may have come out in a different order, so we need to test all possible permutations.
    for order in permutations(range(len(means))):
        match = True
        for i in range(numStates):
            if abs(circwrap(means[i]-model.means_[order[i]])) > meantol:
                match = False
                break
            if abs(kappas[i]-model.kappas_[order[i]]) > kappatol:
                match = False
                break
            # Compare the full transition row under this relabelling; the
            # break only leaves the j-loop, but `match` is never reset so a
            # single failure rules this permutation out.
            for j in range(numStates):
                if abs(transmat[i,j]-model.transmat_[order[i],order[j]]) > transmattol:
                    match = False
                    break
        if match:
            # It matches.
            return
    # No permutation matched.
    assert False
def test_2():
    """Recover a known 2-state von Mises HMM from sampled timeseries."""
    true_transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
    true_means = np.array([[0.0], [2.0]])
    true_kappas = np.array([[4.0], [8.0]])
    sequences = [create_timeseries(true_means, true_kappas, true_transmat)
                 for _ in range(10)]
    # Fit with each supported reversibility scheme and compare the recovered
    # parameters against the generating model.
    for reversible_type in ('mle', 'transpose'):
        model = VonMisesHMM(n_states=2, reversible_type=reversible_type,
                            thresh=1e-4, n_iter=30)
        model.fit(sequences)
        validate_timeseries(true_means, true_kappas, true_transmat, model,
                            0.1, 0.5, 0.05)
        # Final training log-prob should agree with independent scoring.
        assert abs(model.fit_logprob_[-1] - model.score(sequences)) < 0.5
def test_3():
    """Recover a known 3-state von Mises HMM from sampled timeseries."""
    true_transmat = np.array([[0.2, 0.3, 0.5], [0.4, 0.4, 0.2], [0.8, 0.2, 0.0]])
    true_means = np.array([[0.0], [2.0], [4.0]])
    true_kappas = np.array([[8.0], [8.0], [6.0]])
    sequences = [create_timeseries(true_means, true_kappas, true_transmat)
                 for _ in range(20)]
    # Fit with each supported reversibility scheme and compare the recovered
    # parameters against the generating model.
    for reversible_type in ('mle', 'transpose'):
        model = VonMisesHMM(n_states=3, reversible_type=reversible_type,
                            thresh=1e-4, n_iter=30)
        model.fit(sequences)
        validate_timeseries(true_means, true_kappas, true_transmat, model,
                            0.1, 0.5, 0.1)
        # Final training log-prob should agree with independent scoring.
        assert abs(model.fit_logprob_[-1] - model.score(sequences)) < 0.5
|
lgpl-2.1
|
evaschalde/odoo
|
addons/membership/report/__init__.py
|
432
|
1079
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_membership
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
nathanielcompton/actuallypittsburgh
|
actuallypittsburgh/ap_bot.py
|
1
|
1562
|
"""
Run through comments in a specified list of subreddits,
look for typos, and if a typo is found,
post a new comment with the corrected spelling.
"""
import praw
import time
import logging
from . import ap_config
logger = logging.getLogger(__name__)
# User agent info so Reddit can track the bot
r = praw.Reddit(user_agent=ap_config.user_agent)
r.login(ap_config.USERNAME, ap_config.PASSWORD)
already_corrected = []
def check_comments_for_typos():
    """Scan recent comments in the configured subreddits and reply to typos.

    Uses module state: the logged-in `r` session and the `already_corrected`
    list of comment ids that were already replied to.
    """
    # praw takes a single display-name string; multiple subreddits are
    # combined with '+' ("sub1+sub2").  The original passed a generator
    # object here, which praw cannot resolve into a subreddit name.
    subreddit = r.get_subreddit('+'.join(ap_config.subreddit_list))
    try:
        # Pass the fetch limit by keyword: positionally it would bind to a
        # different parameter of praw's content helpers.
        comments = subreddit.get_comments(limit=ap_config.comment_get_limit)
        for comment in comments:
            comment_text = comment.body.lower()
            is_typo = any(string in comment_text for string in ap_config.typo_list)
            if comment.id not in already_corrected and is_typo:
                print("Typo found. Comment ID:" + comment.id)
                try:
                    comment.reply("I think you meant 'Pittsburgh.'")
                    print("Replied.")
                    already_corrected.append(comment.id)
                    print("ID logged.\n Searching again...")
                except Exception:
                    logger.warning("Reply failed!", exc_info=True)
    except Exception:
        # Login happened at import time; a failure here is a fetch error,
        # so log it as such (the original said "Unable to login.").
        logger.exception("Unable to fetch comments.", exc_info=True)
if __name__ == "__main__":
    # Single scan per invocation; an external scheduler is expected to rerun
    # the script repeatedly.
    check_comments_for_typos()
    # Sleep between queries to prevent excessive calls to the Reddit servers
    time.sleep(ap_config.sleep_length)
|
mit
|
sigma-random/numpy
|
tools/win32build/prepare_bootstrap.py
|
81
|
3648
|
from __future__ import division, print_function
import os
import subprocess
import shutil
from os.path import join as pjoin, split as psplit, dirname
from zipfile import ZipFile
import re
def get_sdist_tarball():
    """Return the filename of the zip sdist produced by the sdist command."""
    # Distutils hardcodes this naming scheme, so we have to reproduce it here.
    return "numpy-%s.zip" % get_numpy_version()
def build_sdist():
    """Build a zip-format sdist from the repository root (two levels up)."""
    old_cwd = os.getcwd()
    try:
        os.chdir('../..')
        subprocess.call(["python", "setup.py", "sdist", "--format=zip"])
    except Exception as e:
        raise RuntimeError("Error while executing cmd (%s)" % e)
    finally:
        # Always restore the working directory for the caller.
        os.chdir(old_cwd)
def prepare_numpy_sources(bootstrap = 'bootstrap'):
    """Extract the sdist zip into *bootstrap*, stripping the numpy-VERSION prefix.

    Reads ../../dist/numpy-X.Y.Z.zip (as produced by build_sdist) and writes
    every archive member below the bootstrap directory.
    """
    zid = ZipFile(pjoin('..', '..', 'dist', get_sdist_tarball()))
    root = 'numpy-%s' % get_numpy_version()
    try:
        # From the sdist-built tarball, extract all files into bootstrap directory,
        # but removing the numpy-VERSION head path
        for name in zid.namelist():
            cnt = zid.read(name)
            if name.startswith(root):
                # XXX: even on windows, the path sep in zip is '/' ?
                name = name.split('/', 1)[1]
            newname = pjoin(bootstrap, name)
            if not os.path.exists(dirname(newname)):
                os.makedirs(dirname(newname))
            # 'with' guarantees the output handle is closed; the original
            # leaked one open file per archive member.
            with open(newname, 'wb') as fid:
                fid.write(cnt)
    finally:
        zid.close()
def prepare_nsis_script(bootstrap, pyver, numver):
    """Instantiate the NSIS installer script template into *bootstrap*.

    Fills in the @NUMPY_INSTALLER_NAME@ placeholder and one @*_BINARY@
    placeholder per SSE flavor.
    """
    tpl = os.path.join('nsis_scripts', 'numpy-superinstaller.nsi.in')
    # Close both files explicitly; the original leaked the handles (and on
    # some platforms the output might not be flushed before use).
    source = open(tpl, 'r')
    try:
        cnt = "".join(source.readlines())
    finally:
        source.close()

    installer_name = 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
    cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
    for arch in ['nosse', 'sse2', 'sse3']:
        cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
                          get_binary_name(arch))

    target = open(pjoin(bootstrap, 'numpy-superinstaller.nsi'), 'w')
    try:
        target.write(cnt)
    finally:
        target.close()
def prepare_bootstrap(pyver):
    """Create a clean bootstrap-<pyver> directory with numpy sources,
    the build script and the instantiated NSIS installer script."""
    target_dir = "bootstrap-%s" % pyver
    # start from a pristine directory
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
    build_sdist()
    prepare_numpy_sources(target_dir)
    shutil.copy('build.py', target_dir)
    prepare_nsis_script(target_dir, pyver, get_numpy_version())
def get_binary_name(arch):
    # Installer executable name for one arch flavor ('nosse', 'sse2', 'sse3').
    return "numpy-%s-%s.exe" % (get_numpy_version(), arch)
def get_numpy_version(chdir = pjoin('..', '..')):
    """Return the numpy version string, read by spawning a python one-liner
    in *chdir* (repository root by default).

    For 'dev' versions the svn revision number is appended.
    NOTE(review): the spawned one-liner uses Python 2 ``print`` syntax, so
    the ``python`` found on PATH must be a Python 2 interpreter.
    """
    cwd = os.getcwd()
    try:
        if not chdir:
            chdir = cwd
        os.chdir(chdir)
        # Setting __NUMPY_SETUP__ keeps numpy from importing its compiled
        # extensions, so the version can be read from a plain source tree.
        version = subprocess.Popen(['python', '-c', 'import __builtin__; __builtin__.__NUMPY_SETUP__ = True; from numpy.version import version;print version'], stdout = subprocess.PIPE).communicate()[0]
        version = version.strip()
        if 'dev' in version:
            # append the svn revision, parsed out of `svn info` output
            out = subprocess.Popen(['svn', 'info'], stdout = subprocess.PIPE).communicate()[0]
            r = re.compile('Revision: ([0-9]+)')
            svnver = None
            for line in out.split('\n'):
                m = r.match(line)
                if m:
                    svnver = m.group(1)
            if not svnver:
                raise ValueError("Error while parsing svn version ?")
            version += svnver
    finally:
        # always restore the original working directory
        os.chdir(cwd)
    return version
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser()
    # Target Python version; determines the bootstrap directory name and
    # the installer file name.
    parser.add_option("-p", "--pyver", dest="pyver",
                      help = "Python version (2.4, 2.5, etc...)")

    opts, args = parser.parse_args()
    pyver = opts.pyver

    if not pyver:
        # default target when not given on the command line
        pyver = "2.5"

    prepare_bootstrap(pyver)
|
bsd-3-clause
|
DJMuggs/ansible-modules-extras
|
notification/twilio.py
|
8
|
5726
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Makai <matthew.makai@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation, rendered by ``ansible-doc``.
# Two factual fixes vs. the original text: this module sends a *text
# message*, not an email; and ``account_sid`` is the account SID (the
# token is ``auth_token``).
DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
   - Sends a text message to a phone number through the Twilio messaging API.
notes:
   - This module is non-idempotent because it sends a text message through the
     external API. It is idempotent only in the case that the module fails.
   - Like the other notification modules, this one requires an external
     dependency to work. In this case, you'll need a Twilio account with
     a purchased or verified phone number to send the text message.
options:
  account_sid:
    description:
      user's Twilio account SID found on the account page
    required: true
  auth_token:
    description: user's Twilio authentication token
    required: true
  msg:
    description:
      the body of the text message
    required: true
  to_number:
    description:
      one or more phone numbers to send the text message to,
      format +15551112222
    required: true
  from_number:
    description:
      the Twilio number to send the text message from, format +15551112222
    required: true
  media_url:
    description:
      a URL with a picture, video or sound clip to send with an MMS
      (multimedia message) instead of a plain SMS
    required: false

author: '"Matt Makai (@makaimc)" <matthew.makai@gmail.com>'
'''

# Playbook snippets rendered by ``ansible-doc``; each task delegates to
# localhost since the API call can run anywhere with network access.
EXAMPLES = '''
# send an SMS about the build status to (555) 303 5681
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
    msg: "All servers with webserver role are now configured."
    account_sid: "ACXXXXXXXXXXXXXXXXX"
    auth_token: "ACXXXXXXXXXXXXXXXXX"
    from_number: "+15552014545"
    to_number: "+15553035681"
  delegate_to: localhost

# send an SMS to multiple phone numbers about the deployment
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
    msg: "This server's configuration is now complete."
    account_sid: "ACXXXXXXXXXXXXXXXXX"
    auth_token: "ACXXXXXXXXXXXXXXXXX"
    from_number: "+15553258899"
    to_number:
      - "+15551113232"
      - "+12025551235"
      - "+19735559010"
  delegate_to: localhost

# send an MMS to a single recipient with an update on the deployment
# and an image of the results
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
    msg: "Deployment complete!"
    account_sid: "ACXXXXXXXXXXXXXXXXX"
    auth_token: "ACXXXXXXXXXXXXXXXXX"
    from_number: "+15552014545"
    to_number: "+15553035681"
    media_url: "https://demo.twilio.com/logo.png"
  delegate_to: localhost
'''
# =======================================
# twilio module support methods
#
# BUG FIX: the original except clause called module.fail_json(), but
# ``module`` does not exist at import time, so a missing urllib would
# have surfaced as a confusing NameError. Re-raise a clear ImportError
# instead (Ansible reports the traceback to the user).
try:
    import urllib, urllib2
except ImportError:
    raise ImportError("The twilio module requires urllib and urllib2")

import base64
def post_twilio_api(module, account_sid, auth_token, msg, from_number,
                    to_number, media_url=None):
    """POST one message to the Twilio REST API (Python 2 urllib/urllib2).

    Authenticates with HTTP Basic auth using the account SID and token;
    passing ``media_url`` turns the SMS into an MMS. Returns the opened
    response object; urllib2 raises on HTTP/network errors.
    NOTE(review): ``module`` is accepted but unused here -- presumably kept
    for symmetry with other helpers.
    """
    URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
        % (account_sid,)
    AGENT = "Ansible"

    data = {'From':from_number, 'To':to_number, 'Body':msg}
    if media_url:
        data['MediaUrl'] = media_url
    encoded_data = urllib.urlencode(data)
    request = urllib2.Request(URI)
    # build the Basic auth header manually; encodestring appends a trailing
    # newline that must be stripped
    base64string = base64.encodestring('%s:%s' % \
        (account_sid, auth_token)).replace('\n', '')
    request.add_header('User-Agent', AGENT)
    request.add_header('Content-type', 'application/x-www-form-urlencoded')
    request.add_header('Accept', 'application/json')
    request.add_header('Authorization', 'Basic %s' % base64string)
    return urllib2.urlopen(request, encoded_data)
# =======================================
# Main
#
def main():
    """Ansible entry point: send the message to every destination number."""
    module = AnsibleModule(
        argument_spec=dict(
            account_sid=dict(required=True),
            auth_token=dict(required=True),
            msg=dict(required=True),
            from_number=dict(required=True),
            to_number=dict(required=True),
            media_url=dict(default=None, required=False),
        ),
        supports_check_mode=True
    )

    account_sid = module.params['account_sid']
    auth_token = module.params['auth_token']
    msg = module.params['msg']
    from_number = module.params['from_number']
    to_number = module.params['to_number']
    media_url = module.params['media_url']

    # to_number may be a single string or a YAML list of numbers
    if not isinstance(to_number, list):
        to_number = [to_number]

    # BUG FIX: the module advertises supports_check_mode=True but the
    # original still hit the Twilio API in check mode. Exit early instead.
    if module.check_mode:
        module.exit_json(msg=msg, changed=False)

    for number in to_number:
        try:
            post_twilio_api(module, account_sid, auth_token, msg,
                            from_number, number, media_url)
        except Exception as e:
            # include the underlying error so failures are diagnosable
            module.fail_json(msg="unable to send message to %s: %s"
                                 % (number, e))

    module.exit_json(msg=msg, changed=False)
# import module snippets
# (wildcard import provides AnsibleModule; bottom placement is the
# old-style Ansible module convention)
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
nvoron23/statsmodels
|
statsmodels/sandbox/tsa/diffusion2.py
|
38
|
13366
|
""" Diffusion 2: jump diffusion, stochastic volatility, stochastic time
Created on Tue Dec 08 15:03:49 2009
Author: josef-pktd following Meucci
License: BSD
contains:
CIRSubordinatedBrownian
Heston
IG
JumpDiffusionKou
JumpDiffusionMerton
NIG
VG
References
----------
Attilio Meucci, Review of Discrete and Continuous Processes in Finance: Theory and Applications
Bloomberg Portfolio Research Paper No. 2009-02-CLASSROOM July 1, 2009
http://papers.ssrn.com/sol3/papers.cfm?abstract_id=1373102
this is currently mostly a translation from matlab of
http://www.mathworks.com/matlabcentral/fileexchange/23554-review-of-discrete-and-continuous-processes-in-finance
license BSD:
Copyright (c) 2008, Attilio Meucci
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
TODO:
* vectorize where possible
* which processes are exactly simulated by finite differences ?
* include or exclude (now) the initial observation ?
* convert to and merge with diffusion.py (part 1 of diffusions)
* which processes can be easily estimated ?
loglike or characteristic function ?
* tests ? check for possible index errors (random indices), graphs look ok
* adjust notation, variable names, more consistent, more pythonic
* delete a few unused lines, cleanup
* docstrings
random bug (showed up only once, need fuzz-testing to replicate)
File "...\diffusion2.py", line 375, in <module>
x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
File "...\diffusion2.py", line 129, in simulate
jumps_ts[n] = CumS[Events]
IndexError: index out of bounds
CumS is empty array, Events == -1
"""
import numpy as np
#from scipy import stats # currently only uses np.random
import matplotlib.pyplot as plt
class JumpDiffusionMerton(object):
    '''Merton jump-diffusion: Gaussian diffusion plus a compound-Poisson
    jump component with normally distributed jump sizes.

    Example
    -------
    mu=.00 # deterministic drift
    sig=.20 # Gaussian component
    l=3.45 # Poisson process arrival rate
    a=0 # drift of log-jump
    D=.2 # st.dev of log-jump

    X = JumpDiffusionMerton().simulate(mu,sig,lambd,a,D,ts,nrepl)

    plt.figure()
    plt.plot(X.T)
    plt.title('Merton jump-diffusion')
    '''

    def __init__(self):
        pass

    def simulate(self, m,s,lambd,a,D,ts,nrepl):
        """Simulate nrepl paths observed at times ts.

        Returns shape (nrepl, len(ts)+1); column 0 is the zero initial
        observation. m, s: drift/st.dev of the Gaussian part; lambd:
        Poisson arrival rate; a, D: mean/st.dev of jump sizes.
        """
        T = ts[-1] # time points
        # simulate number of jumps
        n_jumps = np.random.poisson(lambd*T, size=(nrepl, 1))

        jumps=[]
        nobs=len(ts)
        jumps=np.zeros((nrepl,nobs))
        for j in range(nrepl):
            # simulate jump arrival time
            t = T*np.random.rand(n_jumps[j])#,1) #uniform
            t = np.sort(t,0)
            # simulate jump size
            S = a + D*np.random.randn(n_jumps[j],1)
            # put things together
            CumS = np.cumsum(S)
            jumps_ts = np.zeros(nobs)
            for n in range(nobs):
                # Events: index of the last jump at or before ts[n]; -1 if none
                Events = np.sum(t<=ts[n])-1
                #print n, Events, CumS.shape, jumps_ts.shape
                jumps_ts[n]=0
                # NOTE(review): Events == 0 (exactly one jump so far)
                # contributes nothing; arguably should be ``Events >= 0``
                # -- TODO confirm against the matlab original.
                if Events > 0:
                    jumps_ts[n] = CumS[Events] #TODO: out of bounds see top
            #jumps = np.column_stack((jumps, jumps_ts)) #maybe wrong transl
            jumps[j,:] = jumps_ts

        D_Diff = np.zeros((nrepl,nobs))
        for k in range(nobs):
            # Dt: time increment. NOTE(review): for k == 1 this uses ts[1]
            # as the full increment rather than ts[1]-ts[0] -- TODO confirm.
            Dt=ts[k]
            if k>1:
                Dt=ts[k]-ts[k-1]
            D_Diff[:,k]=m*Dt + s*np.sqrt(Dt)*np.random.randn(nrepl)
        # prepend the zero initial observation, accumulate the increments
        x = np.hstack((np.zeros((nrepl,1)),np.cumsum(D_Diff,1)+jumps))
        return x
class JumpDiffusionKou(object):
    '''Kou double-exponential jump-diffusion.

    Jumps arrive as a Poisson process; a jump is exponential upward
    (parameter e1) with probability p and exponential downward
    (parameter e2) otherwise, on top of a Gaussian diffusion with
    drift m and volatility s.
    '''

    def __init__(self):
        pass

    def simulate(self, m, s, lambd, p, e1, e2, ts, nrepl):
        """Simulate nrepl paths observed at times ts.

        Returns shape (nrepl, len(ts)+1); column 0 is the zero initial
        observation.
        """
        T = ts[-1]
        # simulate number of jumps per replication on [0, T]
        N = np.random.poisson(lambd*T, size=(nrepl, 1))
        nobs = len(ts)
        jumps = np.zeros((nrepl, nobs))
        for j in range(nrepl):
            # N[j] is a length-1 array; use a plain int for sizes/indexing
            nj = int(N[j, 0])
            # simulate jump arrival times (sorted uniform on [0, T])
            t = T*np.random.rand(nj)
            t = np.sort(t)
            # simulate jump sizes: up-jump ~ Exp(e1) w.p. p, else -Exp(e2)
            ww = np.random.binomial(1, p, size=(nj))
            S = ww * np.random.exponential(e1, size=(nj)) - \
                (1-ww) * np.random.exponential(e2, nj)
            # cumulative jump component sampled at the observation times
            CumS = np.cumsum(S)
            jumps_ts = np.zeros(nobs)
            for n in range(nobs):
                # index of the last jump at or before ts[n]; -1 if none
                Events = sum(t <= ts[n]) - 1
                # BUG FIX: the original tested ``if Events:``, which is
                # truthy for Events == -1 and indexed CumS[-1] -- or raised
                # the IndexError documented in the module docstring when no
                # jumps occurred at all (CumS empty). Guard explicitly, as
                # JumpDiffusionMerton does.
                # NOTE(review): Events == 0 still contributes nothing;
                # arguably should be ``Events >= 0`` -- TODO confirm.
                if Events > 0:
                    jumps_ts[n] = CumS[Events]
            jumps[j, :] = jumps_ts

        D_Diff = np.zeros((nrepl, nobs))
        for k in range(nobs):
            # Dt: time increment (see NOTE in JumpDiffusionMerton on k == 1)
            Dt = ts[k]
            if k > 1:
                Dt = ts[k] - ts[k-1]
            D_Diff[:, k] = m*Dt + s*np.sqrt(Dt)*np.random.normal(size=nrepl)
        # prepend the zero initial observation, accumulate the increments
        x = np.hstack((np.zeros((nrepl, 1)), np.cumsum(D_Diff, 1) + jumps))
        return x
class VG(object):
    '''Variance-gamma process: Brownian motion with drift subordinated to
    a gamma process (stochastic business time).
    '''

    def __init__(self):
        pass

    def simulate(self, m,s,kappa,ts,nrepl):
        """Simulate nrepl paths at times ts; returns shape (nrepl, len(ts)).

        m, s: drift/st.dev of the subordinated Brownian motion; kappa:
        inverse of the gamma shape parameter.
        """
        T=len(ts)
        dXs = np.zeros((nrepl,T))
        for t in range(T):
            # dt: time increment. NOTE(review): t in {0, 1} both use
            # ts[1]-0 as the increment -- TODO confirm against the matlab
            # original.
            dt=ts[1]-0
            if t>1:
                dt = ts[t]-ts[t-1]
            #print dt/kappa
            #TODO: check parameterization of gamrnd, checked looks same as np
            # gamma-distributed business-time increment for this step
            d_tau = kappa * np.random.gamma(dt/kappa,1.,size=(nrepl))
            #print s*np.sqrt(d_tau)
            # this raises exception:
            #dX = stats.norm.rvs(m*d_tau,(s*np.sqrt(d_tau)))
            # np.random.normal requires scale >0
            # (1e-6 floor keeps the scale strictly positive when d_tau == 0)
            dX = np.random.normal(loc=m*d_tau, scale=1e-6+s*np.sqrt(d_tau))
            dXs[:,t] = dX
        x = np.cumsum(dXs,1)
        return x
class IG(object):
    '''Inverse-Gaussian random variates (used as the subordinator by NIG).

    Sampling follows the classic transformation-with-rejection scheme
    (cf. Michael/Schucany/Haas) -- TODO confirm against the reference.
    '''

    def __init__(self):
        pass

    def simulate(self, l, m, nrepl):
        """Draw ``nrepl`` IG(l, m) variates, returned as a flat array."""
        z = np.random.randn(nrepl, 1)
        chi = z**2
        # candidate root of the transformation
        cand = m + (.5*m*m/l)*chi - (.5*m/l)*np.sqrt(4*m*l*chi + m*m*(chi**2))
        u = np.random.rand(nrepl, 1)
        # with probability 1 - m/(cand+m), take the conjugate root instead
        flip = u > m/(cand + m)
        cand[flip] = m*m/cand[flip]
        return cand.ravel()
class NIG(object):
    '''Normal-inverse-Gaussian process: Brownian motion subordinated to an
    inverse-Gaussian process (parameters in Cont-Tankov notation).
    '''

    def __init__(self):
        pass

    def simulate(self, th,k,s,ts,nrepl):
        """Simulate nrepl paths at times ts; returns shape (nrepl, len(ts)).

        th: drift of the subordinated BM; k: IG parameter; s: st.dev.
        """
        T = len(ts)
        DXs = np.zeros((nrepl,T))
        for t in range(T):
            # Dt: time increment. NOTE(review): t in {0, 1} both use
            # ts[1]-0 -- TODO confirm against the matlab original.
            Dt=ts[1]-0
            if t>1:
                Dt=ts[t]-ts[t-1]
            # IG subordinator increment parameters for this step
            l = 1/k*(Dt**2)
            m = Dt
            DS = IG().simulate(l,m,nrepl)
            N = np.random.randn(nrepl)
            # Brownian motion evaluated at the stochastic time increment
            DX = s*N*np.sqrt(DS) + th*DS
            #print DS.shape, DX.shape, DXs.shape
            DXs[:,t] = DX
        x = np.cumsum(DXs,1)
        return x
class Heston(object):
    '''Heston stochastic-volatility model, Euler-discretized.

    dX = m*dt + sqrt(v)*dB_1,   dv = kappa*(eta - v)*dt + lambd*sqrt(v)*dB_2,
    with corr(dB_1, dB_2) = r.
    '''

    def __init__(self):
        pass

    def simulate(self, m, kappa, eta, lambd, r, ts, nrepl, tratio=1.):
        """Simulate nrepl paths on the grid ts.

        Returns (x, vts): cumulative process paths and variance paths,
        each of shape (nrepl, len(ts)). ``tratio`` is kept for interface
        compatibility but is unused (as in the original).
        """
        nobs = len(ts)
        # time increments; the first interval is measured from 0
        dt = np.zeros(nobs)
        dt[0] = ts[0] - 0
        dt[1:] = np.diff(ts)

        # correlated Brownian increments for price (dB_1) and variance (dB_2)
        dB_1 = np.sqrt(dt) * np.random.randn(nrepl, nobs)
        dB_2u = np.sqrt(dt) * np.random.randn(nrepl, nobs)
        dB_2 = r*dB_1 + np.sqrt(1-r**2)*dB_2u

        vt = eta*np.ones(nrepl)   # start the variance at its long-run mean
        dXs = np.zeros((nrepl, nobs))
        vts = np.zeros((nrepl, nobs))
        # (removed unused locals T, DXs and v from the original)
        for t in range(nobs):
            # Euler step for variance and process increment
            dv = kappa*(eta-vt)*dt[t] + lambd*np.sqrt(vt)*dB_2[:, t]
            dX = m*dt[t] + np.sqrt(vt*dt[t]) * dB_1[:, t]
            # NOTE(review): vt can go negative for some parameters, making
            # sqrt(vt) NaN; no truncation is applied -- TODO confirm
            # intended (CIRSubordinatedBrownian clips with np.maximum).
            vt = vt + dv
            vts[:, t] = vt
            dXs[:, t] = dX
        x = np.cumsum(dXs, 1)
        return x, vts
class CIRSubordinatedBrownian(object):
    '''Brownian motion subordinated to CIR stochastic time.

    The business-time increment dtau is driven by a (discretized) CIR
    activity-rate process y; the process increment is Gaussian with mean
    m*dtau and st.dev sigma*sqrt(dtau).
    '''

    def __init__(self):
        pass

    def simulate(self, m, kappa, T_dot, lambd, sigma, ts, nrepl):
        """Simulate nrepl paths on the grid ts.

        Returns (x, tau, y): the subordinated process, the cumulative
        stochastic time and the CIR activity-rate paths, each of shape
        (nrepl, len(ts)).
        """
        nobs = len(ts)
        # calendar-time increments; the first interval is measured from 0
        dtarr = np.zeros(nobs)
        dtarr[0] = ts[0] - 0
        dtarr[1:] = np.diff(ts)

        dB = np.sqrt(dtarr) * np.random.randn(nrepl, nobs)

        yt = 1.   # initial activity rate
        dXs = np.zeros((nrepl, nobs))
        dtaus = np.zeros((nrepl, nobs))
        y = np.zeros((nrepl, nobs))
        # (removed unused locals T and DXs from the original)
        for t in range(nobs):
            dt = dtarr[t]
            # CIR step for the activity rate
            dy = kappa*(T_dot-yt)*dt + lambd*np.sqrt(yt)*dB[:, t]
            yt = np.maximum(yt+dy, 1e-10)  # keep away from zero ?
            # stochastic-time increment, floored so the normal scale is > 0
            dtau = np.maximum(yt*dt, 1e-6)
            dX = np.random.normal(loc=m*dtau, scale=sigma*np.sqrt(dtau))
            y[:, t] = yt
            dtaus[:, t] = dtau
            dXs[:, t] = dX
        tau = np.cumsum(dtaus, 1)
        x = np.cumsum(dXs, 1)
        return x, tau, y
def schout2contank(a, b, d):
    """Convert NIG parameters from Schoutens (a, b, d) notation to
    Cont-Tankov (theta, kappa, sigma) notation."""
    gamma = np.sqrt(a**2 - b**2)
    theta = d*b/gamma
    kappa = 1/(d*gamma)
    sigma = np.sqrt(d/gamma)
    return theta, kappa, sigma
if __name__ == '__main__':

    #Merton Jump Diffusion
    #^^^^^^^^^^^^^^^^^^^^^

    # grid of time values at which the process is evaluated
    #("0" will be added, too)
    # BUG FIX: nobs must be an integer -- the original used ``252.`` and
    # passed it as np.linspace's ``num`` argument (and implicitly as array
    # sizes), which raises TypeError on modern numpy.
    nobs = 252 #1000 #252
    ts = np.linspace(1./nobs, 1., nobs)
    nrepl=5 # number of simulations
    mu=.010 # deterministic drift
    sigma = .020 # Gaussian component
    lambd = 3.45 *10 # Poisson process arrival rate
    a=0 # drift of log-jump
    D=.2 # st.dev of log-jump
    jd = JumpDiffusionMerton()
    x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
    plt.figure()
    plt.plot(x.T) #Todo
    plt.title('Merton jump-diffusion')

    sigma = 0.2
    lambd = 3.45
    x = jd.simulate(mu,sigma,lambd,a,D,ts,nrepl)
    plt.figure()
    plt.plot(x.T) #Todo
    plt.title('Merton jump-diffusion')

    #Kou jump diffusion
    #^^^^^^^^^^^^^^^^^^
    mu=.0 # deterministic drift
    lambd=4.25 # Poisson process arrival rate
    p=.5 # prob. of up-jump
    e1=.2 # parameter of up-jump
    e2=.3 # parameter of down-jump
    sig=.2 # Gaussian component

    x = JumpDiffusionKou().simulate(mu,sig,lambd,p,e1,e2,ts,nrepl)

    plt.figure()
    plt.plot(x.T) #Todo
    plt.title('double exponential (Kou jump diffusion)')

    #variance-gamma
    #^^^^^^^^^^^^^^
    mu = .1 # deterministic drift in subordinated Brownian motion
    kappa = 1. #10. #1 # inverse for gamma shape parameter
    sig = 0.5 #.2 # s.dev in subordinated Brownian motion

    x = VG().simulate(mu,sig,kappa,ts,nrepl)
    plt.figure()
    plt.plot(x.T) #Todo
    plt.title('variance gamma')

    #normal-inverse-Gaussian
    #^^^^^^^^^^^^^^^^^^^^^^^
    # (Schoutens notation)
    al = 2.1
    be = 0
    de = 1
    # convert parameters to Cont-Tankov notation
    th,k,s = schout2contank(al,be,de)

    x = NIG().simulate(th,k,s,ts,nrepl)
    plt.figure()
    plt.plot(x.T) #Todo x-axis
    plt.title('normal-inverse-Gaussian')

    #Heston Stochastic Volatility
    #^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    m=.0
    kappa = .6 # 2*Kappa*Eta>Lambda^2
    eta = .3**2
    lambd =.25
    r = -.7
    T = 20.
    # BUG FIX: as above, the number of grid points must be an int
    nobs = int(252*T) #1000 #252
    tsh = np.linspace(T/nobs, T, nobs)
    x, vts = Heston().simulate(m,kappa, eta,lambd,r, tsh, nrepl, tratio=20.)

    plt.figure()
    plt.plot(x.T)
    plt.title('Heston Stochastic Volatility')

    plt.figure()
    plt.plot(np.sqrt(vts).T)
    plt.title('Heston Stochastic Volatility - CIR Vol.')

    plt.figure()
    plt.subplot(2,1,1)
    plt.plot(x[0])
    plt.title('Heston Stochastic Volatility process')
    plt.subplot(2,1,2)
    plt.plot(np.sqrt(vts[0]))
    plt.title('CIR Volatility')

    #CIR subordinated Brownian
    #^^^^^^^^^^^^^^^^^^^^^^^^^
    m=.1
    sigma=.4
    kappa=.6 # 2*Kappa*T_dot>Lambda^2
    T_dot=1
    lambd=1
    #T=252*10
    #dt=1/252
    #nrepl=2
    T = 10.
    # BUG FIX: as above, the number of grid points must be an int
    nobs = int(252*T) #1000 #252
    tsh = np.linspace(T/nobs, T, nobs)
    x, tau, y = CIRSubordinatedBrownian().simulate(m, kappa, T_dot,lambd,sigma, tsh, nrepl)

    plt.figure()
    plt.plot(tsh, x.T)
    plt.title('CIRSubordinatedBrownian process')

    plt.figure()
    plt.plot(tsh, y.T)
    plt.title('CIRSubordinatedBrownian - CIR')

    plt.figure()
    plt.plot(tsh, tau.T)
    plt.title('CIRSubordinatedBrownian - stochastic time ')

    plt.figure()
    plt.subplot(2,1,1)
    plt.plot(tsh, x[0])
    plt.title('CIRSubordinatedBrownian process')
    plt.subplot(2,1,2)
    plt.plot(tsh, y[0], label='CIR')
    plt.plot(tsh, tau[0], label='stoch. time')
    plt.legend(loc='upper left')
    plt.title('CIRSubordinatedBrownian')

    #plt.show()
|
bsd-3-clause
|
logithr/django-htpayway
|
htpayway/tests.py
|
1
|
2885
|
# -*- coding: utf-8 -*-
from django.test import TestCase, RequestFactory
from django.core import exceptions
from django.contrib.auth.models import AnonymousUser
from .utils import get_payway_class, begin_transaction, format_amount, PayWay
from .models import Transaction
from decimal import Decimal, InvalidOperation
class CustomPayWay(PayWay):
    # Concrete PayWay subclass with fixed test credentials and callback
    # URLs; used as the payment configuration throughout this test module.
    pgw_shop_id = '123'
    pgw_secret_key = 'secretkey'
    pgw_success_url = u'http://localhost:8000/payway/success/'
    pgw_failure_url = u'http://localhost:8000/payway/failure/'
    pgw_authorization_type = '0'
    pgw_language = 'hr'
class TestImports(TestCase):
    """Tests for the get_payway_class lookup helper."""

    def test_missing_setting_raises(self):
        # an empty dotted path cannot be resolved to a PayWay class
        with self.assertRaises(exceptions.ImproperlyConfigured):
            get_payway_class('')

    def test_get_payment_class_with_string(self):
        # lookup by dotted-path string returns an instantiable class
        pw = get_payway_class('htpayway.tests.CustomPayWay')()
        self.assertEqual(pw.pgw_shop_id, '123')

    def test_get_payment_class_with_class(self):
        # passing the class itself is accepted unchanged
        pw = get_payway_class(CustomPayWay)()
        self.assertEqual(pw.pgw_shop_id, '123')
class TestPayWay(TestCase):
    """Tests for PayWay construction, pgw_* attribute handling and the
    transaction signature."""

    def setUp(self):
        self.payway = CustomPayWay()

    def test_create_signature_for_create(self):
        # begin_transaction must produce the exact SHA-512-style signature
        # expected by the gateway for these fixed inputs
        request = RequestFactory().get('/')
        request.user = AnonymousUser()
        transaction = begin_transaction(
            request, {'pgw_order_id': '1', 'amount': '123.00'},
            htpayway_class=CustomPayWay)
        self.assertEqual(
            transaction.pgw_signature,
            'fc424eb91bb260f8364326629b72de6ef7471cf4d09dc3c998657119cd0df2af' +
            '1f313c21108659e87573b0b6525c74f223b0378ab65dbf3e9ffd84697c31b319'
        )

    def test_pgw_arguments_are_initialized(self):
        # keyword arguments with a pgw_ prefix become instance attributes
        p = CustomPayWay(pgw_email='a@a.com')
        self.assertEqual(p.pgw_email, 'a@a.com')

    def test_non_pgw_arguments_are_skipped(self):
        # other keyword arguments are silently ignored
        p = CustomPayWay(foo=1)
        self.assertFalse(hasattr(p, 'foo'))

    def test_pgw_data_from_model(self):
        # pgw_data() exposes only the pgw_* fields of a Transaction
        p = Transaction(id=1, pgw_transaction_id=2, pgw_amount='300')
        pgw_data = p.pgw_data()
        self.assertNotIn('id', pgw_data)
        self.assertEqual(pgw_data['pgw_transaction_id'], 2)
        self.assertEqual(pgw_data['pgw_amount'], '300')

    def test_pgw_data_from_class(self):
        # pgw_data() also reflects attributes set at construction time
        p = CustomPayWay(pgw_email='a@a.com')
        self.assertEqual(p.pgw_data()['pgw_email'], 'a@a.com')
class TestUtils(TestCase):
    """Tests for the format_amount helper (amount -> gateway cents string)."""

    def test_format_amount_raises_on_non_decimal_input(self):
        # non-numeric strings and None are rejected by Decimal conversion
        with self.assertRaises(InvalidOperation):
            format_amount('')
        with self.assertRaises(TypeError):
            format_amount(None)

    def test_format_amount_with_2_decimal_places(self):
        self.assertEqual(format_amount('123.45'), '12345')

    def test_format_amount_with_3_decimal_places(self):
        # the third decimal place is truncated, not rounded
        self.assertEqual(format_amount(Decimal('1000.123')), '100012')
|
mit
|
wil/pyroman
|
pyroman/nat.py
|
1
|
5751
|
#Copyright (c) 2011 Erich Schubert erich@debian.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyroman import Firewall
from util import Util
from port import Port, PortInvalidSpec
from chain import Chain
from exception import PyromanException
class Nat:
    """
    Represents a Network Address Translation rule, applied to the "server"
    host for traffic from the listed "client" hosts.
    """

    def __init__(self, client, server, ip, port, dport, dir, loginfo):
        """
        Create a new NAT rule.

        client -- clients allowed to access this NAT rule
        server -- host nick the NAT is applied to
        ip -- IP that is used in NAT
        port -- Ports that are used in NAT
        dport -- Destination port for single port redirections
        dir -- incoming ("in"), outgoing ("out") or bidirectional ("both")
        loginfo -- source location, echoed in error messages

        Note that the NAT is always applied to the "server" host, the UI
        accessible function is responsible to eventually exchange client
        and server for "outgoing" NATs (where the naming of client, server
        makes more sense the other way, think of workstations accessing
        web server via a NAT)
        """
        if server == "":
            raise PyromanException("Nat lacking a server host (client: %s, server: %s, ip: %s) at %s" % (client, server, ip, loginfo))
        if ip == "":
            raise PyromanException("Nat lacking IP address: (client: %s, server: %s) at %s" % (client, server, loginfo))
        if dir not in ["in", "out", "both"]:
            raise PyromanException("Nat with invalid direction: (client: %s, server: %s, ip: %s, dir: %s) at %s" % (client, server, ip, dir, loginfo))
        if not Util.verify_ip4(ip):
            raise PyromanException("Nat with invalid IP address: (client: %s, server: %s, ip: %s) at %s" % (client, server, ip, loginfo))
        # parse the optional source-port specification
        if port:
            try:
                self.port = Port(port)
            except PortInvalidSpec:
                raise PyromanException("Nat port specification invalid: (client: %s, server: %s, ip: %s, port: %s) at %s " % (client, server, ip, port, loginfo))
            if not self.port.forIPv4():
                raise PyromanException("Non-IPv4 port specified: "+port)
        else:
            self.port = None
        # parse the optional destination-port specification
        if dport:
            try:
                self.dport = Port(dport)
            except PortInvalidSpec:
                raise PyromanException("Nat dport specification invalid: (client: %s, server: %s, ip: %s, port: %s, dport: %s) at %s " % (client, server, ip, port, dport, loginfo))
            if not self.dport.forIPv4():
                raise PyromanException("Non-IPv4 port specified: "+dport)
        else:
            self.dport = None
        # BUG FIX: this configuration check must run *before* the protocol
        # comparison below. The original compared self.port.proto first and
        # crashed with AttributeError (self.port is None) when only a dport
        # was given, instead of reporting this friendly error.
        if dport and not port:
            raise PyromanException("Nat with destination port, but no source port: (client: %s, server: %s, ip: %s, dport: %s) at %s" % (client, server, ip, dport, loginfo))
        if self.dport and not (self.port.proto == self.dport.proto):
            raise PyromanException("Nat ports have different protocols: (client: %s, server: %s, ip: %s, port: %s, dport: %s) at %s" % (client, server, ip, port, dport, loginfo))
        # client/server may be whitespace-separated lists of host nicks
        self.client = Util.splitter.split(client)
        self.server = Util.splitter.split(server)
        self.ip = ip
        # self.port, self.dport are set above
        self.dir = dir
        self.loginfo = loginfo

    def gen_snat(self, client, server):
        """
        Internal helper: emit SNAT rules for one (client, server) pair
        into the nat POSTROUTING chain.
        """
        iff = client.iface.get_filter("d")
        target = "SNAT --to-source %s" % self.ip
        # do we have a port restriction?
        pfilter = ""
        if self.port and self.dport:
            # match the redirected port, rewrite back to the public port
            pfilter = self.dport.get_filter_proto() + " " + self.dport.get_filter_port("s")
            target = target + ":%s" % self.port.port
        elif self.port:
            pfilter = self.port.get_filter_proto() + " " + self.port.get_filter_port("s")
        c = Firewall.chains["natPOST"]
        for sip in server.ip:
            # ('filter' renamed to 'rule' to avoid shadowing the builtin)
            rule = iff[0] + " -s %s" % sip
            c.append4("%s %s -j %s" % (rule, pfilter, target), self.loginfo)

    def gen_dnat(self, client, server):
        """
        Internal helper: emit DNAT rules for one (client, server) pair
        into the nat PREROUTING chain.
        """
        iff = client.iface.get_filter("s")
        rule = iff[0] + " -d %s" % self.ip
        # do we have a port restriction?
        pfilter = ""
        if self.port:
            pfilter = self.port.get_filter_proto() + " " + self.port.get_filter_port("d")
        c = Firewall.chains["natPRE"]
        for sip in server.ip:
            target = "DNAT --to-destination %s" % sip
            if self.dport:
                target = target + ":%s" % self.dport.port
            c.append4("%s %s -j %s" % (rule, pfilter, target), self.loginfo)

    def generate(self):
        """Generate iptables rules for every (client, server) combination
        in the configured direction(s)."""
        for c in self.client:
            for s in self.server:
                client = Firewall.hosts[c]
                server = Firewall.hosts[s]
                # sanity checks, that should be moved to "verify"
                if not client or not server:
                    raise PyromanException("Client or server not found for NAT defined at %s" % self.loginfo)
                if client.iface == server.iface:
                    raise PyromanException("client interface and server interface match (i.e. cannot NAT!) for NAT defined at %s" % self.loginfo)
                if self.dir in ["in", "both"]:
                    self.gen_dnat(client, server)
                if self.dir in ["out", "both"]:
                    self.gen_snat(client, server)
|
mit
|
TeamMac/android_kernel_huawei_p6-u06
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling for: perf script -s sctop.py [comm] [interval]
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None            # when set, only count syscalls from this comm
default_interval = 3       # seconds between display refreshes
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    # both [comm] and [interval] were given
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # a single argument is an interval if numeric, else a comm name
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# syscall id -> count (autodict comes from perf's Util helpers)
syscalls = autodict()
def trace_begin():
    """perf callback invoked once at trace start.

    Launches the periodic totals printer on a background thread so syscall
    events can keep being counted concurrently.
    (Removed the unreachable-in-effect trailing ``pass`` of the original.)
    """
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # perf tracepoint handler: count one syscall entry, optionally
    # restricted to the requested comm.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # first occurrence of this id: autodict's leaf is not yet an int
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever on a background thread: every ``interval`` seconds,
    # clear the terminal, print per-syscall counts sorted by count
    # (highest first), then reset the counters.
    # NOTE: Python 2 only (print statements, iteritems, tuple-unpacking
    # lambda), matching the perf-script environment this targets.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name may fail for unknown ids; skip those rows
                pass
        # start the next interval from zero
        syscalls.clear()
        time.sleep(interval)
|
gpl-2.0
|
tonihr/pyGeo
|
Geometrias/PuntoGeodesico.py
|
1
|
6474
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''!
Created on 5/2/2015
@author: Antonio Hermosilla Rodrigo.
@contact: anherro285@gmail.com
@organization: Antonio Hermosilla Rodrigo.
@copyright: (C) 2015 by Antonio Hermosilla Rodrigo
@version: 1.0.0
'''
from Geometrias.Angulo import Angulo
class PuntoGeodesico(object):
    '''!
    Stores the information of a geodetic point: latitude, longitude and
    an optional ellipsoidal height.

    Examples:
        p = PuntoGeodesico(40, -1)
        p = PuntoGeodesico(40, -1, 10)
    '''
    # Private storage: latitude/longitude as Angulo instances, height as float.
    __lat = None
    __lon = None
    __h = None

    def __init__(self, *args):
        '''!
        Constructor: accepts no arguments, (lat, lon) or (lat, lon, height).
        '''
        if len(args) == 0:
            pass
        elif len(args) == 2:
            self.setLatitud(args[0])
            self.setLongitud(args[1])
        elif len(args) == 3:
            self.setLatitud(args[0])
            self.setLongitud(args[1])
            self.setAlturaElipsoidal(args[2])
        else:
            raise Exception("La clase PuntoGeodesico recibe 2 o 3 parametros como argumentos.\nSe han introducido: "+str(len(args))+" parametros.")

    def setLatitud(self, Latitud):
        '''!
        @brief: Set the latitude of the point.
        @param Latitud Angulo|float|int|str: latitude value.
        @note If an Angulo instance is passed, it must have "latitud" format.
        @exception: Raised when the value is neither an Angulo nor
        convertible into one.
        '''
        if isinstance(Latitud, Angulo):
            if Latitud.getFormato() == "latitud":
                self.__lat = Latitud
            else:
                raise Exception("El formato del ángulo introducido no es de tipo latitud")
        elif isinstance(Latitud, float) or isinstance(Latitud, int) or isinstance(Latitud, str):
            # BUG FIX: the original assigned self.__lat inside a ``finally``
            # block, so a failed float() conversion raised NameError (``aux``
            # unbound) instead of the intended exception. Convert first,
            # then assign.
            try:
                aux = float(Latitud)
            except Exception as e:
                raise Exception(e)
            self.__lat = Angulo(aux, formato="latitud")

    def setLongitud(self, Longitud):
        '''!
        @brief: Set the longitude of the point.
        @param Longitud Angulo|float|int|str: longitude value.
        @note If an Angulo instance is passed, it must have "longitud180"
        or "longitud360" format.
        @exception: Raised when the value is neither an Angulo nor
        convertible into one.
        '''
        if isinstance(Longitud, Angulo):
            if Longitud.getFormato() == "longitud180" or Longitud.getFormato() == "longitud360":
                self.__lon = Longitud
            else:
                raise Exception("El formato del ángulo introducido no es de tipo longitud180 o longitud360")
        elif isinstance(Longitud, float) or isinstance(Longitud, int) or isinstance(Longitud, str):
            try:
                aux = float(Longitud)
            except Exception as e:
                raise Exception(e)
            # NOTE(review): exactly -180 is rejected by both branches --
            # TODO confirm this boundary behavior is intended.
            if aux > -180 and aux <= 180:
                self.__lon = Angulo(aux, formato="longitud180")
            elif aux > 0 and aux < 360:
                self.__lon = Angulo(aux, formato="longitud360")
            else:
                raise Exception("La longitud no se puede asociar a ningun tipo conocido.")

    def setAlturaElipsoidal(self, AlturaElipsoidal):
        '''!
        @brief: Set the ellipsoidal height of the point.
        @param AlturaElipsoidal float|int|str: ellipsoidal height value.
        @exception: Raised when the value cannot be converted to a number.
        '''
        if AlturaElipsoidal is None:
            return
        if isinstance(AlturaElipsoidal, float) or isinstance(AlturaElipsoidal, int) or isinstance(AlturaElipsoidal, str):
            # BUG FIX: same ``finally`` misuse as setLatitud -- assign only
            # after a successful conversion.
            try:
                aux = float(AlturaElipsoidal)
            except Exception as e:
                raise Exception(e)
            self.__h = aux

    def getLatitud(self):
        '''!
        @brief: Return the latitude of the point.
        @return float: latitude of the point.
        '''
        return self.__lat.getAngulo()

    def getLongitud(self):
        '''!
        @brief: Return the longitude of the point.
        @return float: longitude of the point.
        '''
        return self.__lon.getAngulo()

    def getAlturaElipsoidal(self):
        '''!
        @brief: Return the ellipsoidal height of the point.
        @return float: ellipsoidal height of the point.
        '''
        return self.__h

    def toString(self):
        '''!
        @brief: Return all the point information as a str.
        @return str: human-readable summary of the point.
        '''
        # (rewritten without the original's dangling line-continuation
        # backslashes; the produced text is identical)
        return ("Latitud:" + str(self.getLatitud()) + "\n"
                "Longitud:" + str(self.getLongitud()) + "\n"
                "Altura Elipsoidal:" + str(self.getAlturaElipsoidal()) + "\n")

    def toJSON(self):
        '''!
        @brief: Return all the point information in JSON format.
        @return str: a JSON-formatted string.
        '''
        # NOTE(review): the key "altura elispoidal" is misspelled (should be
        # "elipsoidal"), but it is kept because consumers may rely on it.
        return ('{\n'
                '"latitud":"' + str(self.getLatitud()) + '",\n'
                '"longitud":"' + str(self.getLongitud()) + '",\n'
                '"altura elispoidal":"' + str(self.getAlturaElipsoidal()) + '"\n'
                '}')

    def toGeoJSON(self):
        '''!
        @brief: Return a GeoJSON representation of the point.
        @return str: a GeoJSON Point (coordinates are [longitude, latitude]).
        '''
        return ('{\n'
                '"type":"Point",\n'
                '"coordinates":[' + str(self.getLongitud()) + ',' + str(self.getLatitud()) + ']\n'
                '}')
def main():
    '''Demo: build a point from string coordinates and print each representation.'''
    # Latitude '10', longitude '180', ellipsoidal height 50.
    p=PuntoGeodesico('10','180',50)
    print(p.toString())
    print(p.toJSON())
    print(p.toGeoJSON())
    # Round-trip check: the GeoJSON output must parse as valid JSON.
    import json
    print(json.loads(p.toGeoJSON())['coordinates'])
# Run the demo only when executed as a script, not on import.
if __name__=="__main__":
    main()
|
gpl-2.0
|
dosiecki/NewsBlur
|
vendor/feedvalidator/channel.py
|
16
|
9663
|
"""$Id: channel.py 711 2006-10-25 00:43:41Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 711 $"
__date__ = "$Date: 2006-10-25 00:43:41 +0000 (Wed, 25 Oct 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import *
from itunes import itunes_channel
from extension import *
#
# channel element.
#
class channel(validatorBase, rfc2396, extension_channel, itunes_channel):
  """Validator for an RSS <channel> element.

  Checks for required children (title, link, description), duplicate
  singleton children, language declarations, and — for RSS 1.0 — the
  rdf:about attribute and the <items> child.  do_* methods dispatch each
  recognised child element to its own validator.
  """
  def __init__(self):
    # Filled in by the nested link validator (see class link below).
    self.link=None
    validatorBase.__init__(self)
  def validate(self):
    if not "description" in self.children:
      self.log(MissingDescription({"parent":self.name,"element":"description"}))
    if not "link" in self.children:
      self.log(MissingLink({"parent":self.name, "element":"link"}))
    if not "title" in self.children:
      self.log(MissingTitle({"parent":self.name, "element":"title"}))
    # A language may come from <language>, <dc:language>, or an xml:lang attr.
    if not "dc_language" in self.children and not "language" in self.children:
      if not self.xmlLang:
        self.log(MissingDCLanguage({"parent":self.name, "element":"language"}))
    # These children may appear at most once.
    if self.children.count("image") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"image"}))
    if self.children.count("textInput") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"textInput"}))
    if self.children.count("skipHours") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipHours"}))
    if self.children.count("skipDays") > 1:
      self.log(DuplicateElement({"parent":self.name, "element":"skipDays"}))
    # RSS 1.0: rdf:about must be a valid URI and <items> is required.
    if self.attrs.has_key((rdfNS,"about")):
      self.value = self.attrs.getValue((rdfNS, "about"))
      rfc2396.validate(self, extraParams={"attr": "rdf:about"})
      if not "items" in self.children:
        self.log(MissingElement({"parent":self.name, "element":"items"}))
    if self.itunes: itunes_channel.validate(self)
  def do_image(self):
    from image import image
    return image(), noduplicates()
  def do_textInput(self):
    from textInput import textInput
    return textInput(), noduplicates()
  def do_textinput(self):
    # Lowercase "textinput" is valid RDF but in RSS 2.0 it is almost always a
    # misspelling of "textInput"; flag it unless rdf:about marks this as RSS 1.0.
    if not self.attrs.has_key((rdfNS,"about")):
      # optimize for RSS 2.0.  If it is not valid RDF, assume that it is
      # a simple misspelling (in other words, the error message will be
      # less than helpful on RSS 1.0 feeds).
      self.log(UndefinedElement({"parent":self.name, "element":"textinput"}))
    return eater(), noduplicates()
  def do_link(self):
    return link(), noduplicates()
  def do_title(self):
    return nonhtml(), noduplicates(), nonblank()
  def do_description(self):
    return nonhtml(), noduplicates()
  def do_blink(self):
    return blink(), noduplicates()
  # Atom elements allowed inside an RSS channel.
  def do_atom_author(self):
    from author import author
    return author()
  def do_atom_category(self):
    from category import category
    return category()
  def do_atom_contributor(self):
    from author import author
    return author()
  def do_atom_generator(self):
    from generator import generator
    return generator(), nonblank(), noduplicates()
  def do_atom_id(self):
    return rfc2396_full(), noduplicates()
  def do_atom_icon(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_atom_link(self):
    from link import link
    return link()
  def do_atom_logo(self):
    return nonblank(), rfc2396(), noduplicates()
  def do_atom_title(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_subtitle(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_rights(self):
    from content import textConstruct
    return textConstruct(), noduplicates()
  def do_atom_updated(self):
    return rfc3339(), noduplicates()
  # Dublin Core / Creative Commons extensions: warn when both the core RSS
  # element and the equivalent extension element are present.
  def do_dc_creator(self):
    if "managingEditor" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return text() # duplicates allowed
  def do_dc_subject(self):
    if "category" in self.children:
      self.log(DuplicateSemantics({"core":"category", "ext":"dc:subject"}))
    return text() # duplicates allowed
  def do_dc_date(self):
    if "pubDate" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return w3cdtf(), noduplicates()
  def do_cc_license(self):
    if "creativeCommons_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return eater()
  def do_creativeCommons_license(self):
    if "cc_license" in self.children:
      self.log(DuplicateSemantics({"core":"creativeCommons:license", "ext":"cc:license"}))
    return rfc2396_full()
class rss20Channel(channel):
  """RSS 2.0 <channel>: adds the RSS 2.0-only children on top of channel."""
  def do_item(self):
    from item import rss20Item
    return rss20Item()
  def do_category(self):
    return category()
  def do_cloud(self):
    return cloud(), noduplicates()
  do_rating = validatorBase.leaf # TODO test cases?!?
  def do_ttl(self):
    return positiveInteger(), nonblank(), noduplicates()
  def do_docs(self):
    return rfc2396_full(), noduplicates()
  # Each of the following warns when the equivalent extension element is also
  # present (same pattern as the dc_* handlers on the base class).
  def do_generator(self):
    if "admin_generatorAgent" in self.children:
      self.log(DuplicateSemantics({"core":"generator", "ext":"admin:generatorAgent"}))
    return text(), noduplicates()
  def do_pubDate(self):
    if "dc_date" in self.children:
      self.log(DuplicateSemantics({"core":"pubDate", "ext":"dc:date"}))
    return rfc822(), noduplicates()
  def do_managingEditor(self):
    if "dc_creator" in self.children:
      self.log(DuplicateSemantics({"core":"managingEditor", "ext":"dc:creator"}))
    return email(), noduplicates()
  def do_webMaster(self):
    if "dc_publisher" in self.children:
      self.log(DuplicateSemantics({"core":"webMaster", "ext":"dc:publisher"}))
    return email(), noduplicates()
  def do_language(self):
    if "dc_language" in self.children:
      self.log(DuplicateSemantics({"core":"language", "ext":"dc:language"}))
    return iso639(), noduplicates()
  def do_copyright(self):
    if "dc_rights" in self.children:
      self.log(DuplicateSemantics({"core":"copyright", "ext":"dc:rights"}))
    return nonhtml(), noduplicates()
  def do_lastBuildDate(self):
    if "dcterms_modified" in self.children:
      self.log(DuplicateSemantics({"core":"lastBuildDate", "ext":"dcterms:modified"}))
    return rfc822(), noduplicates()
  def do_skipHours(self):
    from skipHours import skipHours
    return skipHours()
  def do_skipDays(self):
    from skipDays import skipDays
    return skipDays()
class rss10Channel(channel):
  """RSS 1.0 <channel>: RDF-based; tracks rdf:about values on the dispatcher."""
  def getExpectedAttrNames(self):
    # NOTE(review): the same (namespace, 'about') tuple appears twice here;
    # harmless but looks like a copy/paste slip — confirm against upstream.
    return [(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about'),
            (u'http://www.w3.org/1999/02/22-rdf-syntax-ns#', u'about')]
  def prevalidate(self):
    # Record this channel's rdf:about so item rdf:resource refs can be checked.
    if self.attrs.has_key((rdfNS,"about")):
      if not "abouts" in self.dispatcher.__dict__:
        self.dispatcher.__dict__["abouts"] = []
      self.dispatcher.__dict__["abouts"].append(self.attrs[(rdfNS,"about")])
  def do_items(self):  # this actually should be from the rss1.0 ns
    if not self.attrs.has_key((rdfNS,"about")):
      self.log(MissingAttribute({"parent":self.name, "element":self.name, "attr":"rdf:about"}))
    from item import items
    return items(), noduplicates()
  def do_rdfs_label(self):
    return text()
  def do_rdfs_comment(self):
    return text()
class link(rfc2396_full):
  """Channel <link>: validates as a full URI and stashes it on the parent."""
  def validate(self):
    # Expose the channel's link so other validators can compare against it.
    self.parent.link = self.value
    rfc2396_full.validate(self)
class blink(text):
  """Easter-egg validator: a <blink> element is always flagged."""
  def validate(self):
    self.log(NoBlink({}))
class category(nonhtml):
  """Channel <category>: non-HTML text with an optional 'domain' attribute."""
  def getExpectedAttrNames(self):
    return [(None, u'domain')]
class cloud(validatorBase):
  """RSS 2.0 <cloud>: all five attributes (domain, path, registerProcedure,
  protocol, port) are required; port must be a positive integer."""
  def getExpectedAttrNames(self):
    return [(None, u'domain'), (None, u'path'), (None, u'registerProcedure'),
        (None, u'protocol'), (None, u'port')]
  def prevalidate(self):
    if (None, 'domain') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"domain"}))
    else:
      self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"domain"}))
    # port: missing -> KeyError; non-numeric -> ValueError; <= 0 -> invalid.
    try:
      if int(self.attrs.getValue((None, 'port'))) <= 0:
        self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
      else:
        self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except KeyError:
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    except ValueError:
      self.log(InvalidIntegerAttribute({"parent":self.parent.name, "element":self.name, "attr":'port'}))
    if (None, 'path') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"path"}))
    else:
      self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"path"}))
    if (None, 'registerProcedure') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"registerProcedure"}))
    else:
      self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"registerProcedure"}))
    if (None, 'protocol') not in self.attrs.getNames():
      self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"protocol"}))
    else:
      self.log(ValidCloud({"parent":self.parent.name, "element":self.name, "attr":"protocol"}))
    ## TODO - is there a list of accepted protocols for this thing?
    return validatorBase.prevalidate(self)
|
mit
|
Max-Vader/namebench
|
nb_third_party/dns/tokenizer.py
|
246
|
17962
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tokenize DNS master file format"""
import cStringIO
import sys
import dns.exception
import dns.name
import dns.ttl
# Characters that end an identifier token in master-file syntax.
_DELIMITERS = {
    ' ' : True,
    '\t' : True,
    '\n' : True,
    ';' : True,
    '(' : True,
    ')' : True,
    '"' : True }
# Inside a quoted string only the closing quote is a delimiter.
_QUOTING_DELIMITERS = { '"' : True }
# Token type codes (see Token.ttype).
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6
class UngetBufferFull(dns.exception.DNSException):
    """Raised when an attempt is made to unget a token when the unget
    buffer is full."""
    # Both unget buffers (character and token) hold exactly one item.
    pass
class Token(object):
    """A DNS master file format token.
    @ivar ttype: The token type
    @type ttype: int
    @ivar value: The token value
    @type value: string
    @ivar has_escape: Does the token value contain escapes?
    @type has_escape: bool
    """
    def __init__(self, ttype, value='', has_escape=False):
        """Initialize a token instance.
        @param ttype: The token type
        @type ttype: int
        @ivar value: The token value
        @type value: string
        @ivar has_escape: Does the token value contain escapes?
        @type has_escape: bool
        """
        self.ttype = ttype
        self.value = value
        self.has_escape = has_escape
    # Simple type predicates, one per token-type constant.
    def is_eof(self):
        return self.ttype == EOF
    def is_eol(self):
        return self.ttype == EOL
    def is_whitespace(self):
        return self.ttype == WHITESPACE
    def is_identifier(self):
        return self.ttype == IDENTIFIER
    def is_quoted_string(self):
        return self.ttype == QUOTED_STRING
    def is_comment(self):
        return self.ttype == COMMENT
    def is_delimiter(self):
        return self.ttype == DELIMITER
    def is_eol_or_eof(self):
        return (self.ttype == EOL or self.ttype == EOF)
    # NOTE(review): __eq__/__ne__ are defined without __hash__, so instances
    # hash by identity while comparing by value — confirm tokens are never
    # used as dict keys or set members.
    def __eq__(self, other):
        if not isinstance(other, Token):
            return False
        return (self.ttype == other.ttype and
                self.value == other.value)
    def __ne__(self, other):
        if not isinstance(other, Token):
            return True
        return (self.ttype != other.ttype or
                self.value != other.value)
    def __str__(self):
        return '%d "%s"' % (self.ttype, self.value)
    def unescape(self):
        """Return a new Token with backslash escapes resolved.

        Handles both \\c (literal character) and \\DDD (three-digit decimal
        character code) escapes; returns self unchanged if no escapes exist.
        """
        if not self.has_escape:
            return self
        unescaped = ''
        l = len(self.value)
        i = 0
        while i < l:
            c = self.value[i]
            i += 1
            if c == '\\':
                if i >= l:
                    raise dns.exception.UnexpectedEnd
                c = self.value[i]
                i += 1
                if c.isdigit():
                    # \DDD: exactly three decimal digits required.
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c2 = self.value[i]
                    i += 1
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c3 = self.value[i]
                    i += 1
                    if not (c2.isdigit() and c3.isdigit()):
                        raise dns.exception.SyntaxError
                    c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
            unescaped += c
        return Token(self.ttype, unescaped)
    # compatibility for old-style tuple tokens
    def __len__(self):
        return 2
    def __iter__(self):
        return iter((self.ttype, self.value))
    def __getitem__(self, i):
        if i == 0:
            return self.ttype
        elif i == 1:
            return self.value
        else:
            raise IndexError
class Tokenizer(object):
"""A DNS master file format tokenizer.
A token is a (type, value) tuple, where I{type} is an int, and
I{value} is a string. The valid types are EOF, EOL, WHITESPACE,
IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.
@ivar file: The file to tokenize
@type file: file
@ivar ungotten_char: The most recently ungotten character, or None.
@type ungotten_char: string
@ivar ungotten_token: The most recently ungotten token, or None.
@type ungotten_token: (int, string) token tuple
@ivar multiline: The current multiline level. This value is increased
by one every time a '(' delimiter is read, and decreased by one every time
a ')' delimiter is read.
@type multiline: int
@ivar quoting: This variable is true if the tokenizer is currently
reading a quoted string.
@type quoting: bool
@ivar eof: This variable is true if the tokenizer has encountered EOF.
@type eof: bool
@ivar delimiters: The current delimiter dictionary.
@type delimiters: dict
@ivar line_number: The current line number
@type line_number: int
@ivar filename: A filename that will be returned by the L{where} method.
@type filename: string
"""
def __init__(self, f=sys.stdin, filename=None):
"""Initialize a tokenizer instance.
@param f: The file to tokenize. The default is sys.stdin.
This parameter may also be a string, in which case the tokenizer
will take its input from the contents of the string.
@type f: file or string
@param filename: the name of the filename that the L{where} method
will return.
@type filename: string
"""
if isinstance(f, str):
f = cStringIO.StringIO(f)
if filename is None:
filename = '<string>'
else:
if filename is None:
if f is sys.stdin:
filename = '<stdin>'
else:
filename = '<file>'
self.file = f
self.ungotten_char = None
self.ungotten_token = None
self.multiline = 0
self.quoting = False
self.eof = False
self.delimiters = _DELIMITERS
self.line_number = 1
self.filename = filename
def _get_char(self):
"""Read a character from input.
@rtype: string
"""
if self.ungotten_char is None:
if self.eof:
c = ''
else:
c = self.file.read(1)
if c == '':
self.eof = True
elif c == '\n':
self.line_number += 1
else:
c = self.ungotten_char
self.ungotten_char = None
return c
def where(self):
"""Return the current location in the input.
@rtype: (string, int) tuple. The first item is the filename of
the input, the second is the current line number.
"""
return (self.filename, self.line_number)
def _unget_char(self, c):
"""Unget a character.
The unget buffer for characters is only one character large; it is
an error to try to unget a character when the unget buffer is not
empty.
@param c: the character to unget
@type c: string
@raises UngetBufferFull: there is already an ungotten char
"""
if not self.ungotten_char is None:
raise UngetBufferFull
self.ungotten_char = c
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1
def get(self, want_leading = False, want_comment = False):
"""Get the next token.
@param want_leading: If True, return a WHITESPACE token if the
first character read is whitespace. The default is False.
@type want_leading: bool
@param want_comment: If True, return a COMMENT token if the
first token read is a comment. The default is False.
@type want_comment: bool
@rtype: Token object
@raises dns.exception.UnexpectedEnd: input ended prematurely
@raises dns.exception.SyntaxError: input was badly formed
"""
if not self.ungotten_token is None:
token = self.ungotten_token
self.ungotten_token = None
if token.is_whitespace():
if want_leading:
return token
elif token.is_comment():
if want_comment:
return token
else:
return token
skipped = self.skip_whitespace()
if want_leading and skipped > 0:
return Token(WHITESPACE, ' ')
token = ''
ttype = IDENTIFIER
has_escape = False
while True:
c = self._get_char()
if c == '' or c in self.delimiters:
if c == '' and self.quoting:
raise dns.exception.UnexpectedEnd
if token == '' and ttype != QUOTED_STRING:
if c == '(':
self.multiline += 1
self.skip_whitespace()
continue
elif c == ')':
if not self.multiline > 0:
raise dns.exception.SyntaxError
self.multiline -= 1
self.skip_whitespace()
continue
elif c == '"':
if not self.quoting:
self.quoting = True
self.delimiters = _QUOTING_DELIMITERS
ttype = QUOTED_STRING
continue
else:
self.quoting = False
self.delimiters = _DELIMITERS
self.skip_whitespace()
continue
elif c == '\n':
return Token(EOL, '\n')
elif c == ';':
while 1:
c = self._get_char()
if c == '\n' or c == '':
break
token += c
if want_comment:
self._unget_char(c)
return Token(COMMENT, token)
elif c == '':
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
return Token(EOF)
elif self.multiline:
self.skip_whitespace()
token = ''
continue
else:
return Token(EOL, '\n')
else:
# This code exists in case we ever want a
# delimiter to be returned. It never produces
# a token currently.
token = c
ttype = DELIMITER
else:
self._unget_char(c)
break
elif self.quoting:
if c == '\\':
c = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if c.isdigit():
c2 = self._get_char()
if c2 == '':
raise dns.exception.UnexpectedEnd
c3 = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
elif c == '\n':
raise dns.exception.SyntaxError('newline in quoted string')
elif c == '\\':
#
# It's an escape. Put it and the next character into
# the token; it will be checked later for goodness.
#
token += c
has_escape = True
c = self._get_char()
if c == '' or c == '\n':
raise dns.exception.UnexpectedEnd
token += c
if token == '' and ttype != QUOTED_STRING:
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
ttype = EOF
return Token(ttype, token, has_escape)
def unget(self, token):
"""Unget a token.
The unget buffer for tokens is only one token large; it is
an error to try to unget a token when the unget buffer is not
empty.
@param token: the token to unget
@type token: Token object
@raises UngetBufferFull: there is already an ungotten token
"""
if not self.ungotten_token is None:
raise UngetBufferFull
self.ungotten_token = token
def next(self):
"""Return the next item in an iteration.
@rtype: (int, string)
"""
token = self.get()
if token.is_eof():
raise StopIteration
return token
def __iter__(self):
return self
# Helpers
def get_int(self):
"""Read the next token and interpret it as an integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
return int(token.value)
def get_uint8(self):
"""Read the next token and interpret it as an 8-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 255:
raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)
return value
def get_uint16(self):
"""Read the next token and interpret it as a 16-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 65535:
raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
return value
def get_uint32(self):
"""Read the next token and interpret it as a 32-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
value = long(token.value)
if value < 0 or value > 4294967296L:
raise dns.exception.SyntaxError('%d is not an unsigned 32-bit integer' % value)
return value
def get_string(self, origin=None):
"""Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
return token.value
def get_identifier(self, origin=None):
"""Read the next token and raise an exception if it is not an identifier.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return token.value
def get_name(self, origin=None):
"""Read the next token and interpret it as a DNS name.
@raises dns.exception.SyntaxError:
@rtype: dns.name.Name object"""
token = self.get()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.name.from_text(token.value, origin)
def get_eol(self):
"""Read the next token and raise an exception if it isn't EOL or
EOF.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get()
if not token.is_eol_or_eof():
raise dns.exception.SyntaxError('expected EOL or EOF, got %d "%s"' % (token.ttype, token.value))
return token.value
def get_ttl(self):
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.ttl.from_text(token.value)
|
apache-2.0
|
Jonadabe/letsencrypt
|
letsencrypt-nginx/letsencrypt_nginx/tests/obj_test.py
|
55
|
3978
|
"""Test the helper objects in letsencrypt_nginx.obj."""
import unittest
class AddrTest(unittest.TestCase):
    """Test the Addr class."""
    def setUp(self):
        from letsencrypt_nginx.obj import Addr
        # Fixtures cover: bare IP, wildcard port + ssl, explicit port,
        # wildcard host with default_server, hostname, port-only, unix socket.
        self.addr1 = Addr.fromstring("192.168.1.1")
        self.addr2 = Addr.fromstring("192.168.1.1:* ssl")
        self.addr3 = Addr.fromstring("192.168.1.1:80")
        self.addr4 = Addr.fromstring("*:80 default_server ssl")
        self.addr5 = Addr.fromstring("myhost")
        self.addr6 = Addr.fromstring("80 default_server spdy")
        self.addr7 = Addr.fromstring("unix:/var/run/nginx.sock")
    def test_fromstring(self):
        # Each fixture's parsed addr/port/ssl/default flags.
        self.assertEqual(self.addr1.get_addr(), "192.168.1.1")
        self.assertEqual(self.addr1.get_port(), "")
        self.assertFalse(self.addr1.ssl)
        self.assertFalse(self.addr1.default)
        self.assertEqual(self.addr2.get_addr(), "192.168.1.1")
        self.assertEqual(self.addr2.get_port(), "*")
        self.assertTrue(self.addr2.ssl)
        self.assertFalse(self.addr2.default)
        self.assertEqual(self.addr3.get_addr(), "192.168.1.1")
        self.assertEqual(self.addr3.get_port(), "80")
        self.assertFalse(self.addr3.ssl)
        self.assertFalse(self.addr3.default)
        self.assertEqual(self.addr4.get_addr(), "*")
        self.assertEqual(self.addr4.get_port(), "80")
        self.assertTrue(self.addr4.ssl)
        self.assertTrue(self.addr4.default)
        self.assertEqual(self.addr5.get_addr(), "myhost")
        self.assertEqual(self.addr5.get_port(), "")
        self.assertFalse(self.addr5.ssl)
        self.assertFalse(self.addr5.default)
        self.assertEqual(self.addr6.get_addr(), "")
        self.assertEqual(self.addr6.get_port(), "80")
        self.assertFalse(self.addr6.ssl)
        self.assertTrue(self.addr6.default)
        # Unix sockets are unsupported: fromstring returns None.
        self.assertEqual(None, self.addr7)
    def test_str(self):
        # Note: unrecognized tokens like "spdy" are dropped from str() output.
        self.assertEqual(str(self.addr1), "192.168.1.1")
        self.assertEqual(str(self.addr2), "192.168.1.1:* ssl")
        self.assertEqual(str(self.addr3), "192.168.1.1:80")
        self.assertEqual(str(self.addr4), "*:80 default_server ssl")
        self.assertEqual(str(self.addr5), "myhost")
        self.assertEqual(str(self.addr6), "80 default_server")
    def test_eq(self):
        from letsencrypt_nginx.obj import Addr
        # Equality ignores extra unrecognized tokens and non-Addr operands.
        new_addr1 = Addr.fromstring("192.168.1.1 spdy")
        self.assertEqual(self.addr1, new_addr1)
        self.assertNotEqual(self.addr1, self.addr2)
        self.assertFalse(self.addr1 == 3333)
    def test_set_inclusion(self):
        from letsencrypt_nginx.obj import Addr
        # Equal addrs must hash equal so sets de-duplicate them.
        set_a = set([self.addr1, self.addr2])
        addr1b = Addr.fromstring("192.168.1.1")
        addr2b = Addr.fromstring("192.168.1.1:* ssl")
        set_b = set([addr1b, addr2b])
        self.assertEqual(set_a, set_b)
class VirtualHostTest(unittest.TestCase):
    """Test the VirtualHost class."""
    def setUp(self):
        from letsencrypt_nginx.obj import VirtualHost
        from letsencrypt_nginx.obj import Addr
        # Args: filepath, addrs, ssl, enabled, names, raw.
        self.vhost1 = VirtualHost(
            "filep",
            set([Addr.fromstring("localhost")]), False, False,
            set(['localhost']), [])
    def test_eq(self):
        from letsencrypt_nginx.obj import Addr
        from letsencrypt_nginx.obj import VirtualHost
        # Addr equality ignores the extra "blah" token, so the vhosts compare equal.
        vhost1b = VirtualHost(
            "filep",
            set([Addr.fromstring("localhost blah")]), False, False,
            set(['localhost']), [])
        self.assertEqual(vhost1b, self.vhost1)
        self.assertEqual(str(vhost1b), str(self.vhost1))
        self.assertFalse(vhost1b == 1234)
    def test_str(self):
        stringified = '\n'.join(['file: filep', 'addrs: localhost',
                                 "names: set(['localhost'])", 'ssl: False',
                                 'enabled: False'])
        self.assertEqual(stringified, str(self.vhost1))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
|
apache-2.0
|
charlesweir/BrickPython
|
test/TestSensor.py
|
1
|
3630
|
# Tests for Sensor
#
# Copyright (c) 2014 Charles Weir. Shared under the MIT Licence.
import unittest
from BrickPython.BrickPi import PORT_1
from BrickPython.Sensor import Sensor, TouchSensor, UltrasonicSensor, LightSensor
import TestScheduler
class TestSensor(unittest.TestCase):
    'Tests for the Sensor classes'
    def testSensor(self):
        # Base Sensor: stores the port, exposes an id char, and holds the
        # last value passed to updateValue().
        sensor = Sensor( PORT_1 )
        self.assertEquals(sensor.port, PORT_1)
        assert( sensor.idChar == '1' )
        assert( sensor.value() == 0 )
        sensor.updateValue( 3 )
        assert( sensor.value() == 3 )
    def testSensorTextRepresentation(self):
        self.assertEquals( repr(Sensor( PORT_1 ) ), 'Sensor 1: 0 (0)')
    def testDifferentWaysToInitialize(self):
        # Port may be given as a string; an explicit sensor type is optional.
        self.assertEquals( repr(Sensor( '1' ) ), 'Sensor 1: 0 (0)')
        self.assertEquals( repr(Sensor( '1', Sensor.COLOR_NONE ) ), 'Sensor 1: 0 (0)')
    def testTouchSensor(self):
        # Raw value 0 reads as pressed (True); large raw values as released.
        sensor = TouchSensor( '1' )
        self.assertEquals(sensor.port, 0)
        self.assertEquals( sensor.idChar, '1' )
        self.assertEquals( sensor.value(), True ) # Pressed in
        sensor.updateValue( 1000 )
        self.assertEquals( sensor.value(), False )
    def testTouchSensorTextRepresentation(self):
        self.assertEquals( repr(TouchSensor( '1' ) ), 'TouchSensor 1: True (0)')
    def testCallbackWhenChanged(self):
        # callbackFunction fires only on value transitions, with the new value.
        result = [True]
        def callbackFunc(x):
            result[0] = x
        sensor = TouchSensor( '1' )
        sensor.callbackFunction = callbackFunc
        sensor.updateValue( 1000 )
        self.assertEquals( result[0], False )
        # And no call when it doesn't change
        result[0] = True
        sensor.updateValue( 1000 )
        self.assertEquals( result[0], True )
        # But does get a call when it changes back
        result[0] = False
        sensor.updateValue( 20 )
        self.assertEquals( result[0], True )
    def testCoroutineWaitingForChange(self):
        # waitForChange() is a coroutine that completes once the value changes.
        sensor = TouchSensor( '1' )
        coroutine = sensor.waitForChange()
        coroutine.next()
        coroutine.next()
        sensor.updateValue( 1000 )
        TestScheduler.TestScheduler.checkCoroutineFinished( coroutine )
    def testUltrasonicSensor(self):
        # Values quantize to multiples of 5; 0 and 255 map to MAX_VALUE.
        sensor = UltrasonicSensor( '1' )
        self.assertEquals(sensor.port, 0)
        self.assertEquals( sensor.idChar, '1' )
        for input, output in {0:UltrasonicSensor.MAX_VALUE, 2:0, 3:5, 4:5, 9:10, 11:10, 14:15, 16:15, 22:20, 23:25, 26:25,
                              255: UltrasonicSensor.MAX_VALUE
                              }.items():
            for i in xrange(0,UltrasonicSensor.SMOOTHING_RANGE+1):
                sensor.updateValue( input ) # Remove effects of smoothing.
            self.assertEquals( sensor.value(), output, "Failed with input %d: got %d" %(input,sensor.value()) )
    def testUltrasonicSensorSmoothing(self):
        # Outlier readings (the 50s) are smoothed away; the median-ish 10 wins.
        sensor = UltrasonicSensor( '1' )
        for input in [ 24,14,10,8,10,10,50,10,50,18,50]:
            sensor.updateValue( input )
            print sensor
        self.assertEquals( sensor.value(), 10 )
    def testLightSensor(self):
        #Light is 680, dark about 800
        sensor = LightSensor('4')
        self.assertEquals(sensor.port, 3)
        self.assertEquals( sensor.idChar, '4' )
        for input, output in { 680: LightSensor.LIGHT, 800: LightSensor.DARK
                              }.items():
            sensor.updateValue( input )
            self.assertEquals( sensor.value(), output )
        self.assertEquals( repr(sensor), "LightSensor 4: 'Dark' (800)")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
mit
|
jswope00/GAI
|
common/djangoapps/django_comment_common/models.py
|
15
|
3806
|
import logging
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.utils.translation import ugettext_noop
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
FORUM_ROLE_ADMINISTRATOR = ugettext_noop('Administrator')
FORUM_ROLE_MODERATOR = ugettext_noop('Moderator')
FORUM_ROLE_COMMUNITY_TA = ugettext_noop('Community TA')
FORUM_ROLE_STUDENT = ugettext_noop('Student')
@receiver(post_save, sender=CourseEnrollment)
def assign_default_role_on_enrollment(sender, instance, **kwargs):
    """
    Assign forum default role 'Student'

    Django post_save signal handler: fires on every CourseEnrollment save
    and ensures the enrolled user holds the course's "Student" forum role.
    """
    # The code below would remove all forum Roles from a user when they unenroll
    # from a course. Concerns were raised that it should apply only to students,
    # or that even the history of student roles is important for research
    # purposes. Since this was new functionality being added in this release,
    # I'm just going to comment it out for now and let the forums team deal with
    # implementing the right behavior.
    #
    # # We've unenrolled the student, so remove all roles for this course
    # if not instance.is_active:
    #     course_roles = list(Role.objects.filter(course_id=instance.course_id))
    #     instance.user.roles.remove(*course_roles)
    #     return
    # We've enrolled the student, so make sure they have the Student role
    assign_default_role(instance.course_id, instance.user)
def assign_default_role(course_id, user):
    """
    Assign forum default role 'Student' to user

    Idempotent: get_or_create means repeated calls never duplicate the role,
    and the M2M add is a no-op if the user already has it.
    """
    role, __ = Role.objects.get_or_create(course_id=course_id, name="Student")
    user.roles.add(role)
class Role(models.Model):
    """A forum role (e.g. Student, Moderator) scoped to a course (or global
    when course_id is blank), carrying a set of named permissions."""
    name = models.CharField(max_length=30, null=False, blank=False)
    users = models.ManyToManyField(User, related_name="roles")
    course_id = models.CharField(max_length=255, blank=True, db_index=True)

    class Meta:
        # use existing table that was originally created from django_comment_client app
        db_table = 'django_comment_client_role'

    def __unicode__(self):
        return self.name + " for " + (self.course_id if self.course_id else "all courses")

    def inherit_permissions(self, role):  # TODO the name of this method is a little bit confusing,
                                          # since it's one-off and doesn't handle inheritance later
        # NOTE(review): on a course_id mismatch this only logs a warning and
        # still copies the permissions — confirm that is intended.
        if role.course_id and role.course_id != self.course_id:
            logging.warning("%s cannot inherit permissions from %s due to course_id inconsistency", \
                            self, role)
        for per in role.permissions.all():
            self.add_permission(per)

    def add_permission(self, permission):
        # Accepts a permission *name*; creates the Permission row if missing.
        self.permissions.add(Permission.objects.get_or_create(name=permission)[0])

    def has_permission(self, permission):
        # Students lose edit/update/create permissions when the course has
        # forum posting disabled; everything else is a straight lookup.
        course_loc = CourseDescriptor.id_to_location(self.course_id)
        course = modulestore().get_instance(self.course_id, course_loc)
        if self.name == FORUM_ROLE_STUDENT and \
           (permission.startswith('edit') or permission.startswith('update') or permission.startswith('create')) and \
           (not course.forum_posts_allowed):
            return False
        return self.permissions.filter(name=permission).exists()
class Permission(models.Model):
    """A single named forum permission; the name itself is the primary key.
    Linked many-to-many to Role (exposed as ``role.permissions``)."""
    name = models.CharField(max_length=30, null=False, blank=False, primary_key=True)
    roles = models.ManyToManyField(Role, related_name="permissions")

    class Meta:
        # use existing table that was originally created from django_comment_client app
        db_table = 'django_comment_client_permission'

    def __unicode__(self):
        return self.name
|
agpl-3.0
|
GiovanniConserva/TestDeploy
|
venv/Lib/site-packages/requests/packages/chardet/langgreekmodel.py
|
2763
|
12628
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# Map each ISO-8859-7 (Latin-7 / Greek) byte value to a frequency "order":
# low values = most frequent Greek letters; 252 = digit, 253 = symbol,
# 254 = CR/LF, 255 = control/unused (see legend above). Generated table —
# do not edit by hand.
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Same byte->order mapping for the windows-1253 encoding of Greek; differs
# from the Latin-7 table only in a few punctuation/letter slots (rows a0/b0).
# Generated table — do not edit by hand.
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
# Bigram "precedence" matrix for Greek: entry [order1*64 + order2] scores how
# typical it is for a character of frequency-order `order1` to be followed by
# one of `order2` (3 = very likely ... 0 = negative/unseen). Shared by both
# charset models below. Generated table — do not edit by hand.
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Language-model descriptor for Greek text in ISO-8859-7: ties the byte->order
# map to the shared bigram matrix plus the detector's tuning constants.
Latin7GreekModel = {
    'charToOrderMap': Latin7_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    # fraction of observed bigrams covered by the "positive" (score 3) set
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "ISO-8859-7"
}
# Same descriptor for Greek text in the windows-1253 encoding; shares the
# bigram matrix, only the byte->order map and charset name differ.
Win1253GreekModel = {
    'charToOrderMap': win1253_CharToOrderMap,
    'precedenceMatrix': GreekLangModel,
    # fraction of observed bigrams covered by the "positive" (score 3) set
    'mTypicalPositiveRatio': 0.982851,
    'keepEnglishLetter': False,
    'charsetName': "windows-1253"
}
# flake8: noqa
|
bsd-3-clause
|
benoitsteiner/tensorflow
|
tensorflow/contrib/tensor_forest/hybrid/python/models/k_feature_decisions_to_data_then_nn.py
|
189
|
1874
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a soft decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class KFeatureDecisionsToDataThenNN(hybrid_model.HybridModel):
  """A model that places a soft decision tree embedding before a neural net.

  Layer 0 is a KFeatureDecisionsToDataLayer (soft decision tree embedding
  using k features per node); layer 1 is a fully connected net consuming
  that embedding.
  """
  def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):
    # params: hybrid-model hyperparameters, forwarded both to the base class
    #   and to each layer.
    # device_assigner: optional device-placement strategy (base class picks a
    #   default when None).
    # optimizer_class: training optimizer; Adagrad by default.
    super(KFeatureDecisionsToDataThenNN, self).__init__(
        params,
        device_assigner=device_assigner,
        optimizer_class=optimizer_class,
        **kwargs)
    # The layer index (0, 1) is passed so each layer can namespace its
    # variables within the model.
    self.layers = [decisions_to_data.KFeatureDecisionsToDataLayer(
        params, 0, device_assigner),
                   fully_connected.FullyConnectedLayer(
                       params, 1, device_assigner=device_assigner)]
|
apache-2.0
|
ntiufalara/openerp7
|
openerp/addons/sale_stock/stock.py
|
27
|
8014
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class stock_move(osv.osv):
    """Extend stock.move with a link back to the originating sale order line."""
    _inherit = 'stock.move'
    _columns = {
        # Sale order line that generated this move; 'set null' keeps the move
        # history if the line is deleted.
        'sale_line_id': fields.many2one('sale.order.line', 'Sales Order Line', ondelete='set null', select=True, readonly=True),
    }
    def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
        """Propagate the source picking's sale order onto chained pickings."""
        values = super(stock_move, self)._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context)
        if picking.sale_id:
            values['sale_id'] = picking.sale_id.id
        return values
class stock_picking(osv.osv):
    """Extend stock.picking with a link to its sale order and make the
    invoicing helpers pull their values (currency, partner, pricing, taxes,
    analytic account, ...) from that sale order when one is present.  Every
    method falls back to the base 'stock' implementation otherwise.
    """
    _inherit = 'stock.picking'
    _columns = {
        # Sale order this picking was generated from (if any).
        'sale_id': fields.many2one('sale.order', 'Sales Order', ondelete='set null', select=True),
    }
    _defaults = {
        'sale_id': False
    }
    def get_currency_id(self, cursor, user, picking):
        # Invoice in the currency of the sale order's pricelist when linked.
        if picking.sale_id:
            return picking.sale_id.pricelist_id.currency_id.id
        else:
            return super(stock_picking, self).get_currency_id(cursor, user, picking)
    def _get_partner_to_invoice(self, cr, uid, picking, context=None):
        """ Inherit the original function of the 'stock' module
            We select the partner of the sales order as the partner of the customer invoice
        """
        if picking.sale_id:
            return picking.sale_id.partner_invoice_id
        return super(stock_picking, self)._get_partner_to_invoice(cr, uid, picking, context=context)
    def _get_comment_invoice(self, cursor, user, picking):
        # Picking note wins; fall back to the sale order's note.
        if picking.note or (picking.sale_id and picking.sale_id.note):
            return picking.note or picking.sale_id.note
        return super(stock_picking, self)._get_comment_invoice(cursor, user, picking)
    def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
        """ Inherit the original function of the 'stock' module in order to override name field
            to pass the customer reference form the sales order
        """
        invoice_vals = super(stock_picking, self)._prepare_invoice_group(cr, uid, picking, partner, invoice, context)
        if picking.sale_id:
            # NOTE(review): appends ', ' even when client_order_ref is empty —
            # cosmetic only; kept as-is to preserve existing invoice names.
            invoice_vals['name'] = (invoice.name or '') + ', ' + (picking.sale_id.client_order_ref or '')
        return invoice_vals
    def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
        """ Inherit the original function of the 'stock' module in order to override some
            values if the picking has been generated by a sales order
        """
        invoice_vals = super(stock_picking, self)._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
        if picking.sale_id:
            invoice_vals['fiscal_position'] = picking.sale_id.fiscal_position.id
            invoice_vals['payment_term'] = picking.sale_id.payment_term.id
            invoice_vals['user_id'] = picking.sale_id.user_id.id
            invoice_vals['name'] = picking.sale_id.client_order_ref or ''
        return invoice_vals
    def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None):
        # Carry the sale order's analytic account onto lines that come from a
        # sale order line.
        invoice_vals = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=context)
        if picking.sale_id:
            if move_line.sale_line_id:
                invoice_vals['account_analytic_id'] = self._get_account_analytic_invoice(cr, uid, picking, move_line)
        return invoice_vals
    def _get_price_unit_invoice(self, cursor, user, move_line, type):
        # Use the negotiated sale price; when the product's unit of sale (UoS)
        # differs from its unit of measure (UoM), convert via uos_coeff.
        if move_line.sale_line_id and move_line.sale_line_id.product_id.id == move_line.product_id.id:
            uom_id = move_line.product_id.uom_id.id
            uos_id = move_line.product_id.uos_id and move_line.product_id.uos_id.id or False
            price = move_line.sale_line_id.price_unit
            coeff = move_line.product_id.uos_coeff
            if uom_id != uos_id and coeff != 0:
                price_unit = price / coeff
                return price_unit
            return move_line.sale_line_id.price_unit
        return super(stock_picking, self)._get_price_unit_invoice(cursor, user, move_line, type)
    def _get_discount_invoice(self, cursor, user, move_line):
        # Propagate the discount from the sale order line.
        if move_line.sale_line_id:
            return move_line.sale_line_id.discount
        return super(stock_picking, self)._get_discount_invoice(cursor, user, move_line)
    def _get_taxes_invoice(self, cursor, user, move_line, type):
        # Use the sale order line's taxes when the product matches.
        if move_line.sale_line_id and move_line.sale_line_id.product_id.id == move_line.product_id.id:
            return [x.id for x in move_line.sale_line_id.tax_id]
        return super(stock_picking, self)._get_taxes_invoice(cursor, user, move_line, type)
    def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
        # Analytic account comes from the sale order's project.
        if picking.sale_id:
            return picking.sale_id.project_id.id
        return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
    def _invoice_line_hook(self, cursor, user, move_line, invoice_line_id):
        # Link the created invoice line back to its sale order line
        # ((4, id) = add to many2many without removing existing links).
        if move_line.sale_line_id:
            move_line.sale_line_id.write({'invoice_lines': [(4, invoice_line_id)]})
        return super(stock_picking, self)._invoice_line_hook(cursor, user, move_line, invoice_line_id)
    def _invoice_hook(self, cursor, user, picking, invoice_id):
        """After invoice creation: attach it to the sale order and invoice the
        order's not-yet-invoiced service lines (services ship no stock, so
        they would otherwise never reach an invoice through a picking)."""
        sale_obj = self.pool.get('sale.order')
        order_line_obj = self.pool.get('sale.order.line')
        invoice_obj = self.pool.get('account.invoice')
        invoice_line_obj = self.pool.get('account.invoice.line')
        if picking.sale_id:
            sale_obj.write(cursor, user, [picking.sale_id.id], {
                'invoice_ids': [(4, invoice_id)],
            })
            for sale_line in picking.sale_id.order_line:
                if sale_line.product_id.type == 'service' and not sale_line.invoiced:
                    vals = order_line_obj._prepare_order_line_invoice_line(cursor, user, sale_line, False)
                    vals['invoice_id'] = invoice_id
                    invoice_line_id = invoice_line_obj.create(cursor, user, vals)
                    order_line_obj.write(cursor, user, [sale_line.id], {
                        'invoice_lines': [(6, 0, [invoice_line_id])],
                    })
                    invoice_obj.button_compute(cursor, user, [invoice_id])
        return super(stock_picking, self)._invoice_hook(cursor, user, picking, invoice_id)
# Redefinition of the new field in order to update the model stock.picking.out in the orm
# FIXME: this is a temporary workaround because of a framework bug (ref: lp996816). It should be removed as soon as
# the bug is fixed
class stock_picking_out(osv.osv):
    """Re-declare sale_id on stock.picking.out so the ORM registers the column
    on that model too (temporary workaround, see FIXME comment above)."""
    _inherit = 'stock.picking.out'
    _columns = {
        'sale_id': fields.many2one('sale.order', 'Sale Order',
                                   ondelete='set null', select=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mit
|
aclifton/cpeg853-gem5
|
src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.py
|
15
|
3122
|
# Copyright (c) 2016 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Tushar Krishna
from MemObject import MemObject
from m5.params import *
from m5.proxy import *
class GarnetSyntheticTraffic(MemObject):
    """Synthetic traffic injector for exercising the Garnet interconnect
    model: each instance acts as one traffic-generating node, with the
    pattern, injection rate and duration set through the Params below."""
    type = 'GarnetSyntheticTraffic'
    cxx_header = \
        "cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh"
    block_offset = Param.Int(6, "block offset in bits")
    num_dest = Param.Int(1, "Number of Destinations")
    memory_size = Param.Int(65536, "memory size")
    sim_cycles = Param.Int(1000, "Number of simulation cycles")
    # -1 acts as "unlimited" / "all nodes" sentinel for the next three params
    num_packets_max = Param.Int(-1, "Max number of packets to send. \
                        Default is to keep sending till simulation ends")
    single_sender = Param.Int(-1, "Send only from this node. \
                        By default every node sends")
    single_dest = Param.Int(-1, "Send only to this dest. \
                        Default depends on traffic_type")
    traffic_type = Param.String("uniform_random", "Traffic type")
    inj_rate = Param.Float(0.1, "Packet injection rate")
    inj_vnet = Param.Int(-1, "Vnet to inject in. \
                        0 and 1 are 1-flit, 2 is 5-flit. \
                        Default is to inject in all three vnets")
    precision = Param.Int(3, "Number of digits of precision \
                        after decimal point")
    response_limit = Param.Cycles(5000000, "Cycles before exiting \
                        due to lack of progress")
    test = MasterPort("Port to the memory system to test")
    system = Param.System(Parent.any, "System we belong to")
|
bsd-3-clause
|
SmartInfrastructures/fuel-web-dev
|
nailgun/nailgun/orchestrator/provisioning_serializers.py
|
1
|
13570
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provisioning serializers for orchestrator"""
from itertools import groupby
import netaddr
import six
from nailgun import consts
from nailgun.extensions import node_extension_call
from nailgun.logger import logger
from nailgun import objects
from nailgun.orchestrator.priority_serializers import PriorityStrategy
from nailgun.orchestrator import tasks_templates
from nailgun.settings import settings
class ProvisioningSerializer(object):
"""Provisioning serializer"""
@classmethod
def serialize(cls, cluster, nodes, ignore_customized=False):
"""Serialize cluster for provisioning."""
cluster_attrs = objects.Attributes.merged_attrs_values(
cluster.attributes
)
serialized_nodes = []
keyfunc = lambda node: bool(node.replaced_provisioning_info)
for customized, node_group in groupby(nodes, keyfunc):
if customized and not ignore_customized:
serialized_nodes.extend(cls.serialize_customized(node_group))
else:
serialized_nodes.extend(
cls.serialize_nodes(cluster_attrs, node_group))
serialized_info = (cluster.replaced_provisioning_info or
cls.serialize_cluster_info(cluster_attrs, nodes))
serialized_info['fault_tolerance'] = cls.fault_tolerance(cluster,
nodes)
serialized_info['nodes'] = serialized_nodes
return serialized_info
@classmethod
def serialize_cluster_info(cls, cluster_attrs, nodes):
return {
'engine': {
'url': settings.COBBLER_URL,
'username': settings.COBBLER_USER,
'password': settings.COBBLER_PASSWORD,
'master_ip': settings.MASTER_IP,
}}
@classmethod
def serialize_customized(self, nodes):
serialized = []
for node in nodes:
serialized.append(node.replaced_provisioning_info)
return serialized
@classmethod
def serialize_nodes(cls, cluster_attrs, nodes):
"""Serialize nodes."""
serialized_nodes = []
for node in nodes:
serialized_nodes.append(cls.serialize_node(cluster_attrs, node))
return serialized_nodes
@classmethod
def serialize_node(cls, cluster_attrs, node):
"""Serialize a single node."""
serialized_node = {
'uid': node.uid,
'power_address': node.ip,
'name': objects.Node.get_slave_name(node),
# right now it duplicates to avoid possible issues
'slave_name': objects.Node.get_slave_name(node),
'hostname': objects.Node.get_node_fqdn(node),
'power_pass': cls.get_ssh_key_path(node),
'profile': cluster_attrs['cobbler']['profile'],
'power_type': 'ssh',
'power_user': 'root',
'name_servers': '\"%s\"' % settings.DNS_SERVERS,
'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
'netboot_enabled': '1',
# For provisioning phase
'kernel_options': {
'netcfg/choose_interface':
objects.Node.get_admin_physical_iface(node).mac,
'udevrules': cls.interfaces_mapping_for_udev(node)},
'ks_meta': {
'pm_data': {
'ks_spaces': node_extension_call('get_node_volumes', node),
'kernel_params': objects.Node.get_kernel_params(node)},
'fuel_version': node.cluster.fuel_version,
'puppet_auto_setup': 1,
'puppet_master': settings.PUPPET_MASTER_HOST,
'puppet_enable': 0,
'mco_auto_setup': 1,
'install_log_2_syslog': 1,
'mco_pskey': settings.MCO_PSKEY,
'mco_vhost': settings.MCO_VHOST,
'mco_host': settings.MCO_HOST,
'mco_user': settings.MCO_USER,
'mco_password': settings.MCO_PASSWORD,
'mco_connector': settings.MCO_CONNECTOR,
'mco_enable': 1,
'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
'authorized_keys':
["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
'master_ip': settings.MASTER_IP,
'timezone': settings.TIMEZONE,
}}
provision_data = cluster_attrs.get('provision')
if provision_data:
if provision_data['method'] == consts.PROVISION_METHODS.image:
serialized_node['ks_meta']['image_data'] = \
provision_data['image_data']
serialized_node['ks_meta']['repo_setup'] = cluster_attrs['repo_setup']
vlan_splinters = cluster_attrs.get('vlan_splinters', {})
if vlan_splinters.get('vswitch') == 'kernel_lt':
serialized_node['ks_meta']['kernel_lt'] = 1
mellanox_data = cluster_attrs.get('neutron_mellanox')
if mellanox_data:
serialized_node['ks_meta'].update({
'mlnx_vf_num': mellanox_data['vf_num'],
'mlnx_plugin_mode': mellanox_data['plugin'],
'mlnx_iser_enabled': cluster_attrs['storage']['iser'],
})
# Add relevant kernel parameter when using Mellanox SR-IOV
# and/or iSER (which works on top of a probed virtual function)
# unless it was explicitly added by the user
pm_data = serialized_node['ks_meta']['pm_data']
if ((mellanox_data['plugin'] == 'ethernet' or
cluster_attrs['storage']['iser'] is True) and
'intel_iommu=' not in pm_data['kernel_params']):
pm_data['kernel_params'] += ' intel_iommu=on'
net_manager = objects.Cluster.get_network_manager(node.cluster)
gw = net_manager.get_default_gateway(node.id)
serialized_node['ks_meta'].update({'gw': gw})
serialized_node['ks_meta'].update(
{'admin_net': net_manager.get_admin_network_group(node.id).cidr}
)
serialized_node.update(cls.serialize_interfaces(node))
return serialized_node
@classmethod
def serialize_interfaces(cls, node):
interfaces = {}
interfaces_extra = {}
net_manager = objects.Cluster.get_network_manager(node.cluster)
admin_ip = net_manager.get_admin_ip_for_node(node.id)
admin_netmask = str(netaddr.IPNetwork(
net_manager.get_admin_network_group(node.id).cidr
).netmask)
for interface in node.nic_interfaces:
name = interface.name
interfaces[name] = {
'mac_address': interface.mac,
'static': '0'}
# interfaces_extra field in cobbler ks_meta
# means some extra data for network interfaces
# configuration. It is used by cobbler snippet.
# For example, cobbler interface model does not
# have 'peerdns' field, but we need this field
# to be configured. So we use interfaces_extra
# branch in order to set this unsupported field.
interfaces_extra[name] = {
'peerdns': 'no',
'onboot': 'no'}
# We want node to be able to PXE boot via any of its
# interfaces. That is why we add all discovered
# interfaces into cobbler system. But we want
# assignted fqdn to be resolved into one IP address
# because we don't completely support multiinterface
# configuration yet.
if interface.mac == objects.Node.\
get_admin_physical_iface(node).mac:
interfaces[name]['dns_name'] = \
objects.Node.get_node_fqdn(node)
interfaces[name]['netmask'] = admin_netmask
interfaces[name]['ip_address'] = admin_ip
interfaces_extra[name]['onboot'] = 'yes'
return {
'interfaces': interfaces,
'interfaces_extra': interfaces_extra}
@classmethod
def interfaces_mapping_for_udev(cls, node):
"""Serialize interfaces mapping for cobbler
:param node: node model
:returns: returns string, example:
00:02:03:04:04_eth0,00:02:03:04:05_eth1
"""
return ','.join((
'{0}_{1}'.format(i.mac, i.name) for i in node.nic_interfaces))
@classmethod
    def get_ssh_key_path(cls, node):
        """Assign power pass depend on node state."""
        # Nodes still in "discover" state are running the bootstrap
        # image, which is provisioned with its own ssh key.
        if node.status == "discover":
            logger.info(
                u'Node %s seems booted with bootstrap image', node.full_name)
            return settings.PATH_TO_BOOTSTRAP_SSH_KEY
        # Any other status is assumed to mean the target (real) system.
        logger.info(u'Node %s seems booted with real system', node.full_name)
        return settings.PATH_TO_SSH_KEY
@classmethod
def fault_tolerance(cls, cluster, nodes):
may_fail = []
roles_metadata = objects.Cluster.get_roles(cluster)
for role in roles_metadata:
if 'fault_tolerance' in roles_metadata[role]:
tolerance = roles_metadata[role]['fault_tolerance']
# only percantage is supported for now
if not tolerance.endswith('%'):
continue
percentage = tolerance[:-1]
uids = []
for node in nodes:
if role in node.roles:
uids.append(node.uid)
may_fail.append({'uids': uids,
'percentage': int(percentage)})
return may_fail
class ProvisioningSerializer61(ProvisioningSerializer):
    """Provisioning serializer for 6.1+ environments.

    Extends the base serializer with a list of tasks that must run
    before actual provisioning starts.
    """

    @classmethod
    def serialize(cls, cluster, nodes, ignore_customized=False):
        """Serialize the cluster and attach pre-provision tasks."""
        data = super(ProvisioningSerializer61, cls).serialize(
            cluster, nodes, ignore_customized)
        data['pre_provision'] = cls.serialize_pre_provision_tasks(cluster)
        return data

    @classmethod
    def serialize_pre_provision_tasks(cls, cluster):
        """Build the prioritized list of tasks to run before provisioning."""
        attrs = objects.Attributes.merged_attrs_values(cluster.attributes)
        is_ubuntu = (
            cluster.release.operating_system == consts.RELEASE_OS.ubuntu)
        method = attrs['provision']['method']

        tasks = []
        # Target images are only built for image based Ubuntu provision.
        if is_ubuntu and method == consts.PROVISION_METHODS.image:
            tasks.append(
                tasks_templates.make_provisioning_images_task(
                    [consts.MASTER_ROLE],
                    attrs['repo_setup']['repos'],
                    attrs['provision'],
                    cluster.id))
        # NOTE(kozhukalov): This pre-provision task is going to be
        # removed by 7.0 because we need this only for classic way of
        # provision and only until we get rid of it. We are going
        # to download debian-installer initrd and kernel just before
        # starting actual provisioning.
        if is_ubuntu and method == consts.PROVISION_METHODS.cobbler:
            tasks.append(
                tasks_templates.make_download_debian_installer_task(
                    [consts.MASTER_ROLE],
                    attrs['repo_setup']['repos'],
                    attrs['repo_setup']['installer_kernel'],
                    attrs['repo_setup']['installer_initrd']))
        PriorityStrategy().one_by_one(tasks)
        return tasks

    @classmethod
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a node, honouring the Fedora LT kernel option."""
        serialized = super(ProvisioningSerializer61, cls).serialize_node(
            cluster_attrs, node)
        fedora_lt = cluster_attrs.get('use_fedora_lt', {})
        if fedora_lt.get('kernel') == 'fedora_lt_kernel':
            serialized['ks_meta']['kernel_lt'] = 1
        return serialized
def get_serializer_for_cluster(cluster):
    """Returns a serializer depends on a given `cluster`.

    :param cluster: cluster to process
    :returns: a serializer for a given cluster
    """
    env_version = cluster.release.environment_version
    # 5.x and 6.0.x environments still use the pre-6.1 serializer.
    if env_version.startswith(('5', '6.0')):
        return ProvisioningSerializer
    # by default, we should return latest serializer
    return ProvisioningSerializer61
def serialize(cluster, nodes, ignore_customized=False):
    """Serialize cluster for provisioning.

    :param cluster: cluster model to serialize
    :param nodes: nodes to include in the provisioning info
    :param ignore_customized: forwarded to the serializer; presumably
        skips node-level customized data when True -- confirm in the
        serializer implementation.
    :returns: provisioning info built by the version-matched serializer
    """
    # NOTE(review): prepare_for_provisioning is defined elsewhere; it is
    # assumed to make nodes ready for serialization -- confirm there.
    objects.NodeCollection.prepare_for_provisioning(nodes)
    serializer = get_serializer_for_cluster(cluster)
    return serializer.serialize(
        cluster, nodes, ignore_customized=ignore_customized)
|
apache-2.0
|
renyi533/tensorflow
|
tensorflow/python/compiler/tensorrt/test/lru_cache_test.py
|
3
|
2916
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test LRUCache by running different input batch sizes on same network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class LRUCacheTest(trt_test.TfTrtIntegrationTestBase):
  """Exercises the TRT engine LRU cache by varying the input batch size."""

  def GraphFn(self, x):
    """Builds conv2d -> bias add -> relu with a batch-agnostic shape."""
    conv_filter = constant_op.constant(
        np.random.randn(3, 3, 2, 1), dtype=dtypes.float32)
    x = nn.conv2d(
        input=x,
        filter=conv_filter,
        strides=[1, 1, 1, 1],
        padding="SAME",
        name="conv")
    bias = constant_op.constant(
        np.random.randn(1, 10, 10, 1), dtype=dtypes.float32)
    x = math_ops.add(x, bias)
    x = nn.relu(x)
    return array_ops.identity(x, name="output")

  def GetParams(self):
    """Test parameters: four runs with batch sizes 1, 2, 4 and 2 again."""
    # Batch sizes are repeated/varied on purpose so the engine cache is
    # hit and missed; fix: the unused local `dtype` was removed.
    batch_sizes = [1, 2, 4, 2]
    input_dims = [[[batch, 10, 10, 2]] for batch in batch_sizes]
    expected_output_dims = [[[batch, 10, 10, 1]] for batch in batch_sizes]
    return trt_test.TfTrtIntegrationTestParams(
        graph_fn=self.GraphFn,
        input_specs=[
            tensor_spec.TensorSpec([None, 10, 10, 2], dtypes.float32, "input")
        ],
        output_specs=[
            tensor_spec.TensorSpec([None, 10, 10, 1], dtypes.float32, "output")
        ],
        input_dims=input_dims,
        expected_output_dims=expected_output_dims)

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ["TRTEngineOp_0"]

  def ShouldRunTest(self, run_params):
    """Run only for dynamic engines in non-quantized precision modes."""
    return (run_params.dynamic_engine and not trt_test.IsQuantizationMode(
        run_params.precision_mode)), "test dynamic engine and non-INT8"
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  test.main()
|
apache-2.0
|
zhinaonet/sqlmap-z
|
thirdparty/chardet/sbcsgroupprober.py
|
2936
|
3291
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    """Group prober bundling every single-byte charset prober."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Independent single-byte models, tried side by side.
        model_probers = [SingleByteCharSetProber(model) for model in (
            Win1251CyrillicModel,
            Koi8rModel,
            Latin5CyrillicModel,
            MacCyrillicModel,
            Ibm866Model,
            Ibm855Model,
            Latin7GreekModel,
            Win1253GreekModel,
            Latin5BulgarianModel,
            Win1251BulgarianModel,
            Latin2HungarianModel,
            Win1250HungarianModel,
            TIS620ThaiModel,
        )]
        # Hebrew needs an arbiter prober that chooses between the
        # logical and visual variants of windows-1255.
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(
            Win1255HebrewModel, False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(
            Win1255HebrewModel, True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers = model_probers + [
            hebrew_prober, logical_hebrew, visual_hebrew]
        self.reset()
|
gpl-3.0
|
saurabh3949/mxnet
|
python/mxnet/executor.py
|
16
|
20077
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-locals, too-many-arguments
"""Symbolic Executor component of MXNet."""
from __future__ import absolute_import
import ctypes
import copy
import numpy as np
from .base import _LIB
from .base import mx_uint, NDArrayHandle, ExecutorHandle
from .base import check_call, c_array, py_str
from .ndarray import NDArray
from .ndarray import _ndarray_cls
from . import ndarray as nd
# those functions are not used here, we just import them to keep backward compatibility
# in case the end user calls them, as they originally lives here
# pylint: disable=unused-import
from .executor_manager import _split_input_slice, _check_arguments, _load_data, _load_label
def _monitor_callback_wrapper(callback):
"""A wrapper for the user-defined handle."""
def callback_handle(name, array, _):
""" ctypes function """
callback(name, array)
return callback_handle
class Executor(object):
"""Executor is the object providing efficient symbolic graph execution and optimization.
Examples
--------
>>> # typical approach to create an executor is to bind symbol
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])})
"""
    def __init__(self, handle, symbol, ctx, grad_req, group2ctx):
        """Constructor, used Symbol.bind and Symbol.simple_bind instead.
        Parameters
        ----------
        handle: ExecutorHandle
            ExecutorHandle generated by calling `bind`.
        See Also
        --------
        Symbol.bind : to create executor.
        """
        if not isinstance(handle, ExecutorHandle):
            raise TypeError("Handle type error")
        self.handle = handle
        self.arg_arrays = []
        self.grad_arrays = []
        self.aux_arrays = []
        # Outputs are fetched once here and the same list is returned by
        # every forward() call.
        self.outputs = self._get_outputs()
        # Deep copies decouple the executor from later caller-side
        # mutation of the bound symbol/context/gradient requests.
        self._symbol = copy.deepcopy(symbol)
        # Lazily-built name -> NDArray dicts (see arg_dict and friends).
        self._arg_dict = None
        self._grad_dict = None
        self._aux_dict = None
        self._output_dict = None
        self._monitor_callback = None
        self._ctx = copy.deepcopy(ctx)
        self._grad_req = copy.deepcopy(grad_req)
        self._group2ctx = copy.deepcopy(group2ctx)
    def __del__(self):
        # Release the underlying C executor when garbage collected.
        check_call(_LIB.MXExecutorFree(self.handle))
@staticmethod
def _get_dict(names, ndarrays):
"""Get the dictionary given name and ndarray pairs."""
nset = set()
for nm in names:
if nm in nset:
raise ValueError('Duplicate names detected, %s' % str(names))
nset.add(nm)
return dict(zip(names, ndarrays))
    def _get_outputs(self):
        """List all the output NDArray.
        Returns
        -------
        A list of ndarray bound to the heads of executor.
        """
        out_size = mx_uint()
        handles = ctypes.POINTER(NDArrayHandle)()
        # The C call fills `handles` with a C array of `out_size` raw
        # NDArray handles.
        check_call(_LIB.MXExecutorOutputs(self.handle,
                                          ctypes.byref(out_size), ctypes.byref(handles)))
        num_output = out_size.value
        # Wrap each raw handle into the appropriate python NDArray class.
        outputs = [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(num_output)]
        return outputs
def forward(self, is_train=False, **kwargs):
"""Calculate the outputs specified by the bound symbol.
Parameters
----------
is_train: bool, optional
Whether this forward is for evaluation purpose. If True,
a backward call is expected to follow.
**kwargs
Additional specification of input arguments.
Examples
--------
>>> # doing forward by specifying data
>>> texec.forward(is_train=True, data=mydata)
>>> # doing forward by not specifying things, but copy to the executor before hand
>>> mydata.copyto(texec.arg_dict['data'])
>>> texec.forward(is_train=True)
>>> # doing forward by specifying data and get outputs
>>> outputs = texec.forward(is_train=True, data=mydata)
>>> print(outputs[0].asnumpy())
"""
if len(kwargs) != 0:
arg_dict = self.arg_dict
for name, array in kwargs.items():
if not isinstance(array, (NDArray, np.ndarray)):
raise ValueError('only accept keyword argument of NDArrays and numpy.ndarray')
if name not in arg_dict:
raise TypeError('Unknown argument %s' % name)
if arg_dict[name].shape != array.shape:
raise ValueError('Shape not match! Argument %s, need: %s, received: %s'
%(name, str(arg_dict[name].shape), str(array.shape)))
arg_dict[name][:] = array
check_call(_LIB.MXExecutorForward(
self.handle,
ctypes.c_int(int(is_train))))
return self.outputs
def backward(self, out_grads=None, is_train=True):
"""Do backward pass to get the gradient of arguments.
Parameters
----------
out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
is_train : bool, default True
Whether this backward is for training or inference. Note that in rare
cases you want to call backward with is_train=False to get gradient
during inference.
Examples
--------
>>> # Example for binding on loss function symbol, which gives the loss value of the model.
>>> # Equivalently it gives the head gradient for backward pass.
>>> # In this example the built-in SoftmaxOutput is used as loss function.
>>> # MakeLoss can be used to define customized loss function symbol.
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6)
>>> net = mx.sym.Activation(net, name='relu', act_type="relu")
>>> net = mx.sym.SoftmaxOutput(net, name='softmax')
>>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)),
>>> 'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))}
>>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))}
>>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print out.asnumpy()
[[ 0.00378404 0.07600445 0.07600445 0.07600445 0.20660152 0.5616011 ]]
>>> texec.backward()
>>> print(texec.grad_arrays[1].asnumpy())
[[ 0.00378404 0.00378404 0.00378404 0.00378404]
[-0.92399555 -0.92399555 -0.92399555 -0.92399555]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.07600445 0.07600445 0.07600445 0.07600445]
[ 0.20660152 0.20660152 0.20660152 0.20660152]
[ 0.5616011 0.5616011 0.5616011 0.5616011 ]]
>>>
>>> # Example for binding on non-loss function symbol.
>>> # Here the binding symbol is neither built-in loss function
>>> # nor customized loss created by MakeLoss.
>>> # As a result the head gradient is not automatically provided.
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.Variable('b')
>>> # c is not a loss function symbol
>>> c = 2 * a + b
>>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])}
>>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))}
>>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad)
>>> out = texec.forward(is_train=True)[0].copy()
>>> print(out.asnumpy())
[ 4. 7.]
>>> # out_grads is the head gradient in backward pass.
>>> # Here we define 'c' as loss function.
>>> # Then 'out' is passed as head gradient of backward pass.
>>> texec.backward(out)
>>> print(texec.grad_arrays[0].asnumpy())
[ 8. 14.]
>>> print(texec.grad_arrays[1].asnumpy())
[ 4. 7.]
"""
if out_grads is None:
out_grads = []
elif isinstance(out_grads, NDArray):
out_grads = [out_grads]
elif isinstance(out_grads, dict):
out_grads = [out_grads[k] for k in self._symbol.list_outputs()]
for obj in out_grads:
if not isinstance(obj, NDArray):
raise TypeError("inputs must be NDArray")
ndarray = c_array(NDArrayHandle, [item.handle for item in out_grads])
check_call(_LIB.MXExecutorBackwardEx(
self.handle,
mx_uint(len(out_grads)),
ndarray,
ctypes.c_int(is_train)))
    def set_monitor_callback(self, callback):
        """Install callback for monitor.
        Parameters
        ----------
        callback : function
            Takes a string and an NDArrayHandle.
        Examples
        --------
        >>> def mon_callback(*args, **kwargs):
        >>> print("Do your stuff here.")
        >>>
        >>> texe.set_monitor_callback(mon_callback)
        """
        cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
        # Keep a reference on self so the ctypes trampoline is not
        # garbage collected while the C side may still invoke it.
        self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
        check_call(_LIB.MXExecutorSetMonitorCallback(
            self.handle,
            self._monitor_callback,
            None))
    @property
    def arg_dict(self):
        """Get dictionary representation of argument arrays.
        Returns
        -------
        arg_dict : dict of str to NDArray
            The dictionary that maps the names of arguments to NDArrays.
        Raises
        ------
        ValueError : if there are duplicated names in the arguments.
        """
        # Built lazily on first access and cached afterwards.
        if self._arg_dict is None:
            self._arg_dict = Executor._get_dict(
                self._symbol.list_arguments(), self.arg_arrays)
        return self._arg_dict
    @property
    def grad_dict(self):
        """Get dictionary representation of gradient arrays.
        Returns
        -------
        grad_dict : dict of str to NDArray
            The dictionary that maps name of arguments to gradient arrays.
        """
        # Built lazily on first access and cached afterwards.
        if self._grad_dict is None:
            self._grad_dict = Executor._get_dict(
                self._symbol.list_arguments(), self.grad_arrays)
        return self._grad_dict
    @property
    def aux_dict(self):
        """Get dictionary representation of auxiliary states arrays.
        Returns
        -------
        aux_dict : dict of str to NDArray
            The dictionary that maps name of auxiliary states to NDArrays.
        Raises
        ------
        ValueError : if there are duplicated names in the auxiliary states.
        """
        # Built lazily on first access and cached afterwards.
        if self._aux_dict is None:
            self._aux_dict = Executor._get_dict(
                self._symbol.list_auxiliary_states(), self.aux_arrays)
        return self._aux_dict
    @property
    def output_dict(self):
        """Get dictionary representation of output arrays.
        Returns
        -------
        output_dict : dict of str to NDArray
            The dictionary that maps name of output names to NDArrays.
        Raises
        ------
        ValueError : if there are duplicated names in the outputs.
        """
        # Built lazily on first access and cached afterwards.
        if self._output_dict is None:
            self._output_dict = Executor._get_dict(
                self._symbol.list_outputs(), self.outputs)
        return self._output_dict
def copy_params_from(self, arg_params, aux_params=None, allow_extra_params=False):
"""Copy parameters from arg_params, aux_params into executor's internal array.
Parameters
----------
arg_params : dict of str to NDArray
Parameters, dict of name to NDArray of arguments.
aux_params : dict of str to NDArray, optional
Parameters, dict of name to NDArray of auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
Raises
------
ValueError
If there is additional parameters in the dict but ``allow_extra_params=False``.
Examples
--------
>>> # set parameters with existing model checkpoint
>>> model_prefix = 'mx_mlp'
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 0)
>>> texec.copy_params_from(arg_params, aux_params)
"""
for name, array in arg_params.items():
if name in self.arg_dict:
dst = self.arg_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name \"%s\" that is not in the arguments' % name)
if aux_params is None:
return
for name, array in aux_params.items():
if name in self.aux_dict:
dst = self.aux_dict[name]
array.astype(dst.dtype).copyto(dst)
elif not allow_extra_params:
raise ValueError('Find name %s that is not in the auxiliary states' % name)
    def reshape(self, partial_shaping=False, allow_up_sizing=False, **kwargs):
        """Return a new executor with the same symbol and shared memory,
        but different input/output shapes.
        For runtime reshaping, variable length sequences, etc.
        The returned executor shares state with the current one,
        and cannot be used in parallel with it.
        Parameters
        ----------
        partial_shaping : bool
            Whether to allow changing the shape of unspecified arguments.
        allow_up_sizing : bool
            Whether to allow allocating new ndarrays that's larger than the original.
        kwargs : dict of string to tuple of int
            New shape for arguments.
        Returns
        -------
        exec : Executor
            A new executor that shares memory with self.
        Examples
        --------
        >>> a = mx.sym.Variable('a')
        >>> b = mx.sym.Variable('b')
        >>> c = 2 * a + b
        >>> texec = c.bind(mx.cpu(), {'a': mx.nd.zeros((2, 1)), 'b': mx.nd.ones((2,1))})
        >>> new_shape = {'a': (4, 2), 'b': (4, 2)}
        >>> texec.reshape(allow_up_sizing=True, **new_shape)
        """
        # pylint: disable=too-many-branches
        # Infer all argument/aux shapes from the shapes given in kwargs.
        arg_shapes, _, aux_shapes = self._symbol.infer_shape(**kwargs)
        if arg_shapes is None:
            raise ValueError("Insufficient argument shapes provided.")
        new_arg_dict = {}
        new_grad_dict = {}
        for i, name in enumerate(self._symbol.list_arguments()):
            new_shape = arg_shapes[i]
            arr = self.arg_arrays[i]
            darr = None if self.grad_arrays is None else self.grad_arrays[i]
            if partial_shaping or name in kwargs or new_shape == arr.shape:
                if np.prod(new_shape) > np.prod(arr.shape):
                    # Growing requires a fresh allocation; the caller
                    # must opt in explicitly.
                    assert allow_up_sizing, "New shape of arg:%s larger than original. "%name + \
                        "First making a big executor and then down sizing it " + \
                        "is more efficient than the reverse." + \
                        "If you really want to up size, set allow_up_sizing=True " + \
                        "to enable allocation of new arrays."
                    new_arg_dict[name] = nd.empty(new_shape, ctx=arr.context, dtype=arr.dtype)
                    if darr is not None:
                        new_grad_dict[name] = nd.empty(new_shape, ctx=darr.context, dtype=arr.dtype)
                else:
                    # Equal or smaller size: reuse the existing memory.
                    new_arg_dict[name] = arr.reshape(new_shape)
                    if darr is not None:
                        new_grad_dict[name] = darr.reshape(new_shape)
            else:
                raise AssertionError("Shape of unspecified array arg:%s changed. "%name + \
                    "This can cause the new executor to not share parameters " + \
                    "with the old one. Please check for error in network." +\
                    "If this is intended, set partial_shaping=True to suppress this warning.")
        new_aux_dict = {}
        for name, new_shape, arr in zip(self._symbol.list_auxiliary_states(),
                                        aux_shapes, self.aux_arrays):
            if partial_shaping or new_shape == arr.shape:
                if np.prod(new_shape) > np.prod(arr.shape):
                    assert allow_up_sizing, "New shape of arg:%s larger than original. "%name + \
                        "First making a big executor and then down sizing it " + \
                        "is more efficient than the reverse." + \
                        "If you really want to up size, set allow_up_sizing=True " + \
                        "to enable allocation of new arrays."
                    new_aux_dict[name] = nd.empty(new_shape, ctx=arr.context, dtype=arr.dtype)
                else:
                    new_aux_dict[name] = arr.reshape(new_shape)
            else:
                raise AssertionError("Shape of unspecified array aux:%s changed. "%name + \
                    "This can cause the new executor to not share parameters " + \
                    "with the old one. Please check for error in network." +\
                    "If this is intended, set partial_shaping=True to suppress this warning.")
        # Bind a new executor that shares memory with this one.
        return self._symbol.bind(self._ctx,
                                 args=new_arg_dict,
                                 args_grad=new_grad_dict,
                                 grad_req=self._grad_req,
                                 aux_states=new_aux_dict,
                                 group2ctx=self._group2ctx,
                                 shared_exec=self)
def debug_str(self):
"""Get a debug string about internal execution plan.
Returns
-------
debug_str : string
Debug string of the executor.
Examples
--------
>>> a = mx.sym.Variable('a')
>>> b = mx.sym.sin(a)
>>> c = 2 * a + b
>>> texec = c.bind(mx.cpu(), {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])})
>>> print(texec.debug_str())
Symbol Outputs:
output[0]=_plus0(0)
Variable:a
--------------------
Op:_mul_scalar, Name=_mulscalar0
Inputs:
arg[0]=a(0) version=0
Attrs:
scalar=2
--------------------
Op:sin, Name=sin0
Inputs:
arg[0]=a(0) version=0
--------------------
Op:elemwise_add, Name=_plus0
Inputs:
arg[0]=_mulscalar0(0)
arg[1]=sin0(0)
Total 0 MB allocated
Total 11 TempSpace resource requested
"""
debug_str = ctypes.c_char_p()
check_call(_LIB.MXExecutorPrint(
self.handle, ctypes.byref(debug_str)))
return py_str(debug_str.value)
|
apache-2.0
|
tunneln/CarnotKE
|
jyhton/out/production/CarnotKE/shell/jython.py
|
20
|
15050
|
#!/usr/bin/env python2.7 -E
# -*- coding: utf-8 -*-
# Launch script for Jython. It may be wrapped as an executable with
# tools like PyInstaller, creating jython.exe, or run directly. The
# installer will make this the default launcher under the name
# bin/jython if CPython 2.7 is available with the above shebang
# invocation.
import glob
import inspect
import os
import os.path
import pipes
import shlex
import subprocess
import sys
from collections import OrderedDict
is_windows = os.name == "nt" or (os.name == "java" and os._name == "nt")
def parse_launcher_args(args):
    """Split launcher-level options off the front of *args*.

    Returns a ``(parsed, remaining)`` pair where *parsed* carries the
    launcher options (java args, -D properties, flags) and *remaining*
    is the argument list handed over to Jython itself.
    """
    class Namespace(object):
        pass
    parsed = Namespace()
    parsed.java = []
    parsed.properties = OrderedDict()
    parsed.boot = False
    # Fix: `jdb` was previously initialized twice (False, then None);
    # a single boolean default is kept -- all uses only test truthiness.
    parsed.jdb = False
    parsed.help = False
    parsed.print_requested = False
    parsed.profile = False

    it = iter(args)
    next(it)  # ignore sys.argv[0]
    i = 1  # index of the first argument NOT consumed by the launcher
    while True:
        try:
            arg = next(it)
        except StopIteration:
            break
        if arg.startswith("-D"):
            k, v = arg[2:].split("=")
            parsed.properties[k] = v
            i += 1
        elif arg in ("-J-classpath", "-J-cp"):
            try:
                next_arg = next(it)
            except StopIteration:
                bad_option("Argument expected for -J-classpath option")
            if next_arg.startswith("-"):
                bad_option("Bad option for -J-classpath")
            parsed.classpath = next_arg
            i += 2
        elif arg.startswith("-J-Xmx"):
            parsed.mem = arg[2:]
            i += 1
        elif arg.startswith("-J-Xss"):
            parsed.stack = arg[2:]
            i += 1
        elif arg.startswith("-J"):
            parsed.java.append(arg[2:])
            i += 1
        elif arg == "--print":
            parsed.print_requested = True
            i += 1
        elif arg in ("-h", "--help"):
            # NOTE(review): i is deliberately left unchanged here, so the
            # help flag also stays in the remaining args -- TODO confirm
            # this double-forwarding is intended.
            parsed.help = True
        elif arg in ("--boot", "--jdb", "--profile"):
            setattr(parsed, arg[2:], True)
            i += 1
        elif arg == "--":
            i += 1
            break
        else:
            break
    return parsed, args[i:]
class JythonCommand(object):
    """Assembles the java command line used to launch Jython.

    Lazily discovers the platform, JAVA_HOME/JYTHON_HOME locations and
    the jars to put on the classpath; the `command` property yields the
    final argv list.
    """

    def __init__(self, args, jython_args):
        self.args = args
        self.jython_args = jython_args

    @property
    def uname(self):
        # Cached platform tag: "windows", "cygwin" or `uname` output.
        if hasattr(self, "_uname"):
            return self._uname
        if is_windows:
            self._uname = "windows"
        else:
            uname = subprocess.check_output(["uname"]).strip().lower()
            if uname.startswith("cygwin"):
                self._uname = "cygwin"
            else:
                self._uname = uname
        return self._uname

    @property
    def java_home(self):
        if not hasattr(self, "_java_home"):
            self.setup_java_command()
        return self._java_home

    @property
    def java_command(self):
        if not hasattr(self, "_java_command"):
            self.setup_java_command()
        return self._java_command

    def setup_java_command(self):
        """Resolve JAVA_HOME and the java/jdb binary to execute."""
        if self.args.help:
            self._java_home = None
            self._java_command = "java"
            return
        if "JAVA_HOME" not in os.environ:
            self._java_home = None
            self._java_command = "jdb" if self.args.jdb else "java"
        else:
            self._java_home = os.environ["JAVA_HOME"]
            if self.uname == "cygwin":
                # On Cygwin the binary is found via PATH, not JAVA_HOME.
                self._java_command = "jdb" if self.args.jdb else "java"
            else:
                self._java_command = os.path.join(
                    self.java_home, "bin",
                    "jdb" if self.args.jdb else "java")

    @property
    def executable(self):
        """Path to executable"""
        if hasattr(self, "_executable"):
            return self._executable
        # Modified from
        # http://stackoverflow.com/questions/3718657/how-to-properly-determine-current-script-directory-in-python/22881871#22881871
        if getattr(sys, "frozen", False):  # py2exe, PyInstaller, cx_Freeze
            path = os.path.abspath(sys.executable)
        else:
            def inspect_this(): pass
            path = inspect.getabsfile(inspect_this)
        self._executable = os.path.realpath(path)
        return self._executable

    @property
    def jython_home(self):
        if hasattr(self, "_jython_home"):
            return self._jython_home
        if "JYTHON_HOME" in os.environ:
            self._jython_home = os.environ["JYTHON_HOME"]
        else:
            self._jython_home = os.path.dirname(os.path.dirname(self.executable))
        if self.uname == "cygwin":
            self._jython_home = subprocess.check_output(["cygpath", "--windows", self._jython_home]).strip()
        return self._jython_home

    @property
    def jython_opts(self):
        # BUG FIX: this getter was declared without ``self``, so reading
        # the property always raised TypeError.
        return os.environ.get("JYTHON_OPTS", "")

    @property
    def classpath_delimiter(self):
        return ";" if (is_windows or self.uname == "cygwin") else ":"

    @property
    def jython_jars(self):
        """Jars for the (boot)classpath, preferring a dev build."""
        if hasattr(self, "_jython_jars"):
            return self._jython_jars
        if os.path.exists(os.path.join(self.jython_home, "jython-dev.jar")):
            jars = [os.path.join(self.jython_home, "jython-dev.jar")]
            if self.args.boot:
                # Wildcard expansion does not work for bootclasspath
                for jar in glob.glob(os.path.join(self.jython_home, "javalib", "*.jar")):
                    jars.append(jar)
            else:
                jars.append(os.path.join(self.jython_home, "javalib", "*"))
        elif not os.path.exists(os.path.join(self.jython_home, "jython.jar")):
            bad_option("""{jython_home} contains neither jython-dev.jar nor jython.jar.
Try running this script from the 'bin' directory of an installed Jython or
setting {envvar_specifier}JYTHON_HOME.""".format(
                jython_home=self.jython_home,
                envvar_specifier="%" if self.uname == "windows" else "$"))
        else:
            jars = [os.path.join(self.jython_home, "jython.jar")]
        self._jython_jars = jars
        return self._jython_jars

    @property
    def java_classpath(self):
        if hasattr(self.args, "classpath"):
            return self.args.classpath
        else:
            return os.environ.get("CLASSPATH", ".")

    @property
    def java_mem(self):
        if hasattr(self.args, "mem"):
            return self.args.mem
        else:
            return os.environ.get("JAVA_MEM", "-Xmx512m")

    @property
    def java_stack(self):
        if hasattr(self.args, "stack"):
            return self.args.stack
        else:
            return os.environ.get("JAVA_STACK", "-Xss1024k")

    @property
    def java_opts(self):
        return [self.java_mem, self.java_stack]

    @property
    def java_profile_agent(self):
        return os.path.join(self.jython_home, "javalib", "profile.jar")

    def set_encoding(self):
        # Default to UTF-8 on OS X unless the user overrode it.
        if "JAVA_ENCODING" not in os.environ and self.uname == "darwin" and "file.encoding" not in self.args.properties:
            self.args.properties["file.encoding"] = "UTF-8"

    def convert(self, arg):
        if sys.stdout.encoding:
            return arg.encode(sys.stdout.encoding)
        else:
            return arg

    def make_classpath(self, jars):
        return self.classpath_delimiter.join(jars)

    def convert_path(self, arg):
        if self.uname == "cygwin":
            if not arg.startswith("/cygdrive/"):
                new_path = self.convert(arg).replace("/", "\\")
            else:
                new_path = subprocess.check_output(["cygpath", "-pw", self.convert(arg)]).strip()
            return new_path
        else:
            return self.convert(arg)

    @property
    def command(self):
        """The full argv list for launching the JVM."""
        self.set_encoding()
        args = [self.java_command]
        args.extend(self.java_opts)
        args.extend(self.args.java)
        classpath = self.java_classpath
        jython_jars = self.jython_jars
        if self.args.boot:
            args.append("-Xbootclasspath/a:%s" % self.convert_path(self.make_classpath(jython_jars)))
        else:
            classpath = self.make_classpath(jython_jars) + self.classpath_delimiter + classpath
            args.extend(["-classpath", self.convert_path(classpath)])
        if "python.home" not in self.args.properties:
            args.append("-Dpython.home=%s" % self.convert_path(self.jython_home))
        if "python.executable" not in self.args.properties:
            args.append("-Dpython.executable=%s" % self.convert_path(self.executable))
        if "python.launcher.uname" not in self.args.properties:
            args.append("-Dpython.launcher.uname=%s" % self.uname)
        # Determines whether running on a tty for the benefit of
        # running on Cygwin. This step is needed because the Mintty
        # terminal emulator doesn't behave like a standard Microsoft
        # Windows tty, and so JNR Posix doesn't detect it properly.
        if "python.launcher.tty" not in self.args.properties:
            args.append("-Dpython.launcher.tty=%s" % str(os.isatty(sys.stdin.fileno())).lower())
        if self.uname == "cygwin" and "python.console" not in self.args.properties:
            args.append("-Dpython.console=org.python.core.PlainConsole")
        if self.args.profile:
            args.append("-XX:-UseSplitVerifier")
            args.append("-javaagent:%s" % self.convert_path(self.java_profile_agent))
        for k, v in self.args.properties.iteritems():
            args.append("-D%s=%s" % (self.convert(k), self.convert(v)))
        args.append("org.python.util.jython")
        if self.args.help:
            args.append("--help")
        args.extend(self.jython_args)
        return args
def bad_option(msg):
    """Print *msg* plus a usage hint to stderr and exit with status 2.

    Uses Python 2 ``print >>`` syntax, like the rest of this launcher.
    """
    print >> sys.stderr, """
{msg}
usage: jython [option] ... [-c cmd | -m mod | file | -] [arg] ...
Try `jython -h' for more information.
""".format(msg=msg)
    sys.exit(2)
def print_help():
    """Print the launcher-specific help text to stderr (Python 2 print)."""
    print >> sys.stderr, """
Jython launcher-specific options:
-Dname=value : pass name=value property to Java VM (e.g. -Dpython.path=/a/b/c)
-Jarg : pass argument through to Java VM (e.g. -J-Xmx512m)
--boot : speeds up launch performance by putting Jython jars on the boot classpath
--help : this help message
--jdb : run under JDB java debugger
--print : print the Java command with args for launching Jython instead of executing it
--profile: run with the Java Interactive Profiler (http://jiprof.sf.net)
-- : pass remaining arguments through to Jython
Jython launcher environment variables:
JAVA_MEM : Java memory (sets via -Xmx)
JAVA_OPTS : options to pass directly to Java
JAVA_STACK : Java stack size (sets via -Xss)
JAVA_HOME : Java installation directory
JYTHON_HOME: Jython installation directory
JYTHON_OPTS: default command line arguments
"""
def support_java_opts(args):
    """Yield the options from JAVA_OPTS, tagging JVM options with -J.

    -D properties pass through unchanged; -classpath/-cp is tagged and its
    value argument is forwarded verbatim; every other option is tagged with
    -J so the launcher's argument parser hands it to the JVM.
    """
    it = iter(args)
    # A plain for-loop terminates cleanly at end of input.  The original
    # ``while it`` + bare next() relied on StopIteration escaping the
    # generator, which PEP 479 (Python 3.7+) turns into a RuntimeError.
    for arg in it:
        if arg.startswith("-D"):
            yield arg
        elif arg in ("-classpath", "-cp"):
            yield "-J" + arg
            try:
                # The classpath option consumes the following argument too.
                yield next(it)
            except StopIteration:
                bad_option("Argument expected for -classpath option in JAVA_OPTS")
        else:
            yield "-J" + arg
# copied from subprocess module in Jython; see
# http://bugs.python.org/issue1724822 where it is discussed to include
# in Python 3.x for shlex:
def cmdline2list(cmdline):
    """Build an argv list from a Microsoft shell style cmdline str

    The reverse of list2cmdline that follows the same MS C runtime
    rules.  Backslashes are literal unless they precede a double quote:
    then each pair collapses to a single backslash, and an odd trailing
    backslash escapes the quote itself.
    """
    whitespace = ' \t'
    # count of preceding '\'
    bs_count = 0
    in_quotes = False
    arg = []
    argv = []

    for ch in cmdline:
        if ch in whitespace and not in_quotes:
            if arg:
                # finalize arg and reset
                argv.append(''.join(arg))
                arg = []
            bs_count = 0
        elif ch == '\\':
            arg.append(ch)
            bs_count += 1
        elif ch == '"':
            if not bs_count % 2:
                # Even number of '\' followed by a '"'. Place one
                # '\' for every pair and treat '"' as a delimiter
                if bs_count:
                    # // keeps the slice index an int on Python 3 (and
                    # under ``from __future__ import division``); plain /
                    # would yield a float and break the del.
                    del arg[-(bs_count // 2):]
                in_quotes = not in_quotes
            else:
                # Odd number of '\' followed by a '"'. Place one '\'
                # for every pair and treat '"' as an escape sequence
                # by the remaining '\'
                del arg[-(bs_count // 2 + 1):]
                arg.append(ch)
            bs_count = 0
        else:
            # regular char
            arg.append(ch)
            bs_count = 0

    # A single trailing '"' delimiter yields an empty arg
    if arg or in_quotes:
        argv.append(''.join(arg))

    return argv
def decode_args(sys_args):
    """Merge JAVA_OPTS and JYTHON_OPTS from the environment with *sys_args*.

    Returns (args, jython_opts); both lists are decoded with the stdout
    encoding when one is available (Python 2 str -> unicode).
    """

    def opts_from_env(name):
        raw = os.environ.get(name, "")
        # Windows quoting rules differ from POSIX shell quoting.
        return cmdline2list(raw) if is_windows else shlex.split(raw)

    args = [sys_args[0]]
    args.extend(support_java_opts(opts_from_env("JAVA_OPTS")))
    args.extend(sys_args[1:])

    jython_opts = opts_from_env("JYTHON_OPTS")

    encoding = sys.stdout.encoding
    if encoding:
        if encoding.lower() == "cp65001":
            sys.exit("""Jython does not support code page 65001 (CP_UTF8).
Please try another code page by setting it with the chcp command.""")
        args = [arg.decode(encoding) for arg in args]
        jython_opts = [arg.decode(encoding) for arg in jython_opts]
    return args, jython_opts
def main(sys_args):
    """Launcher entry point: build the java command, then run or print it."""
    sys_args, jython_opts = decode_args(sys_args)
    args, jython_args = parse_launcher_args(sys_args)
    jython_command = JythonCommand(args, jython_opts + jython_args)
    command = jython_command.command

    if args.profile and not args.help:
        # Start each profiling run with a fresh profile.txt.
        try:
            os.unlink("profile.txt")
        except OSError:
            pass
    if args.print_requested and not args.help:
        # --print: show the command line instead of executing it.
        if jython_command.uname == "windows":
            print subprocess.list2cmdline(jython_command.command)
        else:
            print " ".join(pipes.quote(arg) for arg in jython_command.command)
    else:
        if not (is_windows or not hasattr(os, "execvp") or args.help or jython_command.uname == "cygwin"):
            # Replace this process with the java process.
            #
            # NB such replacements actually do not work under Windows,
            # but if tried, they also fail very badly by hanging.
            # So don't even try!
            os.execvp(command[0], command[1:])
        else:
            # Fallback: run java as a child process and mirror its exit code.
            result = 1
            try:
                result = subprocess.call(command)
                if args.help:
                    print_help()
            except KeyboardInterrupt:
                pass
            sys.exit(result)
# Script entry point: hand the raw argv (including the program name) to main().
if __name__ == "__main__":
    main(sys.argv)
|
apache-2.0
|
Vixionar/django
|
django/contrib/gis/geos/prototypes/coordseq.py
|
485
|
3049
|
from ctypes import POINTER, c_double, c_int, c_uint
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
GEOSException, last_arg_byref,
)
# ## Error-checking routines specific to coordinate sequences. ##
def check_cs_op(result, func, cargs):
    """Validate the status code of a coordinate sequence operation.

    GEOS reports failure with a zero status; anything else is passed through.
    """
    if result == 0:
        raise GEOSException('Could not set value on coordinate sequence')
    return result
def check_cs_get(result, func, cargs):
    """Validate a coordinate sequence retrieval and return the fetched value.

    Reuses the status-code check, then extracts the actual result from the
    by-reference output argument.
    """
    check_cs_op(result, func, cargs)
    return last_arg_byref(cargs)
# ## Coordinate sequence prototype factory classes. ##
class CsInt(GEOSFuncFactory):
    "For coordinate sequence routines that return an integer."
    # The routine writes its integer result through the c_uint out-parameter;
    # check_cs_get validates the status code and returns that value.
    argtypes = [CS_PTR, POINTER(c_uint)]
    restype = c_int
    errcheck = staticmethod(check_cs_get)
class CsOperation(GEOSFuncFactory):
    "For coordinate sequence operations."
    restype = c_int

    def get_func(self, ordinate=False, get=False):
        # Getters receive the double by reference and need the
        # value-extracting error check; setters take it by value.
        self.errcheck = check_cs_get if get else check_cs_op
        dbl_param = POINTER(c_double) if get else c_double
        if ordinate:
            # Get/Set ordinate routines take an extra c_uint dimension index.
            self.argtypes = [CS_PTR, c_uint, c_uint, dbl_param]
        else:
            self.argtypes = [CS_PTR, c_uint, dbl_param]
        return super(CsOperation, self).get_func()
class CsOutput(GEOSFuncFactory):
    restype = CS_PTR

    def get_func(self, argtypes):
        self.argtypes = argtypes
        return super(CsOutput, self).get_func()

    @staticmethod
    def errcheck(result, func, cargs):
        """Raise when GEOS hands back a NULL coordinate sequence pointer."""
        if result:
            return result
        raise GEOSException(
            'Error encountered checking Coordinate Sequence returned from GEOS '
            'C function "%s".' % func.__name__
        )
# ## Coordinate Sequence ctypes prototypes ##

# Coordinate Sequence constructors & cloning.
cs_clone = CsOutput('GEOSCoordSeq_clone', [CS_PTR])
create_cs = CsOutput('GEOSCoordSeq_create', [c_uint, c_uint])
get_cs = CsOutput('GEOSGeom_getCoordSeq', [GEOM_PTR])

# Getting, setting ordinate
cs_getordinate = CsOperation('GEOSCoordSeq_getOrdinate', ordinate=True, get=True)
cs_setordinate = CsOperation('GEOSCoordSeq_setOrdinate', ordinate=True)

# For getting, x, y, z
cs_getx = CsOperation('GEOSCoordSeq_getX', get=True)
cs_gety = CsOperation('GEOSCoordSeq_getY', get=True)
cs_getz = CsOperation('GEOSCoordSeq_getZ', get=True)

# For setting, x, y, z
cs_setx = CsOperation('GEOSCoordSeq_setX')
cs_sety = CsOperation('GEOSCoordSeq_setY')
cs_setz = CsOperation('GEOSCoordSeq_setZ')

# These routines return size & dimensions.
cs_getsize = CsInt('GEOSCoordSeq_getSize')
cs_getdims = CsInt('GEOSCoordSeq_getDimensions')
|
bsd-3-clause
|
OpenAcademy-OpenStack/project-docs
|
samples/helloworld/helloworld/openstack/common/eventlet_backdoor.py
|
1
|
4797
|
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from helloworld.openstack.common.gettextutils import _
from helloworld.openstack.common import log as logging
# Help text shared between the option definition and the error raised by
# _parse_port_range().
help_for_backdoor_port = (
    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
    "in listening on a random tcp port number; <port> results in listening "
    "on the specified port number (and not enabling backdoor if that port "
    "is in use); and <start>:<end> results in listening on the smallest "
    "unused port number within the specified range of port numbers. The "
    "chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               default=None,
               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]

CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
    """Raised when the backdoor_port option cannot be parsed."""

    def __init__(self, port_range, help_msg, ex):
        details = {'range': port_range, 'ex': ex, 'help': help_msg}
        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
               '%(help)s' % details)
        super(EventletBackdoorConfigValueError, self).__init__(msg)
        self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
    """Dump a stack trace for every live greenthread to stdout."""
    for idx, gthread in enumerate(_find_objects(greenlet.greenlet)):
        print(idx, gthread)
        traceback.print_stack(gthread.gr_frame)
        print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
    """Start the eventlet backdoor REPL when backdoor_port is configured.

    Returns the bound port number, or None when the backdoor is disabled.
    """
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen().  In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') %
             {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
|
apache-2.0
|
benthomasson/ansible
|
lib/ansible/modules/storage/zfs/zfs.py
|
9
|
7895
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots.
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs)
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: ['present', 'absent']
required: true
origin:
description:
- Snapshot from which to create a clone
default: null
required: false
key_value:
description:
- The C(zfs) module takes key=value pairs for zfs properties to be set. See the zfs(8) man page for more information.
default: null
required: false
author: "Johan Wiren (@johanwiren)"
'''
EXAMPLES = '''
# Create a new file system called myfs in pool rpool with the setuid property turned off
- zfs:
name: rpool/myfs
state: present
setuid: off
# Create a new volume called myvol in pool rpool.
- zfs:
name: rpool/myvol
state: present
volsize: 10M
# Create a snapshot of rpool/myfs file system.
- zfs:
name: rpool/myfs@mysnapshot
state: present
# Create a new file system called myfs2 with snapdir enabled
- zfs:
name: rpool/myfs2
state: present
snapdir: enabled
# Create a new file system by cloning a snapshot
- zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
# Destroy a filesystem
- zfs:
name: rpool/myfs
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Zfs(object):
    """Thin wrapper around the zfs/zpool CLIs for one dataset, volume,
    snapshot or clone."""

    def __init__(self, module, name, properties):
        # module: AnsibleModule instance used to locate binaries, run
        # commands and report failures.
        self.module = module
        self.name = name
        self.properties = properties
        self.changed = False
        self.zfs_cmd = module.get_bin_path('zfs', True)
        self.zpool_cmd = module.get_bin_path('zpool', True)
        # The pool is the first path component of the dataset name.
        self.pool = name.split('/')[0]
        self.is_solaris = os.uname()[0] == 'SunOS'
        self.is_openzfs = self.check_openzfs()
        self.enhanced_sharing = self.check_enhanced_sharing()

    def check_openzfs(self):
        """Return True when the pool runs OpenZFS (version '-' or 5000)."""
        cmd = [self.zpool_cmd]
        cmd.extend(['get', 'version'])
        cmd.append(self.pool)
        (rc, out, err) = self.module.run_command(cmd, check_rc=True)
        version = out.splitlines()[-1].split()[2]
        if version == '-':
            return True
        if int(version) == 5000:
            return True
        return False

    def check_enhanced_sharing(self):
        """Return True when share.nfs/share.smb style properties are
        available (Solaris, non-OpenZFS, pool version >= 34)."""
        if self.is_solaris and not self.is_openzfs:
            cmd = [self.zpool_cmd]
            cmd.extend(['get', 'version'])
            cmd.append(self.pool)
            (rc, out, err) = self.module.run_command(cmd, check_rc=True)
            version = out.splitlines()[-1].split()[2]
            if int(version) >= 34:
                return True
        return False

    def exists(self):
        """Return True when the dataset/snapshot/volume already exists."""
        cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        return rc == 0

    def create(self):
        """Create the filesystem, volume, snapshot or clone.

        Honors check mode; on failure the module exits via fail_json.
        """
        if self.module.check_mode:
            self.changed = True
            return
        properties = self.properties
        # These pseudo-properties are command-line flags, not -o options.
        volsize = properties.pop('volsize', None)
        volblocksize = properties.pop('volblocksize', None)
        origin = properties.pop('origin', None)
        cmd = [self.zfs_cmd]

        # A name containing '@' is a snapshot; an origin makes it a clone.
        if "@" in self.name:
            action = 'snapshot'
        elif origin:
            action = 'clone'
        else:
            action = 'create'
        cmd.append(action)

        if action in ['create', 'clone']:
            cmd += ['-p']

        if volsize:
            cmd += ['-V', volsize]
        if volblocksize:
            # BUGFIX: pass the requested block size value; the original
            # passed the literal string 'volblocksize' to `zfs create -b`.
            cmd += ['-b', volblocksize]
        if properties:
            for prop, value in properties.items():
                cmd += ['-o', '%s="%s"' % (prop, value)]
        if origin:
            cmd.append(origin)
        cmd.append(self.name)
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def destroy(self):
        """Destroy the dataset and all dependents (zfs destroy -R)."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def set_property(self, prop, value):
        """Set a single zfs property on the dataset."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
        (rc, out, err) = self.module.run_command(cmd)
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def set_properties_if_changed(self):
        """Apply only those properties whose current value differs."""
        current_properties = self.get_current_properties()
        for prop, value in self.properties.items():
            if current_properties.get(prop, None) != value:
                self.set_property(prop, value)

    def get_current_properties(self):
        """Return locally-set properties as a dict, adding sharenfs/sharesmb
        aliases on platforms with enhanced sharing."""
        cmd = [self.zfs_cmd, 'get', '-H']
        if self.enhanced_sharing:
            cmd += ['-e']
        cmd += ['all', self.name]
        rc, out, err = self.module.run_command(" ".join(cmd))
        properties = dict()
        # `zfs get -H` is tab separated: name, property, value, source.
        for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
            if source == 'local':
                properties[prop] = value
        # Add alias for enhanced sharing properties
        if self.enhanced_sharing:
            properties['sharenfs'] = properties.get('share.nfs', None)
            properties['sharesmb'] = properties.get('share.smb', None)
        return properties
def main():
    """Module entry point: converge the named dataset to the desired state."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(type='str', required=True),
            state = dict(type='str', required=True, choices=['present', 'absent']),
            # No longer used. Kept here to not interfere with zfs properties
            createparent = dict(type='bool', required=False)
            ),
        supports_check_mode=True,
        # Freestyle parameters are accepted as zfs properties.
        check_invalid_arguments=False
        )

    state = module.params.pop('state')
    name = module.params.pop('name')

    # Get all valid zfs-properties
    properties = dict()
    for prop, value in module.params.items():
        # All freestyle params are zfs properties
        if prop not in module.argument_spec:
            # Reverse the boolification of freestyle zfs properties
            if isinstance(value, bool):
                if value is True:
                    properties[prop] = 'on'
                else:
                    properties[prop] = 'off'
            else:
                properties[prop] = value

    result = {}
    result['name'] = name
    result['state'] = state

    zfs = Zfs(module, name, properties)

    if state == 'present':
        # Create when missing, otherwise reconcile properties only.
        if zfs.exists():
            zfs.set_properties_if_changed()
        else:
            zfs.create()
    elif state == 'absent':
        if zfs.exists():
            zfs.destroy()

    result.update(zfs.properties)
    result['changed'] = zfs.changed
    module.exit_json(**result)
# Module execution starts here when invoked by Ansible.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
foodszhang/kbengine
|
kbe/res/scripts/common/Lib/test/test_fileio.py
|
80
|
15460
|
# Adapted from test_file.py by Daniel Stutzbach
import sys
import os
import io
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from test.support import TESTFN, check_warnings, run_unittest, make_bad_fd, cpython_only
from collections import UserList
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    """FileIO tests that run against an automatically created test file."""
    # file tests for which a test file is automatically set up

    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testReadinto(self):
        # verify readinto
        self.f.write(bytes([1, 2]))
        self.f.close()
        a = array('b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array('b', [1, 2]), a[:n])

    def testWritelinesList(self):
        l = [b'123', b'456']
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesUserList(self):
        l = UserList([b'123', b'456'])
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesError(self):
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
        self.assertRaises(TypeError, self.f.writelines, None)
        self.assertRaises(TypeError, self.f.writelines, "abc")

    def test_none_args(self):
        # None size arguments mean "read everything" / "no limit".
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def test_reject(self):
        self.assertRaises(TypeError, self.f.write, "Hello!")

    def testRepr(self):
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode=%r>"
                         % (self.f.name, self.f.mode))
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode=%r>"
                         % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")

    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10)  # Open for reading
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        methods = ['fileno', 'isatty', 'read', 'readinto',
                   'seek', 'tell', 'truncate', 'write', 'seekable',
                   'readable', 'writable']
        self.f.close()
        self.assertTrue(self.f.closed)
        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except OSError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised OSError")

    @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
    def testOpenDirFD(self):
        fd = os.open('.', os.O_RDONLY)
        with self.assertRaises(OSError) as cm:
            _FileIO(fd, 'r')
        os.close(fd)
        self.assertEqual(cm.exception.errno, errno.EISDIR)

    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor. First, a decorator:
    def ClosedFD(func):
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except OSError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except OSError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised OSError")
            finally:
                try:
                    self.f.close()
                except OSError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write(b'a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # Helper: reopen TESTFN for reading, then close its fd out from
        # under the FileIO object so subsequent reads must raise EBADF.
        try:
            self.f.close()
        except OSError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array('b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    """FileIO tests that manage their own files, fds and mode strings."""

    def testAbles(self):
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()

            if sys.platform != "win32":
                try:
                    f = _FileIO("/dev/tty", "a")
                except OSError:
                    # When run in a cron job there just aren't any
                    # ttys, so skip the test. This also handles other
                    # OS'es that don't support /dev/tty.
                    pass
                else:
                    self.assertEqual(f.readable(), False)
                    self.assertEqual(f.writable(), True)
                    if sys.platform != "darwin" and \
                       'bsd' not in sys.platform and \
                       not sys.platform.startswith('sunos'):
                        # Somehow /dev/tty appears seekable on some BSDs
                        self.assertEqual(f.seekable(), False)
                    self.assertEqual(f.isatty(), True)
                    f.close()
        finally:
            os.unlink(TESTFN)

    def testInvalidModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testModeStrings(self):
        # test that the mode attribute is correct for various mode strings
        # given as init args
        try:
            for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
                          ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
                          ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
                          ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
                # read modes are last so that TESTFN will exist first
                with _FileIO(TESTFN, modes[0]) as f:
                    self.assertEqual(f.mode, modes[1])
        finally:
            if os.path.exists(TESTFN):
                os.unlink(TESTFN)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest('could not encode %r to ascii' % TESTFN)
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testConstructorHandlesNULChars(self):
        # Embedded NUL bytes in a filename must be rejected, str or bytes.
        fn_with_NUL = 'foo\0bar'
        self.assertRaises(TypeError, _FileIO, fn_with_NUL, 'w')
        self.assertRaises(TypeError, _FileIO, bytes(fn_with_NUL, 'ascii'), 'w')

    def testInvalidFd(self):
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(OSError, msvcrt.get_osfhandle, make_bad_fd())

    @cpython_only
    def testInvalidFd_overflow(self):
        # Issue 15989
        import _testcapi
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MAX + 1)
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MIN - 1)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        # truncate() must not move the file position.
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, io.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, io.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = _FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)

        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # Constructor failures must not emit warnings.
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def testUnclosedFDOnException(self):
        class MyException(Exception): pass
        class MyFileIO(_FileIO):
            def __setattr__(self, name, value):
                if name == "name":
                    raise MyException("blocked setting name")
                return super(MyFileIO, self).__setattr__(name, value)
        fd = os.open(__file__, os.O_RDONLY)
        self.assertRaises(MyException, MyFileIO, fd)
        os.close(fd)  # should not raise OSError(EBADF)
def test_main():
    """Run both test classes, then always clean up TESTFN.

    Historically, these tests have been sloppy about removing TESTFN,
    so it is deleted unconditionally afterwards.
    """
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        try:
            os.unlink(TESTFN)
        except OSError:
            pass
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
|
lgpl-3.0
|
jangorecki/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_create_frame.py
|
6
|
2597
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from collections import defaultdict
import random
import sys
sys.path.insert(1, "../../")
import h2o
from h2o.exceptions import H2OValueError
from h2o.utils.compatibility import viewvalues
from tests import pyunit_utils
def create_frame_test():
    """Test `h2o.create_frame()`."""
    # Random dimensions: the created frame must match the requested size.
    for _ in range(10):
        r = random.randint(1, 1000)
        c = random.randint(1, 1000)
        frame = h2o.create_frame(rows=r, cols=c)
        assert frame.nrow == r and frame.ncol == c, \
            "Expected {0} rows and {1} cols, but got {2} rows and {3} cols.".format(r, c, frame.nrow, frame.ncol)

    def assert_coltypes(frame, freal, fenum, fint, fbin, ftime, fstring):
        # The server does not report columns as binary -- instead they are integer.
        fint += fbin
        fbin = 0
        type_counts = defaultdict(int)
        for ft in viewvalues(frame.types):
            type_counts[ft] += 1
        print("Created table with column counts: {%s}" % ", ".join("%s: %d" % t for t in type_counts.items()))
        # NOTE(review): fetches the expected fraction via locals()["f" + ct],
        # so the parameter names must stay in sync with the type names.
        for ct in ["real", "enum", "int", "time", "string"]:
            assert abs(type_counts[ct] - locals()["f" + ct] * frame.ncol) < 1, \
                "Wrong column count of type %s: %d" % (ct, type_counts[ct])

    # Explicit column-type fraction mixes
    # (arguments are: real, enum, int, bin, time, string).
    f1 = h2o.create_frame(rows=10, cols=1000, real_fraction=1)
    assert_coltypes(f1, 1, 0, 0, 0, 0, 0)

    f2 = h2o.create_frame(rows=10, cols=1000, binary_fraction=0.5, time_fraction=0.5)
    assert_coltypes(f2, 0, 0, 0, 0.5, 0.5, 0)

    f3 = h2o.create_frame(rows=10, cols=1000, string_fraction=0.2, time_fraction=0.8)
    assert_coltypes(f3, 0, 0, 0, 0, 0.8, 0.2)

    f4 = h2o.create_frame(rows=10, cols=1000, real_fraction=0.9)
    assert_coltypes(f4, 0.9, 0.04, 0.04, 0.02, 0, 0)

    f5 = h2o.create_frame(rows=2, cols=1000, integer_fraction=0.75000000000001, string_fraction=0.25000000000001)
    assert_coltypes(f5, 0, 0, 0.75, 0, 0, 0.25)

    # Fractions summing to more than 1 must be rejected by the client.
    try:
        h2o.create_frame(rows=10, cols=1000, real_fraction=0.1, categorical_fraction=0.1, integer_fraction=0.1,
                         binary_fraction=0.1, time_fraction=0.1, string_fraction=0.1)
        assert False, "The data frame should not have been created!"
    except H2OValueError:
        pass

    try:
        h2o.create_frame(rows=10, cols=1000, real_fraction=0.5, categorical_fraction=0.5, integer_fraction=0.1)
        assert False, "The data frame should not have been created!"
    except H2OValueError:
        pass
if __name__ == "__main__":
    # Standalone invocation: run under the pyunit harness (handles H2O
    # cluster connection and reporting).
    pyunit_utils.standalone_test(create_frame_test)
else:
    # Imported by the test runner: the harness is already set up.
    create_frame_test()
|
apache-2.0
|
HiroIshikawa/21playground
|
microblog/flask/lib/python3.5/site-packages/whoosh/lang/snowball/swedish.py
|
96
|
2760
|
from .bases import _ScandinavianStemmer
from whoosh.compat import u
class SwedishStemmer(_ScandinavianStemmer):
    """
    The Swedish Snowball stemmer.

    :cvar __vowels: The Swedish vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Swedish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/swedish/stemmer.html
    """

    __vowels = u("aeiouy\xE4\xE5\xF6")
    __s_ending = "bcdfghjklmnoprtvy"
    # Ordered longest-first so the greedy scans below strip maximally.
    __step1_suffixes = ("heterna", "hetens", "heter", "heten",
                        "anden", "arnas", "ernas", "ornas", "andes",
                        "andet", "arens", "arna", "erna", "orna",
                        "ande", "arne", "aste", "aren", "ades",
                        "erns", "ade", "are", "ern", "ens", "het",
                        "ast", "ad", "en", "ar", "er", "or", "as",
                        "es", "at", "a", "e", "s")
    __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
    __step3_suffixes = ("fullt", u("l\xF6st"), "els", "lig", "ig")

    def stem(self, word):
        """
        Stem a Swedish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        # R1 = region after the first non-vowel that follows a vowel,
        # computed by the Scandinavian base class; all suffix matching is
        # restricted to this region.
        r1 = self._r1_scandinavian(word, self.__vowels)
        # STEP 1: remove the longest matching inflectional suffix found in
        # R1.  A bare final 's' is removed only when preceded by one of
        # the valid s-ending consonants.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "s":
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 2: for double-consonant endings, drop only the last letter.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break
        # STEP 3: derivational endings; 'fullt'/'löst' lose only the final
        # 't', the others are removed entirely.
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in ("els", "lig", "ig"):
                    word = word[:-len(suffix)]
                elif suffix in ("fullt", u("l\xF6st")):
                    word = word[:-1]
                break
        return word
|
mit
|
vfasky/BlogCatke
|
virtualenv.bundle/requests/packages/chardet2/langhungarianmodel.py
|
25
|
12784
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Descriptor consumed by the chardet Hungarian prober for ISO-8859-2
# (Latin-2) input: char-to-order table plus the shared bigram matrix.
Latin2HungarianModel = { \
    'charToOrderMap': Latin2_HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,
    'keepEnglishLetter': True,
    'charsetName': "ISO-8859-2"
}
# Same model for windows-1250 encoded input; only the char-to-order map
# differs (the bigram matrix is shared between the two encodings).
Win1250HungarianModel = { \
    'charToOrderMap': win1250HungarianCharToOrderMap,
    'precedenceMatrix': HungarianLangModel,
    'mTypicalPositiveRatio': 0.947368,
    'keepEnglishLetter': True,
    'charsetName': "windows-1250"
}
|
apache-2.0
|
frainfreeze/studying
|
home/python/microblog/venv/lib/python3.5/site-packages/pip/_vendor/cachecontrol/filewrapper.py
|
74
|
2533
|
from io import BytesIO
class CallbackFileWrapper(object):
    """
    Transparent wrapper around a file-like object that tees every byte
    read from it into an in-memory buffer.  When the underlying file is
    detected to be closed, the supplied callback is invoked once with the
    buffer's full contents.

    All other attribute access is proxied to the wrapped file object.
    Double-underscore (name-mangled) members are used so the wrapper does
    not accidentally shadow attributes of the wrapped object.
    """

    def __init__(self, fp, callback):
        self.__buf = BytesIO()
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name):
        # Garbage collection can run before self.__fp is ever assigned.
        # Fetching it through __getattribute__ with the mangled name makes
        # a missing attribute raise AttributeError instead of recursing
        # back into __getattr__ forever.
        inner = self.__getattribute__("_CallbackFileWrapper__fp")
        return getattr(inner, name)

    def __is_fp_closed(self):
        # Prefer the urllib-style `.fp is None` signal, then the regular
        # `.closed` flag; if neither exists, assume still open (and simply
        # don't cache).
        try:
            return self.__fp.fp is None
        except AttributeError:
            pass
        try:
            return self.__fp.closed
        except AttributeError:
            pass
        # TODO: Add some logging here...
        return False

    def _close(self):
        if self.__callback:
            self.__callback(self.__buf.getvalue())
        # Drop the callback reference afterwards: a callback holding an
        # object with a __del__ method can otherwise deadlock the CPython
        # interpreter via a reference cycle; None breaks the cycle and
        # also makes _close idempotent.
        self.__callback = None

    def read(self, amt=None):
        chunk = self.__fp.read(amt)
        self.__buf.write(chunk)
        if self.__is_fp_closed():
            self._close()
        return chunk

    def _safe_read(self, amt):
        chunk = self.__fp._safe_read(amt)
        if amt == 2 and chunk == b"\r\n":
            # httplib issues this exact read to discard a chunk's trailing
            # CRLF; that's framing, not payload, so keep it out of the
            # buffer.
            return chunk
        self.__buf.write(chunk)
        if self.__is_fp_closed():
            self._close()
        return chunk
|
mit
|
CLOUGH/info3180-project-4
|
lib/werkzeug/contrib/fixers.py
|
464
|
9949
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as good as possible but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
    """Wrap the application in this middleware if you are using FastCGI or
    CGI and your app root is being set to the cgi script's path instead of
    the path users are going to visit.

    .. versionchanged:: 0.9
       Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.

    :param app: the WSGI application
    :param app_root: Defaulting to ``'/'``, you can set this to something
                     else if your app is mounted somewhere else.
    """

    def __init__(self, app, app_root='/'):
        self.app = app
        self.app_root = app_root

    def __call__(self, environ, start_response):
        # Rebuild PATH_INFO only for older Lighttpd versions or when no
        # server software is announced at all -- the version check was
        # added later and must not break tests that never set
        # SERVER_SOFTWARE.
        needs_rebuild = ('SERVER_SOFTWARE' not in environ
                         or environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28')
        if needs_rebuild:
            environ['PATH_INFO'] = (environ.get('SCRIPT_NAME', '')
                                    + environ.get('PATH_INFO', ''))
        environ['SCRIPT_NAME'] = self.app_root.strip('/')
        return self.app(environ, start_response)


# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
    """On windows environment variables are limited to the system charset
    which makes it impossible to store the `PATH_INFO` variable in the
    environment without loss of information on some systems.

    This is for example a problem for CGI scripts on a Windows Apache.

    This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
    `REQUEST_URL`, or `UNENCODED_URL` (whatever is available).  Thus the
    fix can only be applied if the webserver supports either of these
    variables.

    :param app: the WSGI application
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        for candidate in ('REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL'):
            if candidate not in environ:
                continue
            request_uri = unquote(environ[candidate])
            script_name = unquote(environ.get('SCRIPT_NAME', ''))
            if request_uri.startswith(script_name):
                # Everything after the mount point, minus the query string.
                remainder = request_uri[len(script_name):]
                environ['PATH_INFO'] = remainder.split('?', 1)[0]
                break
        return self.app(environ, start_response)
class ProxyFix(object):
    """This middleware can be applied to add HTTP proxy support to an
    application that was not designed with HTTP proxies in mind.  It
    sets `REMOTE_ADDR`, `HTTP_HOST` from `X-Forwarded` headers.

    If you have more than one proxy server in front of your app, set
    `num_proxies` accordingly.

    Do not use this middleware in non-proxy setups for security reasons.

    The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
    the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
    `werkzeug.proxy_fix.orig_http_host`.

    :param app: the WSGI application
    :param num_proxies: the number of proxy servers in front of the app.
    """

    def __init__(self, app, num_proxies=1):
        self.app = app
        self.num_proxies = num_proxies

    def get_remote_addr(self, forwarded_for):
        """Selects the new remote addr from the given list of ips in
        X-Forwarded-For.  By default it picks the one that the `num_proxies`
        proxy server provides.  Before 0.9 it would always pick the first.

        .. versionadded:: 0.8
        """
        if len(forwarded_for) < self.num_proxies:
            return None
        return forwarded_for[-1 * self.num_proxies]

    def __call__(self, environ, start_response):
        proto = environ.get('HTTP_X_FORWARDED_PROTO', '')
        raw_forwarded = environ.get('HTTP_X_FORWARDED_FOR', '').split(',')
        host = environ.get('HTTP_X_FORWARDED_HOST', '')
        # Preserve the untouched values so the application can still
        # inspect what the proxy itself reported.
        environ.update({
            'werkzeug.proxy_fix.orig_wsgi_url_scheme':
                environ.get('wsgi.url_scheme'),
            'werkzeug.proxy_fix.orig_remote_addr':
                environ.get('REMOTE_ADDR'),
            'werkzeug.proxy_fix.orig_http_host':
                environ.get('HTTP_HOST'),
        })
        # Drop whitespace and empty entries before picking the client ip.
        addresses = [addr for addr in
                     (entry.strip() for entry in raw_forwarded) if addr]
        client = self.get_remote_addr(addresses)
        if client is not None:
            environ['REMOTE_ADDR'] = client
        if host:
            environ['HTTP_HOST'] = host
        if proto:
            environ['wsgi.url_scheme'] = proto
        return self.app(environ, start_response)
class HeaderRewriterFix(object):
    """This middleware can remove response headers and add others.  This
    is for example useful to remove the `Date` header from responses if you
    are using a server that adds that header, no matter if it's present or
    not or to add `X-Powered-By` headers::

        app = HeaderRewriterFix(app, remove_headers=['Date'],
                                add_headers=[('X-Powered-By', 'WSGI')])

    :param app: the WSGI application
    :param remove_headers: a sequence of header keys that should be
                           removed.
    :param add_headers: a sequence of ``(key, value)`` tuples that should
                        be added.
    """

    def __init__(self, app, remove_headers=None, add_headers=None):
        self.app = app
        # Matching is case-insensitive, so normalize the blacklist once.
        self.remove_headers = set(key.lower()
                                  for key in (remove_headers or ()))
        self.add_headers = list(add_headers or ())

    def __call__(self, environ, start_response):
        def rewriting_start_response(status, headers, exc_info=None):
            # Keep everything not blacklisted, then append the extras.
            kept = [(key, value) for key, value in headers
                    if key.lower() not in self.remove_headers]
            kept.extend(self.add_headers)
            return start_response(status, kept, exc_info)
        return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
    """This middleware fixes a couple of bugs with Microsoft Internet
    Explorer.  Currently the following fixes are applied:

    -   removing of `Vary` headers for unsupported mimetypes which
        causes troubles with caching.  Can be disabled by passing
        ``fix_vary=False`` to the constructor.
        see: http://support.microsoft.com/kb/824847/en-us

    -   removes offending headers to work around caching bugs in
        Internet Explorer if `Content-Disposition` is set.  Can be
        disabled by passing ``fix_attach=False`` to the constructor.

    If it does not detect affected Internet Explorer versions it won't touch
    the request / response.
    """

    # This code was inspired by Django fixers for the same bugs.  The
    # fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and is available as part of the Django project:
    # http://code.djangoproject.com/ticket/4148

    def __init__(self, app, fix_vary=True, fix_attach=True):
        self.app = app
        self.fix_vary = fix_vary
        self.fix_attach = fix_attach

    def fix_headers(self, environ, headers, status=None):
        # Strip `Vary` for mimetypes other than the three IE handles.
        if self.fix_vary:
            header = headers.get('content-type', '')
            mimetype, options = parse_options_header(header)
            if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
                headers.pop('vary', None)

        # For attachments, remove the no-cache directives that break IE's
        # download handling while keeping any other Pragma/Cache-Control
        # values the application set.
        if self.fix_attach and 'content-disposition' in headers:
            pragma = parse_set_header(headers.get('pragma', ''))
            pragma.discard('no-cache')
            header = pragma.to_header()
            if not header:
                headers.pop('pragma', '')
            else:
                headers['Pragma'] = header
            header = headers.get('cache-control', '')
            if header:
                cc = parse_cache_control_header(header,
                                                cls=ResponseCacheControl)
                cc.no_cache = None
                cc.no_store = False
                header = cc.to_header()
                if not header:
                    headers.pop('cache-control', '')
                else:
                    headers['Cache-Control'] = header

    def run_fixed(self, environ, start_response):
        # Wrap start_response so the headers are rewritten on the way out.
        def fixing_start_response(status, headers, exc_info=None):
            headers = Headers(headers)
            self.fix_headers(environ, headers, status)
            return start_response(status, headers.to_wsgi_list(), exc_info)
        return self.app(environ, fixing_start_response)

    def __call__(self, environ, start_response):
        # Non-IE browsers pass straight through untouched.
        ua = UserAgent(environ)
        if ua.browser != 'msie':
            return self.app(environ, start_response)
        return self.run_fixed(environ, start_response)
|
apache-2.0
|
vFense/vFenseAgent-nix
|
agent/deps/rpm6/Python-2.7.5/lib/python2.7/ctypes/test/test_cast.py
|
81
|
3212
|
from ctypes import *
import unittest
import sys
class Test(unittest.TestCase):
    # NOTE(review): Python-2 era tests -- c_char_p elements are assigned
    # str literals ("foo bar"), which Python 3 would reject (bytes
    # required).  Documented as-is; do not run under Python 3 unmodified.

    def test_array2pointer(self):
        array = (c_int * 3)(42, 17, 2)

        # casting an array to a pointer works.
        ptr = cast(array, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

        if 2*sizeof(c_short) == sizeof(c_int):
            # Reinterpret the same memory as shorts; which half holds the
            # value depends on the machine's byte order.
            ptr = cast(array, POINTER(c_short))
            if sys.byteorder == "little":
                self.assertEqual([ptr[i] for i in range(6)],
                                 [42, 0, 17, 0, 2, 0])
            else:
                self.assertEqual([ptr[i] for i in range(6)],
                                 [0, 42, 0, 17, 0, 2])

    def test_address2pointer(self):
        # A typed pointer can be obtained both from c_void_p and from a
        # raw integer address.
        array = (c_int * 3)(42, 17, 2)

        address = addressof(array)
        ptr = cast(c_void_p(address), POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

        ptr = cast(address, POINTER(c_int))
        self.assertEqual([ptr[i] for i in range(3)], [42, 17, 2])

    def test_p2a_objects(self):
        # A pointer cast from an array must share the array's _objects
        # bookkeeping so the referenced Python strings stay alive.
        array = (c_char_p * 5)()
        self.assertEqual(array._objects, None)
        array[0] = "foo bar"
        self.assertEqual(array._objects, {'0': "foo bar"})

        p = cast(array, POINTER(c_char_p))
        # array and p share a common _objects attribute
        self.assertTrue(p._objects is array._objects)
        self.assertEqual(array._objects, {'0': "foo bar", id(array): array})
        p[0] = "spam spam"
        self.assertEqual(p._objects, {'0': "spam spam", id(array): array})
        self.assertTrue(array._objects is p._objects)
        p[1] = "foo bar"
        self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array})
        self.assertTrue(array._objects is p._objects)

    def test_other(self):
        # Slicing through a cast pointer -- including extended and
        # negative steps -- must keep reflecting the underlying array.
        p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
        self.assertEqual(p[:4], [1,2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()  # unrelated allocation; slices must be unaffected
        self.assertEqual(p[:4], [1, 2, 3, 4])
        self.assertEqual(p[:4:], [1, 2, 3, 4])
        self.assertEqual(p[3:-1:-1], [4, 3, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        p[2] = 96
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])
        c_int()
        self.assertEqual(p[:4], [1, 2, 96, 4])
        self.assertEqual(p[:4:], [1, 2, 96, 4])
        self.assertEqual(p[3:-1:-1], [4, 96, 2, 1])
        self.assertEqual(p[:4:3], [1, 4])

    def test_char_p(self):
        # This didn't work: bad argument to internal function
        s = c_char_p("hiho")
        self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
                         "hiho")

    try:
        c_wchar_p
    except NameError:
        pass
    else:
        # Only defined when this ctypes build exposes wchar support.
        def test_wchar_p(self):
            s = c_wchar_p("hiho")
            self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
                             "hiho")

if __name__ == "__main__":
    unittest.main()
|
lgpl-3.0
|
peergradeio/mongoengine-objectidmapfield
|
setup.py
|
1
|
1158
|
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Short description used on PyPI; the long description prefers the README
# but falls back to the short text when the file is unavailable (e.g. an
# sdist built without it).
DESCRIPTION = "A MongoEngine MapField that allows and requires ObjectIds as " \
              "keys."
try:
    # Context manager closes the handle deterministically; catch only I/O
    # errors instead of the previous bare `except:`, which would also have
    # swallowed SyntaxError, KeyboardInterrupt, etc.
    with open('README.md') as readme:
        LONG_DESCRIPTION = readme.read()
except (IOError, OSError):
    LONG_DESCRIPTION = DESCRIPTION

setup(
    name='mongoengine-objectidmapfield',
    version='0.0.1',
    packages=find_packages(),
    author='Malthe Jørgensen',
    author_email='malthe.jorgensen@gmail.com',
    url='https://github.com/peergradeio/mongoengine-objectidmapfield',
    license='BSD 3-Clause',
    include_package_data=True,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    platforms=['any'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        # NOTE(review): the canonical trove classifier is
        # 'License :: OSI Approved :: BSD License'; left as-is to avoid
        # changing published metadata without confirmation.
        'License :: BSD 3-clause',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Database',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=['mongoengine', 'six'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
    test_suite='tests',
)
|
bsd-3-clause
|
kularny/GeniSys.Kernel
|
toolchain/arm-cortex-a9/share/gcc-4.9.4/python/libstdcxx/v6/printers.py
|
56
|
41551
|
# Pretty-printers for libstdc++.
# Copyright (C) 2008-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import itertools
import re
import sys
### Python 2 + Python 3 compatibility code
# Resources about compatibility:
#
# * <http://pythonhosted.org/six/>: Documentation of the "six" module
# FIXME: The handling of e.g. std::basic_string (at least on char)
# probably needs updating to work with Python 3's new string rules.
#
# In particular, Python 3 has a separate type (called byte) for
# bytestrings, and a special b"" syntax for the byte literals; the old
# str() type has been redefined to always store Unicode text.
#
# We probably can't do much about this until this GDB PR is addressed:
# <https://sourceware.org/bugzilla/show_bug.cgi?id=17138>
# Define Iterator/imap/izip/long so the rest of this module can be written
# in a single Python-3 style while still running under Python 2.
if sys.version_info[0] > 2:
    ### Python 3 stuff
    Iterator = object
    # Python 3 folds these into the normal functions.
    imap = map
    izip = zip
    # Also, int subsumes long
    long = int
else:
    ### Python 2 stuff
    class Iterator:
        """Compatibility mixin for iterators
        Instead of writing next() methods for iterators, write
        __next__() methods and use this mixin to make them work in
        Python 2 as well as Python 3.
        Idea stolen from the "six" documentation:
        <http://pythonhosted.org/six/#six.Iterator>
        """

        def next(self):
            # Python 2 protocol method; delegate to the py3-style __next__.
            return self.__next__()

    # In Python 2, we still need these from itertools
    from itertools import imap, izip
# Try to use the new-style pretty-printing if available.
_use_gdb_pp = True
try:
import gdb.printing
except ImportError:
_use_gdb_pp = False
# Try to install type-printers.
_use_type_printing = False
try:
import gdb.types
if hasattr(gdb.types, 'TypePrinter'):
_use_type_printing = True
except ImportError:
pass
# Starting with the type ORIG, search for the member type NAME. This
# handles searching upward through superclasses. This is needed to
# work around http://sourceware.org/bugzilla/show_bug.cgi?id=13615.
def find_type(orig, name):
    """Starting with the type ORIG, search for the member type NAME.

    Searches upward through superclasses (first base class only); this is
    needed to work around http://sourceware.org/bugzilla/show_bug.cgi?id=13615.
    Raises ValueError when no enclosing class defines NAME.
    """
    typ = orig.strip_typedefs()
    while True:
        search = str(typ) + '::' + name
        try:
            return gdb.lookup_type(search)
        except RuntimeError:
            # gdb.error (a RuntimeError): not defined at this level.
            pass
        # The type was not found, so try the superclass.  We only need
        # to check the first superclass, so we don't bother with
        # anything fancier here.
        fields = typ.fields()
        # Guard: the original indexed fields()[0] unconditionally, raising
        # an opaque IndexError when the hierarchy ran out of members.
        if not fields or not fields[0].is_base_class:
            raise ValueError("Cannot find type %s::%s" % (str(orig), name))
        typ = fields[0].type
class SharedPointerPrinter:
    "Print a shared_ptr or weak_ptr"

    def __init__(self, typename, val):
        self.typename = typename
        self.val = val

    def to_string(self):
        # _M_pi is the shared control block; null means an empty pointer.
        pi = self.val['_M_refcount']['_M_pi']
        if pi == 0:
            state = 'empty'
        else:
            use = pi['_M_use_count']
            weak = pi['_M_weak_count']
            if use == 0:
                state = 'expired, weak %d' % weak
            else:
                # The control block holds one implicit weak reference.
                state = 'count %d, weak %d' % (use, weak - 1)
        return '%s (%s) %s' % (self.typename, state, self.val['_M_ptr'])
class UniquePointerPrinter:
    "Print a unique_ptr"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        # The managed pointer is the head of the internal tuple _M_t.
        inner = self.val['_M_t']['_M_head_impl']
        pointee_type = str(inner.type.target())
        return 'std::unique_ptr<%s> containing %s' % (pointee_type, str(inner))
class StdListPrinter:
    "Print a std::list"

    class _iterator(Iterator):
        # Walks the circular doubly-linked chain of _List_node objects.
        def __init__(self, nodetype, head):
            self.nodetype = nodetype
            self.base = head['_M_next']
            # The header node's own address marks the end of the circle.
            self.head = head.address
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.base == self.head:
                raise StopIteration
            elt = self.base.cast(self.nodetype).dereference()
            self.base = elt['_M_next']
            count = self.count
            self.count = self.count + 1
            return ('[%d]' % count, elt['_M_data'])

    def __init__(self, typename, val):
        self.typename = typename
        self.val = val

    def children(self):
        nodetype = find_type(self.val.type, '_Node')
        nodetype = nodetype.strip_typedefs().pointer()
        return self._iterator(nodetype, self.val['_M_impl']['_M_node'])

    def to_string(self):
        # Empty when the header node points back at itself.
        if self.val['_M_impl']['_M_node'].address == self.val['_M_impl']['_M_node']['_M_next']:
            return 'empty %s' % (self.typename)
        return '%s' % (self.typename)
class StdListIteratorPrinter:
    "Print std::list::iterator"

    def __init__(self, typename, val):
        self.val = val
        self.typename = typename

    def to_string(self):
        # Cast the raw node pointer to the concrete node type and show
        # the stored element.
        node_ptr_type = find_type(self.val.type, '_Node').strip_typedefs().pointer()
        node = self.val['_M_node'].cast(node_ptr_type).dereference()
        return node['_M_data']
class StdSlistPrinter:
    "Print a __gnu_cxx::slist"

    class _iterator(Iterator):
        # Walks the singly-linked chain; a null _M_next terminates it.
        def __init__(self, nodetype, head):
            self.nodetype = nodetype
            self.base = head['_M_head']['_M_next']
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.base == 0:
                raise StopIteration
            elt = self.base.cast(self.nodetype).dereference()
            self.base = elt['_M_next']
            count = self.count
            self.count = self.count + 1
            return ('[%d]' % count, elt['_M_data'])

    def __init__(self, typename, val):
        self.val = val

    def children(self):
        nodetype = find_type(self.val.type, '_Node')
        nodetype = nodetype.strip_typedefs().pointer()
        return self._iterator(nodetype, self.val)

    def to_string(self):
        # Empty when the head's next pointer is null.
        if self.val['_M_head']['_M_next'] == 0:
            return 'empty __gnu_cxx::slist'
        return '__gnu_cxx::slist'
class StdSlistIteratorPrinter:
    "Print __gnu_cxx::slist::iterator"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        # Show the element stored in the node this iterator points at.
        node_ptr_type = find_type(self.val.type, '_Node').strip_typedefs().pointer()
        return self.val['_M_node'].cast(node_ptr_type).dereference()['_M_data']
class StdVectorPrinter:
    "Print a std::vector"

    class _iterator(Iterator):
        # Iterates plain vectors element-by-element; for vector<bool> it
        # walks the packed bit storage one bit at a time.
        def __init__ (self, start, finish, bitvec):
            self.bitvec = bitvec
            if bitvec:
                # _M_p is the storage-word pointer, _M_offset the bit
                # position inside the current word.
                self.item = start['_M_p']
                self.so = start['_M_offset']
                self.finish = finish['_M_p']
                self.fo = finish['_M_offset']
                itype = self.item.dereference().type
                # Bits per storage word.
                self.isize = 8 * itype.sizeof
            else:
                self.item = start
                self.finish = finish
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            count = self.count
            self.count = self.count + 1
            if self.bitvec:
                # Done once both the word pointer and the bit offset have
                # caught up with the finish position.
                if self.item == self.finish and self.so >= self.fo:
                    raise StopIteration
                elt = self.item.dereference()
                if elt & (1 << self.so):
                    obit = 1
                else:
                    obit = 0
                self.so = self.so + 1
                if self.so >= self.isize:
                    # Move on to the next storage word.
                    self.item = self.item + 1
                    self.so = 0
                return ('[%d]' % count, obit)
            else:
                if self.item == self.finish:
                    raise StopIteration
                elt = self.item.dereference()
                self.item = self.item + 1
                return ('[%d]' % count, elt)

    def __init__(self, typename, val):
        self.typename = typename
        self.val = val
        # vector<bool> is specialized to a packed bitmap.
        self.is_bool = val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL

    def children(self):
        return self._iterator(self.val['_M_impl']['_M_start'],
                              self.val['_M_impl']['_M_finish'],
                              self.is_bool)

    def to_string(self):
        start = self.val['_M_impl']['_M_start']
        finish = self.val['_M_impl']['_M_finish']
        end = self.val['_M_impl']['_M_end_of_storage']
        if self.is_bool:
            start = self.val['_M_impl']['_M_start']['_M_p']
            so = self.val['_M_impl']['_M_start']['_M_offset']
            finish = self.val['_M_impl']['_M_finish']['_M_p']
            fo = self.val['_M_impl']['_M_finish']['_M_offset']
            itype = start.dereference().type
            bl = 8 * itype.sizeof
            # Bits in the first (partial) word, the full middle words,
            # and the trailing partial word.
            length = (bl - so) + bl * ((finish - start) - 1) + fo
            capacity = bl * (end - start)
            return ('%s<bool> of length %d, capacity %d'
                    % (self.typename, int (length), int (capacity)))
        else:
            return ('%s of length %d, capacity %d'
                    % (self.typename, int (finish - start), int (end - start)))

    def display_hint(self):
        return 'array'
class StdVectorIteratorPrinter:
    "Print std::vector::iterator"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        # Simply show the element the iterator currently points at.
        current = self.val['_M_current']
        return current.dereference()
class StdTuplePrinter:
    "Print a std::tuple"

    class _iterator(Iterator):
        # std::tuple is implemented as a chain of _Tuple_impl base
        # classes; walk that inheritance chain to recover each element.
        def __init__ (self, head):
            self.head = head
            # Set the base class as the initial head of the
            # tuple.
            nodes = self.head.type.fields ()
            if len (nodes) == 1:
                # Set the actual head to the first pair.
                self.head = self.head.cast (nodes[0].type)
            elif len (nodes) != 0:
                raise ValueError("Top of tuple tree does not consist of a single node.")
            self.count = 0

        def __iter__ (self):
            return self

        def __next__ (self):
            nodes = self.head.type.fields ()
            # Check for further recursions in the inheritance tree.
            if len (nodes) == 0:
                raise StopIteration
            # Check that this iteration has an expected structure.
            if len (nodes) != 2:
                raise ValueError("Cannot parse more than 2 nodes in a tuple tree.")
            # - Left node is the next recursion parent.
            # - Right node is the actual class contained in the tuple.
            # Process right node.
            impl = self.head.cast (nodes[1].type)
            # Process left node and set it as head.
            self.head = self.head.cast (nodes[0].type)
            self.count = self.count + 1
            # Finally, check the implementation.  If it is
            # wrapped in _M_head_impl return that, otherwise return
            # the value "as is".
            fields = impl.type.fields ()
            if len (fields) < 1 or fields[0].name != "_M_head_impl":
                return ('[%d]' % self.count, impl)
            else:
                return ('[%d]' % self.count, impl['_M_head_impl'])

    def __init__ (self, typename, val):
        self.typename = typename
        self.val = val;

    def children (self):
        return self._iterator (self.val)

    def to_string (self):
        # An empty tuple has no fields at all.
        if len (self.val.type.fields ()) == 0:
            return 'empty %s' % (self.typename)
        return '%s containing' % (self.typename)
class StdStackOrQueuePrinter:
    "Print a std::stack or std::queue"

    def __init__(self, typename, val):
        self.typename = typename
        # Both adaptors store their underlying container in member 'c';
        # delegate all printing to that container's own visualizer.
        self.visualizer = gdb.default_visualizer(val['c'])

    def children(self):
        return self.visualizer.children()

    def to_string(self):
        return '%s wrapping: %s' % (self.typename, self.visualizer.to_string())

    def display_hint(self):
        # Forward the wrapped container's hint when it provides one.
        if hasattr(self.visualizer, 'display_hint'):
            return self.visualizer.display_hint()
        return None
class RbtreeIterator(Iterator):
    # In-order iterator over the nodes of a std::_Rb_tree (the
    # implementation behind std::map/set and their multi variants).
    def __init__(self, rbtree):
        self.size = rbtree['_M_t']['_M_impl']['_M_node_count']
        # _M_header._M_left is the leftmost (smallest) node.
        self.node = rbtree['_M_t']['_M_impl']['_M_header']['_M_left']
        self.count = 0

    def __iter__(self):
        return self

    def __len__(self):
        return int (self.size)

    def __next__(self):
        if self.count == self.size:
            raise StopIteration
        result = self.node
        self.count = self.count + 1
        if self.count < self.size:
            # Compute the next node.
            node = self.node
            if node.dereference()['_M_right']:
                # Successor is the leftmost node of the right subtree.
                node = node.dereference()['_M_right']
                while node.dereference()['_M_left']:
                    node = node.dereference()['_M_left']
            else:
                # Otherwise climb until we arrive from a left child.
                parent = node.dereference()['_M_parent']
                while node == parent.dereference()['_M_right']:
                    node = parent
                    parent = parent.dereference()['_M_parent']
                if node.dereference()['_M_right'] != parent:
                    node = parent
            self.node = node
        return result
def get_value_from_Rb_tree_node(node):
    """Returns the value held in an _Rb_tree_node<_Val>

    Supports both the C++03 layout (value stored directly in
    _M_value_field) and the C++11 layout (value in an __aligned_buffer
    member _M_storage).  Raises ValueError for any other layout.
    """
    try:
        member = node.type.fields()[1].name
        if member == '_M_value_field':
            # C++03 implementation, node contains the value as a member
            return node['_M_value_field']
        elif member == '_M_storage':
            # C++11 implementation, node stores value in __aligned_buffer
            p = node['_M_storage']['_M_storage'].address
            p = p.cast(node.type.template_argument(0).pointer())
            return p.dereference()
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; fall through to the ValueError.
        pass
    raise ValueError("Unsupported implementation for %s" % str(node.type))
# This is a pretty printer for std::_Rb_tree_iterator (which is
# std::map::iterator), and has nothing to do with the RbtreeIterator
# class above.
class StdRbtreeIteratorPrinter:
    "Print std::map::iterator"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        # Resolve the iterator's concrete link (node pointer) type, then
        # extract the stored value from the pointed-to tree node.
        link_type_name = str(self.val.type.strip_typedefs()) + '::_Link_type'
        link_type = gdb.lookup_type(link_type_name).strip_typedefs()
        node = self.val.cast(link_type).dereference()
        return get_value_from_Rb_tree_node(node)
class StdDebugIteratorPrinter:
    "Print a debug enabled version of an iterator"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        # Strip away the encapsulating __gnu_debug::_Safe_iterator and
        # return the wrapped iterator value unchanged.
        wrapped_type = self.val.type.template_argument(0)
        return self.val['_M_current'].cast(wrapped_type)
class StdMapPrinter:
    "Print a std::map or std::multimap"

    # Turn an RbtreeIterator into a pretty-print iterator.
    class _iter(Iterator):
        def __init__(self, rbiter, type):
            self.rbiter = rbiter
            self.count = 0
            self.type = type

        def __iter__(self):
            return self

        def __next__(self):
            # Alternate: even counts emit the key ('first') of the next
            # tree node's pair, odd counts emit the cached value ('second').
            if self.count % 2 == 0:
                n = next(self.rbiter)
                n = n.cast(self.type).dereference()
                n = get_value_from_Rb_tree_node(n)
                self.pair = n
                item = n['first']
            else:
                item = self.pair['second']
            result = ('[%d]' % self.count, item)
            self.count = self.count + 1
            return result

    def __init__ (self, typename, val):
        self.typename = typename
        self.val = val

    def to_string (self):
        return '%s with %d elements' % (self.typename,
                                        len (RbtreeIterator (self.val)))

    def children (self):
        rep_type = find_type(self.val.type, '_Rep_type')
        node = find_type(rep_type, '_Link_type')
        node = node.strip_typedefs()
        return self._iter (RbtreeIterator (self.val), node)

    def display_hint (self):
        return 'map'
class StdSetPrinter:
    "Print a std::set or std::multiset"

    # Turn an RbtreeIterator into a pretty-print iterator.
    class _iter(Iterator):
        def __init__(self, rbiter, type):
            self.rbiter = rbiter
            self.count = 0
            self.type = type

        def __iter__(self):
            return self

        def __next__(self):
            # One child per tree node: cast the raw node pointer and
            # extract the stored value.
            item = next(self.rbiter)
            item = item.cast(self.type).dereference()
            item = get_value_from_Rb_tree_node(item)
            # FIXME: this is weird ... what to do?
            # Maybe a 'set' display hint?
            result = ('[%d]' % self.count, item)
            self.count = self.count + 1
            return result

    def __init__ (self, typename, val):
        self.typename = typename
        self.val = val

    def to_string (self):
        return '%s with %d elements' % (self.typename,
                                        len (RbtreeIterator (self.val)))

    def children (self):
        rep_type = find_type(self.val.type, '_Rep_type')
        node = find_type(rep_type, '_Link_type')
        node = node.strip_typedefs()
        return self._iter (RbtreeIterator (self.val), node)
class StdBitsetPrinter:
    "Print a std::bitset"

    def __init__(self, typename, val):
        self.typename = typename
        self.val = val

    def to_string(self):
        # If template_argument handled values, we could print the
        # size.  Or we could use a regexp on the type.
        return '%s' % (self.typename)

    def children(self):
        """Emit one child per set bit, labelled by its bit index."""
        words = self.val['_M_w']
        wtype = words.type
        # The _M_w member can be either an unsigned long, or an
        # array.  This depends on the template specialization used.
        # If it is a single long, convert to a single element list.
        if wtype.code == gdb.TYPE_CODE_ARRAY:
            tsize = wtype.target().sizeof
        else:
            words = [words]
            tsize = wtype.sizeof
        # Fixed: use integer division -- on Python 3 the original '/'
        # produced a float word count.
        nwords = wtype.sizeof // tsize
        result = []
        byte = 0
        while byte < nwords:
            w = words[byte]
            bit = 0
            while w != 0:
                if (w & 1) != 0:
                    # Another spot where we could use 'set'?
                    result.append(('[%d]' % (byte * tsize * 8 + bit), 1))
                bit = bit + 1
                w = w >> 1
            byte = byte + 1
        return result
class StdDequePrinter:
    "Print a std::deque"

    class _iter(Iterator):
        # Walks the deque's map of fixed-size element buffers.
        def __init__(self, node, start, end, last, buffer_size):
            self.node = node                # current slot in the buffer map
            self.p = start                  # current element
            self.end = end                  # one past the end of this buffer
            self.last = last                # one past the final element
            self.buffer_size = buffer_size  # elements per buffer
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.p == self.last:
                raise StopIteration
            result = ('[%d]' % self.count, self.p.dereference())
            self.count = self.count + 1
            # Advance the 'cur' pointer.
            self.p = self.p + 1
            if self.p == self.end:
                # If we got to the end of this bucket, move to the
                # next bucket.
                self.node = self.node + 1
                self.p = self.node[0]
                self.end = self.p + self.buffer_size
            return result

    def __init__(self, typename, val):
        self.typename = typename
        self.val = val
        self.elttype = val.type.template_argument(0)
        size = self.elttype.sizeof
        # Mirrors libstdc++'s __deque_buf_size: 512-byte buffers unless a
        # single element is larger than that.
        if size < 512:
            self.buffer_size = int (512 / size)
        else:
            self.buffer_size = 1

    def to_string(self):
        start = self.val['_M_impl']['_M_start']
        end = self.val['_M_impl']['_M_finish']
        # Element count = full middle buffers plus the partially-used
        # first and last buffers.
        delta_n = end['_M_node'] - start['_M_node'] - 1
        delta_s = start['_M_last'] - start['_M_cur']
        delta_e = end['_M_cur'] - end['_M_first']
        size = self.buffer_size * delta_n + delta_s + delta_e
        return '%s with %d elements' % (self.typename, long (size))

    def children(self):
        start = self.val['_M_impl']['_M_start']
        end = self.val['_M_impl']['_M_finish']
        return self._iter(start['_M_node'], start['_M_cur'], start['_M_last'],
                          end['_M_cur'], self.buffer_size)

    def display_hint (self):
        return 'array'
class StdDequeIteratorPrinter:
    "Print std::deque::iterator"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        # Show the element the iterator's current pointer refers to.
        cur = self.val['_M_cur']
        return cur.dereference()
class StdStringPrinter:
    "Print a std::basic_string of some kind"

    def __init__(self, typename, val):
        self.val = val

    def to_string(self):
        """Return the string contents, honoring embedded NULs.

        Reads the length from the _Rep header that precedes the character
        data, so the result follows the recorded length rather than the
        first NUL encountered.
        """
        # Make sure &string works, too.  (Renamed locals: the original
        # shadowed the builtins `type` and `len`.)
        valtype = self.val.type
        if valtype.code == gdb.TYPE_CODE_REF:
            valtype = valtype.target()
        # Calculate the length of the string so that to_string returns
        # the string according to length, not according to first null
        # encountered.
        ptr = self.val['_M_dataplus']['_M_p']
        realtype = valtype.unqualified().strip_typedefs()
        reptype = gdb.lookup_type(str(realtype) + '::_Rep').pointer()
        # The _Rep header sits immediately before the character data.
        header = ptr.cast(reptype) - 1
        length = header.dereference()['_M_length']
        if hasattr(ptr, "lazy_string"):
            return ptr.lazy_string(length=length)
        return ptr.string(length=length)

    def display_hint(self):
        return 'string'
class Tr1HashtableIterator(Iterator):
    # Iterates the nodes of a TR1-style hashtable: an array of buckets,
    # each holding a singly-linked chain of nodes.
    def __init__ (self, hash):
        self.buckets = hash['_M_buckets']
        self.bucket = 0
        self.bucket_count = hash['_M_bucket_count']
        self.node_type = find_type(hash.type, '_Node').pointer()
        self.node = 0
        # Position on the first non-empty bucket (node stays 0 if none).
        while self.bucket != self.bucket_count:
            self.node = self.buckets[self.bucket]
            if self.node:
                break
            self.bucket = self.bucket + 1

    def __iter__ (self):
        return self

    def __next__ (self):
        if self.node == 0:
            raise StopIteration
        node = self.node.cast(self.node_type)
        result = node.dereference()['_M_v']
        self.node = node.dereference()['_M_next'];
        # End of this chain: advance to the next non-empty bucket.
        if self.node == 0:
            self.bucket = self.bucket + 1
            while self.bucket != self.bucket_count:
                self.node = self.buckets[self.bucket]
                if self.node:
                    break
                self.bucket = self.bucket + 1
        return result
class StdHashtableIterator(Iterator):
    # Iterates the single node chain of a C++11 _Hashtable, which links
    # every element through _M_before_begin regardless of bucket.
    def __init__(self, hash):
        self.node = hash['_M_before_begin']['_M_nxt']
        self.node_type = find_type(hash.type, '__node_type').pointer()

    def __iter__(self):
        return self

    def __next__(self):
        if self.node == 0:
            raise StopIteration
        node = self.node.cast(self.node_type).dereference()
        self.node = node['_M_nxt']
        # The value lives in an __aligned_buffer; cast its address to a
        # pointer to the element type to read it out.
        value_ptr = node['_M_storage'].address
        value_ptr = value_ptr.cast(node.type.template_argument(0).pointer())
        return value_ptr.dereference()
class Tr1UnorderedSetPrinter:
    "Print a tr1::unordered_set"

    def __init__(self, typename, val):
        self.typename = typename
        self.val = val

    def hashtable(self):
        # TR1 containers *are* the hashtable; the C++11 containers wrap
        # it in a _M_h member.
        if self.typename.startswith('std::tr1'):
            return self.val
        return self.val['_M_h']

    def to_string(self):
        count = self.hashtable()['_M_element_count']
        return '%s with %d elements' % (self.typename, count)

    @staticmethod
    def format_count(i):
        return '[%d]' % i

    def children(self):
        counter = imap(self.format_count, itertools.count())
        ht = self.hashtable()
        if self.typename.startswith('std::tr1'):
            return izip(counter, Tr1HashtableIterator(ht))
        return izip(counter, StdHashtableIterator(ht))
class Tr1UnorderedMapPrinter:
    "Print a tr1::unordered_map"

    def __init__ (self, typename, val):
        self.typename = typename
        self.val = val

    def hashtable (self):
        # TR1 containers are themselves the hashtable; the C++11 ones
        # wrap it in a _M_h member.
        if self.typename.startswith('std::tr1'):
            return self.val
        return self.val['_M_h']

    def to_string (self):
        return '%s with %d elements' % (self.typename, self.hashtable()['_M_element_count'])

    @staticmethod
    def flatten (list):
        # Yield each half of every (key, value) pair in turn, so the
        # 'map' display hint pairs them back up.
        for elt in list:
            for i in elt:
                yield i

    @staticmethod
    def format_one (elt):
        return (elt['first'], elt['second'])

    @staticmethod
    def format_count (i):
        return '[%d]' % i

    def children (self):
        counter = imap (self.format_count, itertools.count())
        # Map over the hash table and flatten the result.
        if self.typename.startswith('std::tr1'):
            data = self.flatten (imap (self.format_one, Tr1HashtableIterator (self.hashtable())))
            # Zip the two iterators together.
            return izip (counter, data)
        data = self.flatten (imap (self.format_one, StdHashtableIterator (self.hashtable())))
        # Zip the two iterators together.
        return izip (counter, data)

    def display_hint (self):
        return 'map'
class StdForwardListPrinter:
    "Print a std::forward_list"

    class _iterator(Iterator):
        # Walks the singly-linked node chain; a null _M_next terminates.
        def __init__(self, nodetype, head):
            self.nodetype = nodetype
            self.base = head['_M_next']
            self.count = 0

        def __iter__(self):
            return self

        def __next__(self):
            if self.base == 0:
                raise StopIteration
            elt = self.base.cast(self.nodetype).dereference()
            self.base = elt['_M_next']
            count = self.count
            self.count = self.count + 1
            # The element is stored in an __aligned_buffer; take its
            # address as a pointer of the element type to read it.
            valptr = elt['_M_storage'].address
            valptr = valptr.cast(elt.type.template_argument(0).pointer())
            return ('[%d]' % count, valptr.dereference())

    def __init__(self, typename, val):
        self.val = val
        self.typename = typename

    def children(self):
        nodetype = find_type(self.val.type, '_Node')
        nodetype = nodetype.strip_typedefs().pointer()
        return self._iterator(nodetype, self.val['_M_impl']['_M_head'])

    def to_string(self):
        if self.val['_M_impl']['_M_head']['_M_next'] == 0:
            return 'empty %s' % (self.typename)
        return '%s' % (self.typename)
# A "regular expression" printer which conforms to the
# "SubPrettyPrinter" protocol from gdb.printing.
class RxPrinter(object):
    # Wraps one printer-constructing function; conforms to the
    # "SubPrettyPrinter" protocol from gdb.printing.
    def __init__(self, name, function):
        super(RxPrinter, self).__init__()
        self.name = name
        self.function = function
        self.enabled = True

    def invoke(self, value):
        # Honor the user disabling this subprinter.
        if not self.enabled:
            return None
        # Strip a reference down to the referenced value when this GDB
        # supports it.
        if value.type.code == gdb.TYPE_CODE_REF and hasattr(gdb.Value, "referenced_value"):
            value = value.referenced_value()
        return self.function(self.name, value)
# A pretty-printer that conforms to the "PrettyPrinter" protocol from
# gdb.printing. It can also be used directly as an old-style printer.
class Printer(object):
    # Collection of RxPrinter subprinters keyed by template base name.
    # Conforms to the "PrettyPrinter" protocol from gdb.printing and is
    # also usable directly as an old-style printer (via __call__).
    def __init__(self, name):
        super(Printer, self).__init__()
        self.name = name
        self.subprinters = []
        self.lookup = {}
        self.enabled = True
        # Matches 'basename<...>' template type names.
        self.compiled_rx = re.compile('^([a-zA-Z0-9_:]+)<.*>$')

    def add(self, name, function):
        # A small sanity check.
        # FIXME
        if not self.compiled_rx.match(name + '<>'):
            raise ValueError('libstdc++ programming error: "%s" does not match' % name)
        printer = RxPrinter(name, function)
        self.subprinters.append(printer)
        self.lookup[name] = printer

    # Add a name using _GLIBCXX_BEGIN_NAMESPACE_VERSION.
    def add_version(self, base, name, function):
        self.add(base + name, function)
        self.add(base + '__7::' + name, function)

    # Add a name using _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
    def add_container(self, base, name, function):
        self.add_version(base, name, function)
        self.add_version(base + '__cxx1998::', name, function)

    @staticmethod
    def get_basic_type(type):
        # If it points to a reference, get the reference.
        if type.code == gdb.TYPE_CODE_REF:
            type = type.target ()
        # Get the unqualified type, stripped of typedefs.
        type = type.unqualified ().strip_typedefs ()
        return type.tag

    def __call__(self, val):
        typename = self.get_basic_type(val.type)
        if not typename:
            return None
        # All the types we match are template types, so we can use a
        # dictionary.
        match = self.compiled_rx.match(typename)
        if not match:
            return None
        basename = match.group(1)
        if val.type.code == gdb.TYPE_CODE_REF:
            if hasattr(gdb.Value,"referenced_value"):
                val = val.referenced_value()
        if basename in self.lookup:
            return self.lookup[basename].invoke(val)
        # Cannot find a pretty printer.  Return None.
        return None
libstdcxx_printer = None
class FilteringTypePrinter(object):
    """Type printer that renames a matching implementation type to its
    user-visible typedef NAME (e.g. a basic_string instantiation to
    'std::string')."""

    def __init__(self, match, name):
        self.match = match      # substring the raw type tag must contain
        self.name = name        # typedef name to display instead
        self.enabled = True

    class _recognizer(object):
        def __init__(self, match, name):
            self.match = match
            self.name = name
            # gdb.Type for NAME, resolved lazily and cached.
            self.type_obj = None

        def recognize(self, type_obj):
            """Return NAME if type_obj is the type it aliases, else None."""
            if type_obj.tag is None:
                return None
            if self.type_obj is None:
                if not self.match in type_obj.tag:
                    # Filter didn't match.
                    return None
                try:
                    self.type_obj = gdb.lookup_type(self.name).strip_typedefs()
                except RuntimeError:
                    # Narrowed from a bare `except:`: gdb.lookup_type
                    # raises gdb.error (a RuntimeError subclass) when the
                    # type is unknown -- same convention as find_type.
                    pass
            if self.type_obj == type_obj:
                return self.name
            return None

    def instantiate(self):
        return self._recognizer(self.match, self.name)
def add_one_type_printer(obj, match, name):
    """Register a FilteringTypePrinter that renames types matching MATCH
    to 'std::NAME' on objfile OBJ."""
    gdb.types.register_type_printer(obj, FilteringTypePrinter(match, 'std::' + name))
def register_type_printers(obj):
    """Install type printers for the common libstdc++ typedefs (string and
    stream typedefs, chrono durations, random-engine typedefs)."""
    global _use_type_printing
    if not _use_type_printing:
        return
    # Narrow- and wide-character typedefs of the string/stream/regex
    # templates.
    for pfx in ('', 'w'):
        add_one_type_printer(obj, 'basic_string', pfx + 'string')
        add_one_type_printer(obj, 'basic_ios', pfx + 'ios')
        add_one_type_printer(obj, 'basic_streambuf', pfx + 'streambuf')
        add_one_type_printer(obj, 'basic_istream', pfx + 'istream')
        add_one_type_printer(obj, 'basic_ostream', pfx + 'ostream')
        add_one_type_printer(obj, 'basic_iostream', pfx + 'iostream')
        add_one_type_printer(obj, 'basic_stringbuf', pfx + 'stringbuf')
        add_one_type_printer(obj, 'basic_istringstream',
                             pfx + 'istringstream')
        add_one_type_printer(obj, 'basic_ostringstream',
                             pfx + 'ostringstream')
        add_one_type_printer(obj, 'basic_stringstream',
                             pfx + 'stringstream')
        add_one_type_printer(obj, 'basic_filebuf', pfx + 'filebuf')
        add_one_type_printer(obj, 'basic_ifstream', pfx + 'ifstream')
        add_one_type_printer(obj, 'basic_ofstream', pfx + 'ofstream')
        add_one_type_printer(obj, 'basic_fstream', pfx + 'fstream')
        add_one_type_printer(obj, 'basic_regex', pfx + 'regex')
        add_one_type_printer(obj, 'sub_match', pfx + 'csub_match')
        add_one_type_printer(obj, 'sub_match', pfx + 'ssub_match')
        add_one_type_printer(obj, 'match_results', pfx + 'cmatch')
        add_one_type_printer(obj, 'match_results', pfx + 'smatch')
        add_one_type_printer(obj, 'regex_iterator', pfx + 'cregex_iterator')
        add_one_type_printer(obj, 'regex_iterator', pfx + 'sregex_iterator')
        add_one_type_printer(obj, 'regex_token_iterator',
                             pfx + 'cregex_token_iterator')
        add_one_type_printer(obj, 'regex_token_iterator',
                             pfx + 'sregex_token_iterator')
    # Note that we can't have a printer for std::wstreampos, because
    # it shares the same underlying type as std::streampos.
    add_one_type_printer(obj, 'fpos', 'streampos')
    add_one_type_printer(obj, 'basic_string', 'u16string')
    add_one_type_printer(obj, 'basic_string', 'u32string')
    for dur in ('nanoseconds', 'microseconds', 'milliseconds',
                'seconds', 'minutes', 'hours'):
        add_one_type_printer(obj, 'duration', dur)
    add_one_type_printer(obj, 'linear_congruential_engine', 'minstd_rand0')
    add_one_type_printer(obj, 'linear_congruential_engine', 'minstd_rand')
    add_one_type_printer(obj, 'mersenne_twister_engine', 'mt19937')
    add_one_type_printer(obj, 'mersenne_twister_engine', 'mt19937_64')
    add_one_type_printer(obj, 'subtract_with_carry_engine', 'ranlux24_base')
    add_one_type_printer(obj, 'subtract_with_carry_engine', 'ranlux48_base')
    add_one_type_printer(obj, 'discard_block_engine', 'ranlux24')
    add_one_type_printer(obj, 'discard_block_engine', 'ranlux48')
    add_one_type_printer(obj, 'shuffle_order_engine', 'knuth_b')
def register_libstdcxx_printers (obj):
    "Register libstdc++ pretty-printers with objfile Obj."
    global _use_gdb_pp
    global libstdcxx_printer
    if _use_gdb_pp:
        # New-style registration via gdb.printing.
        gdb.printing.register_pretty_printer(obj, libstdcxx_printer)
    else:
        # Old-style registration: append to the pretty_printers list of
        # the objfile, or of gdb itself when no objfile was given.
        if obj is None:
            obj = gdb
        obj.pretty_printers.append(libstdcxx_printer)
    register_type_printers(obj)
def build_libstdcxx_dictionary ():
    """Populate the module-global libstdcxx_printer with all subprinters."""
    global libstdcxx_printer
    libstdcxx_printer = Printer("libstdc++-v6")
    # For _GLIBCXX_BEGIN_NAMESPACE_VERSION.
    vers = '(__7::)?'
    # For _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
    container = '(__cxx1998::' + vers + ')?'
    # libstdc++ objects requiring pretty-printing.
    # In order from:
    # http://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen/a01847.html
    libstdcxx_printer.add_version('std::', 'basic_string', StdStringPrinter)
    libstdcxx_printer.add_container('std::', 'bitset', StdBitsetPrinter)
    libstdcxx_printer.add_container('std::', 'deque', StdDequePrinter)
    libstdcxx_printer.add_container('std::', 'list', StdListPrinter)
    libstdcxx_printer.add_container('std::', 'map', StdMapPrinter)
    libstdcxx_printer.add_container('std::', 'multimap', StdMapPrinter)
    libstdcxx_printer.add_container('std::', 'multiset', StdSetPrinter)
    libstdcxx_printer.add_version('std::', 'priority_queue',
                                  StdStackOrQueuePrinter)
    libstdcxx_printer.add_version('std::', 'queue', StdStackOrQueuePrinter)
    libstdcxx_printer.add_version('std::', 'tuple', StdTuplePrinter)
    libstdcxx_printer.add_container('std::', 'set', StdSetPrinter)
    libstdcxx_printer.add_version('std::', 'stack', StdStackOrQueuePrinter)
    libstdcxx_printer.add_version('std::', 'unique_ptr', UniquePointerPrinter)
    libstdcxx_printer.add_container('std::', 'vector', StdVectorPrinter)
    # vector<bool>

    # Printer registrations for classes compiled with -D_GLIBCXX_DEBUG.
    libstdcxx_printer.add('std::__debug::bitset', StdBitsetPrinter)
    libstdcxx_printer.add('std::__debug::deque', StdDequePrinter)
    libstdcxx_printer.add('std::__debug::list', StdListPrinter)
    libstdcxx_printer.add('std::__debug::map', StdMapPrinter)
    libstdcxx_printer.add('std::__debug::multimap', StdMapPrinter)
    libstdcxx_printer.add('std::__debug::multiset', StdSetPrinter)
    libstdcxx_printer.add('std::__debug::priority_queue',
                          StdStackOrQueuePrinter)
    libstdcxx_printer.add('std::__debug::queue', StdStackOrQueuePrinter)
    libstdcxx_printer.add('std::__debug::set', StdSetPrinter)
    libstdcxx_printer.add('std::__debug::stack', StdStackOrQueuePrinter)
    libstdcxx_printer.add('std::__debug::unique_ptr', UniquePointerPrinter)
    libstdcxx_printer.add('std::__debug::vector', StdVectorPrinter)

    # These are the TR1 and C++0x printers.
    # For array - the default GDB pretty-printer seems reasonable.
    libstdcxx_printer.add_version('std::', 'shared_ptr', SharedPointerPrinter)
    libstdcxx_printer.add_version('std::', 'weak_ptr', SharedPointerPrinter)
    libstdcxx_printer.add_container('std::', 'unordered_map',
                                    Tr1UnorderedMapPrinter)
    libstdcxx_printer.add_container('std::', 'unordered_set',
                                    Tr1UnorderedSetPrinter)
    libstdcxx_printer.add_container('std::', 'unordered_multimap',
                                    Tr1UnorderedMapPrinter)
    libstdcxx_printer.add_container('std::', 'unordered_multiset',
                                    Tr1UnorderedSetPrinter)
    libstdcxx_printer.add_container('std::', 'forward_list',
                                    StdForwardListPrinter)

    libstdcxx_printer.add_version('std::tr1::', 'shared_ptr', SharedPointerPrinter)
    libstdcxx_printer.add_version('std::tr1::', 'weak_ptr', SharedPointerPrinter)
    libstdcxx_printer.add_version('std::tr1::', 'unordered_map',
                                  Tr1UnorderedMapPrinter)
    libstdcxx_printer.add_version('std::tr1::', 'unordered_set',
                                  Tr1UnorderedSetPrinter)
    libstdcxx_printer.add_version('std::tr1::', 'unordered_multimap',
                                  Tr1UnorderedMapPrinter)
    libstdcxx_printer.add_version('std::tr1::', 'unordered_multiset',
                                  Tr1UnorderedSetPrinter)

    # These are the C++0x printer registrations for -D_GLIBCXX_DEBUG cases.
    # The tr1 namespace printers do not seem to have any debug
    # equivalents, so do no register them.
    libstdcxx_printer.add('std::__debug::unordered_map',
                          Tr1UnorderedMapPrinter)
    libstdcxx_printer.add('std::__debug::unordered_set',
                          Tr1UnorderedSetPrinter)
    libstdcxx_printer.add('std::__debug::unordered_multimap',
                          Tr1UnorderedMapPrinter)
    libstdcxx_printer.add('std::__debug::unordered_multiset',
                          Tr1UnorderedSetPrinter)
    libstdcxx_printer.add('std::__debug::forward_list',
                          StdForwardListPrinter)

    # Extensions.
    libstdcxx_printer.add_version('__gnu_cxx::', 'slist', StdSlistPrinter)

    if True:
        # These shouldn't be necessary, if GDB "print *i" worked.
        # But it often doesn't, so here they are.
        libstdcxx_printer.add_container('std::', '_List_iterator',
                                        StdListIteratorPrinter)
        libstdcxx_printer.add_container('std::', '_List_const_iterator',
                                        StdListIteratorPrinter)
        libstdcxx_printer.add_version('std::', '_Rb_tree_iterator',
                                      StdRbtreeIteratorPrinter)
        libstdcxx_printer.add_version('std::', '_Rb_tree_const_iterator',
                                      StdRbtreeIteratorPrinter)
        libstdcxx_printer.add_container('std::', '_Deque_iterator',
                                        StdDequeIteratorPrinter)
        libstdcxx_printer.add_container('std::', '_Deque_const_iterator',
                                        StdDequeIteratorPrinter)
        libstdcxx_printer.add_version('__gnu_cxx::', '__normal_iterator',
                                      StdVectorIteratorPrinter)
        libstdcxx_printer.add_version('__gnu_cxx::', '_Slist_iterator',
                                      StdSlistIteratorPrinter)

        # Debug (compiled with -D_GLIBCXX_DEBUG) printer
        # registrations.  The Rb_tree debug iterator when unwrapped
        # from the encapsulating __gnu_debug::_Safe_iterator does not
        # have the __norm namespace. Just use the existing printer
        # registration for that.
        libstdcxx_printer.add('__gnu_debug::_Safe_iterator',
                              StdDebugIteratorPrinter)
        libstdcxx_printer.add('std::__norm::_List_iterator',
                              StdListIteratorPrinter)
        libstdcxx_printer.add('std::__norm::_List_const_iterator',
                              StdListIteratorPrinter)
        libstdcxx_printer.add('std::__norm::_Deque_const_iterator',
                              StdDequeIteratorPrinter)
        libstdcxx_printer.add('std::__norm::_Deque_iterator',
                              StdDequeIteratorPrinter)
build_libstdcxx_dictionary ()
|
gpl-2.0
|
bpupadhyaya/kafka
|
tests/kafkatest/services/verifiable_producer.py
|
34
|
3924
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.background_thread import BackgroundThreadService
import json
class VerifiableProducer(BackgroundThreadService):
    """Service wrapping bin/kafka-verifiable-producer.sh.

    Produces an increasing sequence of integer values to ``topic`` and
    records, per value, whether the broker acked it.  The per-node worker
    thread and the test thread both touch the ack lists, so all access is
    guarded by ``self.lock`` (assumed to be provided by
    BackgroundThreadService -- TODO confirm).
    """

    logs = {
        "producer_log": {
            "path": "/mnt/producer.log",
            "collect_default": False}
    }

    def __init__(self, context, num_nodes, kafka, topic, max_messages=-1, throughput=100000):
        """
        :param context: ducktape test context.
        :param num_nodes: number of nodes to run producers on.
        :param kafka: Kafka service supplying ``bootstrap_servers()``.
        :param topic: topic to produce to.
        :param max_messages: stop after this many messages; <= 0 means no limit flag is passed.
        :param throughput: messages/sec throttle; <= 0 means no throughput flag is passed.
        """
        super(VerifiableProducer, self).__init__(context, num_nodes)

        self.kafka = kafka
        self.topic = topic
        self.max_messages = max_messages
        self.throughput = throughput
        self.acked_values = []
        self.not_acked_values = []

    def _worker(self, idx, node):
        """Run the producer on ``node`` and record acks from its JSON output."""
        cmd = self.start_cmd
        self.logger.debug("VerifiableProducer %d command: %s" % (idx, cmd))
        for line in node.account.ssh_capture(cmd):
            line = line.strip()

            data = self.try_parse_json(line)
            if data is None:
                # Non-JSON output (e.g. stray log lines) is ignored.
                continue

            with self.lock:
                if data["name"] == "producer_send_error":
                    data["node"] = idx
                    self.not_acked_values.append(int(data["value"]))
                elif data["name"] == "producer_send_success":
                    self.acked_values.append(int(data["value"]))

    @property
    def start_cmd(self):
        """Shell command used to launch the verifiable producer on a node."""
        cmd = "/opt/kafka/bin/kafka-verifiable-producer.sh" \
              " --topic %s --broker-list %s" % (self.topic, self.kafka.bootstrap_servers())
        if self.max_messages > 0:
            cmd += " --max-messages %s" % str(self.max_messages)
        if self.throughput > 0:
            cmd += " --throughput %s" % str(self.throughput)

        # Mirror output to the collected log; run in background so
        # ssh_capture can stream stdout as it is produced.
        cmd += " 2>> /mnt/producer.log | tee -a /mnt/producer.log &"
        return cmd

    @property
    def acked(self):
        """Values successfully acked so far, as a snapshot copy."""
        with self.lock:
            # Bug fix: previously returned the live list, which callers could
            # then iterate outside the lock while the worker thread appends.
            return list(self.acked_values)

    @property
    def not_acked(self):
        """Values that hit a send error so far, as a snapshot copy."""
        with self.lock:
            return list(self.not_acked_values)

    @property
    def num_acked(self):
        """Number of values acked so far."""
        with self.lock:
            return len(self.acked_values)

    @property
    def num_not_acked(self):
        """Number of values that failed to send so far."""
        with self.lock:
            return len(self.not_acked_values)

    def stop_node(self, node):
        """Kill the producer process on ``node`` and wait for its worker thread."""
        node.account.kill_process("VerifiableProducer", allow_fail=False)

        if self.worker_threads is None:
            return

        # block until the corresponding thread exits
        if len(self.worker_threads) >= self.idx(node):
            # Need to guard this because stop is preemptively called before
            # the worker threads are added and started
            self.worker_threads[self.idx(node) - 1].join()

    def clean_node(self, node):
        """Force-kill the producer and remove its log file on ``node``."""
        node.account.kill_process("VerifiableProducer", clean_shutdown=False, allow_fail=False)
        node.account.ssh("rm -rf /mnt/producer.log", allow_fail=False)

    def try_parse_json(self, string):
        """Try to parse a string as json. Return None if not parseable."""
        try:
            return json.loads(string)
        except ValueError:
            self.logger.debug("Could not parse as json: %s" % str(string))
            return None
|
apache-2.0
|
EduPepperPDTesting/pepper2013-testing
|
common/lib/xmodule/setup.py
|
3
|
3346
|
from setuptools import setup, find_packages

# XModule descriptor classes registered under the 'xmodule.v1' entry-point
# group; each entry maps an XML tag name to a "module:Class" path.
# See http://guide.python-distribute.org/creation.html#entry-points
# for a description of entry_points
XMODULE_V1_DESCRIPTORS = [
    "abtest = xmodule.abtest_module:ABTestDescriptor",
    "book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "chapter = xmodule.seq_module:SequenceDescriptor",
    "combinedopenended = xmodule.combined_open_ended_module:CombinedOpenEndedDescriptor",
    "conditional = xmodule.conditional_module:ConditionalDescriptor",
    "course = xmodule.course_module:CourseDescriptor",
    "customtag = xmodule.template_module:CustomTagDescriptor",
    "discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "html = xmodule.html_module:HtmlDescriptor",
    "image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "error = xmodule.error_module:ErrorDescriptor",
    "peergrading = xmodule.peer_grading_module:PeerGradingDescriptor",
    "poll_question = xmodule.poll_module:PollDescriptor",
    "problem = xmodule.capa_module:CapaDescriptor",
    "problemset = xmodule.seq_module:SequenceDescriptor",
    "randomize = xmodule.randomize_module:RandomizeDescriptor",
    "section = xmodule.backcompat_module:SemanticSectionDescriptor",
    "sequential = xmodule.seq_module:SequenceDescriptor",
    "slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "timelimit = xmodule.timelimit_module:TimeLimitDescriptor",
    "vertical = xmodule.vertical_module:VerticalDescriptor",
    "video = xmodule.video_module:VideoDescriptor",
    "videoalpha = xmodule.video_module:VideoDescriptor",
    "videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
    "videosequence = xmodule.seq_module:SequenceDescriptor",
    "discussion = xmodule.discussion_module:DiscussionDescriptor",
    "course_info = xmodule.html_module:CourseInfoDescriptor",
    "static_tab = xmodule.html_module:StaticTabDescriptor",
    "custom_tag_template = xmodule.raw_module:RawDescriptor",
    "about = xmodule.html_module:AboutDescriptor",
    "wrapper = xmodule.wrapper_module:WrapperDescriptor",
    "graphical_slider_tool = xmodule.gst_module:GraphicalSliderToolDescriptor",
    "annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
    "foldit = xmodule.foldit_module:FolditDescriptor",
    "word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
    "hidden = xmodule.hidden_module:HiddenDescriptor",
    "raw = xmodule.raw_module:RawDescriptor",
    "crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
    "poll_compare = xmodule.poll_compare_module:PollCompareDescriptor",
]

# Command-line tools installed alongside the package.
CONSOLE_SCRIPTS = [
    'xmodule_assets = xmodule.static_content:main',
]

setup(
    name="XModule",
    version="0.1",
    packages=find_packages(exclude=["tests"]),
    install_requires=[
        'distribute',
        'docopt',
        'capa',
        'path.py',
    ],
    package_data={
        'xmodule': ['js/module/*']
    },
    entry_points={
        'xmodule.v1': XMODULE_V1_DESCRIPTORS,
        'console_scripts': CONSOLE_SCRIPTS,
    },
)
|
agpl-3.0
|
KlaasDeNys/Arduino
|
arduino-core/src/processing/app/i18n/python/requests/packages/charade/euckrfreq.py
|
3121
|
45978
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
# Threshold ratio of "typical" (frequent) to "atypical" character hits used
# when scoring EUC-KR confidence.  NOTE(review): far below the measured
# ideal ratio of 73.24 quoted above -- presumably a deliberate safety
# margin; confirm against the char-distribution analyzer that consumes it.
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0

# Size of the meaningful portion of the frequency table below.
# NOTE(review): appears to correspond to the ~2350 characters referenced in
# the sampling notes above -- confirm against the analyzer code.
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
|
lgpl-2.1
|
caseyrygt/osf.io
|
website/addons/figshare/views/hgrid.py
|
39
|
1145
|
# -*- coding: utf-8 -*-
from website.util import rubeus
from ..api import Figshare
def figshare_hgrid_data(node_settings, auth, parent=None, **kwargs):
    """Build the rubeus/HGrid root listing for a node's figshare addon.

    :param node_settings: figshare addon settings of the node
    :param auth: permissions object passed through to the grid root
    :param parent: unused; present for hgrid callback compatibility
    :return: single-element list with the addon root dict, or None when the
        addon is unconfigured, unauthorized, or the remote item is missing
    """
    node = node_settings.owner
    # BUGFIX: check configuration/authorization BEFORE hitting the remote
    # API -- the original fetched the item first and discarded it.
    if not node_settings.figshare_id or not node_settings.has_auth:
        return
    connection = Figshare.from_settings(node_settings.user_settings)
    if node_settings.figshare_type == 'project':
        item = connection.project(node_settings, node_settings.figshare_id)
    else:
        item = connection.article(node_settings, node_settings.figshare_id)
    if not item:
        return
    # TODO: test me; throw an error if neither title key is present
    node_settings.figshare_title = item.get('title') or item['items'][0]['title']
    node_settings.save()
    return [
        rubeus.build_addon_root(
            node_settings,
            u'{0}:{1}'.format(
                node_settings.figshare_title or "Unnamed {0}".format(node_settings.figshare_type or ''),
                node_settings.figshare_id,
            ),
            permissions=auth,
            nodeUrl=node.url,
            nodeApiUrl=node.api_url,
            extra={
                # assumes a 'status' field on the first article/item -- TODO confirm
                'status': (item.get('articles') or item['items'])[0]['status'].lower()
            }
        )
    ]
|
apache-2.0
|
wood-galaxy/FreeCAD
|
src/Mod/OpenSCAD/OpenSCAD2Dgeom.py
|
7
|
21048
|
#***************************************************************************
#* *
#* Copyright (c) 2012 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD OpenSCAD Workbench - 2D helper fuctions"
__author__ = "Sebastian Hoogen"
__url__ = ["http://www.freecadweb.org"]
'''
This Script includes python functions to convert imported dxf geometry to Faces
'''
class Overlappingfaces():
    '''combines overlapping faces together

    Faces are sorted by decreasing area, then a dependency dictionary maps
    each (bigger) face index to the indices of the smaller faces that
    overlap it.  That containment tree is used either to build Part
    document features (makefeatures) or a single boolean shape (makeshape).
    '''
    def __init__(self,facelist):
        # Biggest face first, so a potential parent always precedes its children.
        self.sortedfaces = sorted(facelist,key=(lambda shape: shape.Area),reverse=True)
        self.builddepdict()
        #self.faceindex = {}
        #for idx,face in enumerate(self.sortesfaces):
        #    self.faceindex[face.hashCode()] = idx

#    def __len__(self):
#        return len(self.sortedfaces)

    @staticmethod
    def dofacesoverlapboundbox(bigface,smallface):
        '''Cheap overlap test: do the two bounding boxes intersect?'''
        return bigface.BoundBox.isIntersection(smallface.BoundBox)

    @staticmethod
    def dofacesoverlapallverts(bigface,smallface):
        '''Overlap test: are all vertexes of the small face inside the big face?'''
        def vertsinface(f1,verts,tol=0.001,inface=True):
            '''check if all given verts are inside shape f1'''
            return all([f1.isInside(vert.Point,tol,inface) for vert in verts])
        return vertsinface(bigface,smallface.Vertexes)

    @staticmethod
    def dofacesoverlapproximity(bigface,smallface):
        '''Overlap test using the OCC proximity algorithm.'''
        l1,l2 = bigface.proximity(smallface)
        return len(l1) > 0 or len(l2) > 0

    @staticmethod
    def dofacesoverlapboolean(bigface,smallface):
        '''Overlap test via boolean common; robust but the most expensive.'''
        #import FreeCAD,FreeCADGui
        #FreeCAD.Console.PrintLog('intersecting %d %d\n'%(bigfacei,smallfacei))
        #FreeCADGui.updateGui()
        return bigface.common(smallface).Area > 0

    def builddepdict(self):
        '''Build self.isinsidedict: face index -> list of overlapped smaller faces.'''
        import Part
        import itertools
        #isinsidelist = []
        self.isinsidedict = {}
        #for bigface, smallface in itertools.combinations(sortedfaces,2):
        for bigfacei, smallfacei in\
                itertools.combinations(range(len(self.sortedfaces)),2):
            # Try the cheap proximity test first, then fall back to the
            # boolean test, then to the all-vertexes test.
            try:
                overlap = Overlappingfaces.dofacesoverlapproximity(\
                    self.sortedfaces[bigfacei],self.sortedfaces[smallfacei])
            except (NotImplementedError, Part.OCCError) as e:
                try:
                    overlap = Overlappingfaces.dofacesoverlapboolean(\
                        self.sortedfaces[bigfacei],\
                        self.sortedfaces[smallfacei])
                except Part.OCCError:
                    overlap = Overlappingfaces.dofacesoverlapallverts(\
                        self.sortedfaces[bigfacei],\
                        self.sortedfaces[smallfacei])
            if overlap:
                #isinsidelist.append((bigfacei,smallfacei))
                smallinbig = self.isinsidedict.get(bigfacei,[])
                smallinbig.append(smallfacei)
                if len(smallinbig) == 1:
                    self.isinsidedict[bigfacei] = smallinbig

    @staticmethod
    def finddepth(dict1,faceidx,curdepth=0):
        '''Return the nesting depth of faceidx in the containment tree dict1.'''
        if faceidx not in dict1:
            return curdepth+1
        else:
            #print dict1[faceidx],[(finddepth(dict1,childface,curdepth)) for childface in dict1[faceidx]]
            return max([(Overlappingfaces.finddepth(dict1,childface,curdepth+1)) for childface in dict1[faceidx]])

    def findrootdepth(self):
        '''Return the maximum nesting depth over all faces.'''
        return max([Overlappingfaces.finddepth(self.isinsidedict,fi) for fi in range(len(self.sortedfaces))])

    def hasnoparent(self,faceindex):
        '''True if no bigger face overlaps the face at faceindex.'''
        return Overlappingfaces.hasnoparentstatic(self.isinsidedict,faceindex)

    @staticmethod
    def hasnoparentstatic(isinsidedict,faceindex):
        '''True if faceindex occurs in no face's child list of isinsidedict.'''
        for smalllist in isinsidedict.itervalues():
            if faceindex in smalllist:
                return False
        return True

#    @staticmethod
#    def subtreedict(rootface,parantdict):
#        '''biuld a subtree dictinary'''
#        newdict = parantdict.copy()
#        del newdict[rootface]
#        return newdict

    @staticmethod
    def directchildren(isinsidedict,parent):
        '''Children of parent that are not also inside another child of parent.'''
        #return [child for child in isinsidedict.get(parent,[]) if child not in isinsidedict]
        dchildren=[]
        for child in isinsidedict.get(parent,[]):
            direct = True
            for key, value in isinsidedict.iteritems():
                # child is indirect if some other face contains it but not parent
                if key != parent and child in value and parent not in value:
                    direct = False
            if direct:
                dchildren.append(child)
        return dchildren

#    @staticmethod
#    def indirectchildren(isinsidedict,parent):
#        return [child for child in isinsidedict.get(parent,[]) if child in isinsidedict]

    @staticmethod
    def printtree(isinsidedict,facenum):
        '''Debug helper: print the containment tree to stdout.'''
        def printtreechild(isinsidedict,facenum,parent):
            children=Overlappingfaces.directchildren(isinsidedict,parent)
            print 'parent %d directchild %s' % (parent,children)
            if children:
                subdict=isinsidedict.copy()
                del subdict[parent]
                for child in children:
                    printtreechild(subdict,facenum,child)
        rootitems=[fi for fi in range(facenum) if Overlappingfaces.hasnoparentstatic(isinsidedict,fi)]
        for rootitem in rootitems:
            printtreechild(isinsidedict,facenum,rootitem)

    def makefeatures(self,doc):
        '''Create Part features (Cut/MultiFuse trees) in the document doc.'''
        import FreeCAD
        def addshape(faceindex):
            # Wrap a single face in a hidden Part::Feature object.
            obj=doc.addObject('Part::Feature','facefromedges_%d' % faceindex)
            obj.Shape = self.sortedfaces[faceindex]
            obj.ViewObject.hide()
            return obj
        def addfeature(faceindex,isinsidedict):
            # Recursively build a Cut of this face minus its direct children.
            directchildren = Overlappingfaces.directchildren(isinsidedict,faceindex)
            if len(directchildren) == 0:
                obj=addshape(faceindex)
            else:
                subdict=isinsidedict.copy()
                del subdict[faceindex]
                obj=doc.addObject("Part::Cut","facesfromedges_%d" % faceindex)
                obj.Base= addshape(faceindex) #we only do subtraction
                if len(directchildren) == 1:
                    obj.Tool = addfeature(directchildren[0],subdict)
                else:
                    obj.Tool = doc.addObject("Part::MultiFuse",\
                        "facesfromedges_union")
                    obj.Tool.Shapes = [addfeature(child,subdict)\
                        for child in directchildren]
                    obj.Tool.ViewObject.hide()
                obj.ViewObject.hide()
            return obj
        rootitems = [fi for fi in range(len(self.sortedfaces)) if self.hasnoparent(fi)]
        for rootitem in rootitems:
            addfeature(rootitem,self.isinsidedict).ViewObject.show()

    def makeshape(self):
        '''Collapse the containment tree into a single shape with booleans.'''
        def removefaces(rfaces):
            # Mark faces as consumed and drop them from the (copied) tree.
            for tfi in directchildren[::-1]:
                finishedwith.append(tfi)
                #del faces[tfi]
                if tfi in isinsidedict:
                    del isinsidedict[tfi]
                for key,value in isinsidedict.iteritems():
                    if tfi in value:
                        newlist=value[:] #we work on a shallow copy of isinsidedict
                        newlist.remove(tfi)
                        isinsidedict[key]=newlist
        def hasnoparent(faceindex):
            for smalllist in self.isinsidedict.itervalues():
                if faceindex in smalllist:
                    return False
            return True
        faces=self.sortedfaces[:]
        isinsidedict=self.isinsidedict.copy()
        finishedwith=[]
        # Repeatedly cut direct children out of their parents (smallest
        # parents first) until every remaining face is a root.
        while not all([Overlappingfaces.hasnoparentstatic(isinsidedict,fi) for fi in range(len(faces))]):
            #print [(Overlappingfaces.hasnoparentstatic(isinsidedict,fi),\
            #Overlappingfaces.directchildren(isinsidedict,fi)) for fi in range(len(faces))]
            for fi in range(len(faces))[::-1]:
                directchildren = Overlappingfaces.directchildren(isinsidedict,fi)
                if not directchildren:
                    continue
                elif len(directchildren) == 1:
                    faces[fi]=faces[fi].cut(faces[directchildren[0]])
                    #print fi,'-' ,directchildren[0], faces[fi],faces[directchildren[0]]
                    removefaces(directchildren)
                else:
                    toolface=fusefaces([faces[tfi] for tfi in directchildren])
                    faces[fi]=faces[fi].cut(toolface)
                    #print fi, '- ()', directchildren, [faces[tfi] for tfi in directchildren]
                    removefaces(directchildren)
                #print fi,directchildren
        faces =[face for index,face in enumerate(faces) if index not in finishedwith]
        # return faces
        return fusefaces(faces)
def findConnectedEdges(edgelist,eps=1e-6,debug=False):
    '''returns a list of list of connected edges

    Greedily grows a path from each remaining edge, first forward then
    backward, attaching any free edge whose endpoint lies within eps of
    the current path end.  Each path entry is tracked internally as a
    tuple (edge, not_reversed_flag); with debug=True the flagged form is
    returned as a second value.
    '''
    def vertequals(v1,v2,eps=1e-6):
        '''check two vertices for equality'''
        #return all([abs(c1-c2)<eps for c1,c2 in zip(v1.Point,v2.Point)])
        return v1.Point.sub(v2.Point).Length<eps
    def vertindex(forward):
        '''return index of last or first element'''
        return -1 if forward else 0
    freeedges = edgelist[:]
    retlist = []
    debuglist = []
    while freeedges:
        startwire = freeedges.pop(0)
        forward = True
        newedge = [(startwire,True)]
        for forward in (True, False):
            found = True
            while found:
                # End vertex of the path, honoring the edge's orientation flag.
                lastvert = newedge[vertindex(forward)][0].Vertexes[vertindex(forward == newedge[vertindex(forward)][1])]
                for ceindex, checkedge in enumerate(freeedges):
                    found = False
                    for cvindex, cvert in enumerate([checkedge.Vertexes[0],checkedge.Vertexes[-1]]):
                        if vertequals(lastvert,cvert,eps):
                            # Matched at vertex 0 means the edge continues in
                            # path direction; otherwise it must be reversed.
                            if forward:
                                newedge.append((checkedge,cvindex == 0))
                            else:
                                newedge.insert(0,(checkedge,cvindex == 1))
                            del freeedges[ceindex]
                            found = True
                            break
                    else:
                        found = False
                    if found:
                        break
                else:
                    found = False
        #we are finished for this edge
        debuglist.append(newedge)
        retlist.append([item[0] for item in newedge]) #strip off direction
    #print debuglist
    if debug:
        return retlist,debuglist
    else:
        return retlist
def endpointdistance(edges):
    '''Gap metrics along a path of edges.

    Returns a tuple (max_gap, min_gap, end_to_end): the largest and
    smallest distance between consecutive edges (end vertex of edge i to
    start vertex of edge i+1) and the distance between the first and last
    vertex of the whole path.  Edges are expected to be traversed forward
    starting from Vertex 0.
    '''
    count = len(edges)
    # Degenerate path: a single closed edge (e.g. a full circle).
    if count == 1 and len(edges[0].Vertexes) == 1:
        return 0.0, 0.0, 0.0
    closing = edges[0].Vertexes[0].Point.sub(
        edges[-1].Vertexes[-1].Point).Length
    if count <= 1:
        return 0.0, 0.0, closing
    gaps = [edges[i].Vertexes[-1].Point.sub(edges[i + 1].Vertexes[0].Point).Length
            for i in range(count - 1)]
    return max(gaps), min(gaps), closing
def endpointdistancedebuglist(debuglist):
    '''return the distance of of vertices in path (list of edges) as
    maximum, mininum and distance between start and endpoint
    it it expects a 'not reversed' flag for every edge

    Each entry of debuglist is a tuple (edge, not_reversed_flag) as built
    by findConnectedEdges(debug=True).  The index expression ``flag*-1``
    selects Vertexes[-1] when the flag is True and Vertexes[0] when False,
    so endpoints are read in the path's traversal direction.
    '''
    numedges=len(debuglist)
    # Degenerate path: a single closed edge (e.g. a full circle).
    if numedges == 1 and len(debuglist[0][0].Vertexes) == 1:
        return 0.0,0.0,0.0
    outerdistance = debuglist[0][0].Vertexes[(not debuglist[0][1])*-1].\
        Point.sub(debuglist[-1][0].Vertexes[(debuglist[-1][1])*-1].\
        Point).Length
    if numedges > 1:
        innerdistances=[debuglist[i][0].Vertexes[debuglist[i][1]*-1].\
            Point.sub(debuglist[i+1][0].Vertexes[(not debuglist[i+1][1])*\
            -1].Point).Length for i in range(numedges-1)]
        return max(innerdistances),min(innerdistances),outerdistance
    else:
        return 0.0,0.0,outerdistance
def edgestowires(edgelist,eps=0.001):
    '''takes list of edges and returns a list of wires

    Edges are first grouped into connected paths; paths whose endpoint
    gaps are small but nonzero are force-joined with superWireReverse,
    exact paths become plain Part.Wire objects.
    '''
    import Part, Draft
    # todo remove double edges
    wirelist=[]
    #for path in findConnectedEdges(edgelist,eps=eps):
    for path,debug in zip(*findConnectedEdges(edgelist,eps=eps,debug=True)):
        maxd,mind,outerd = endpointdistancedebuglist(debug)
        assert(maxd <= eps*2) # Assume the input to be broken
        if maxd < eps*2 and maxd > 0.000001: #OCC wont like it if maxd > 0.02:
            # Small endpoint gaps: bridge them explicitly.
            print 'endpointdistance max:%f min:%f, ends:%f' %(maxd,mind,outerd)
            if True:
                tobeclosed = outerd < eps*2
                # OpenSCAD uses 0.001 for corase grid
                #from draftlibs import fcvec, fcgeo
                #w2=fcgeo.superWire(path,tobeclosed)
                w2=superWireReverse(debug,tobeclosed)
                wirelist.append(w2)
            else:#this locks up FreeCAD
                comp=Part.Compound(path)
                wirelist.append(comp.connectEdgesToWires(False,eps).Wires[0])
                #wirelist.append(comp.connectEdgesToWires(False,0.1).Wires[0])
        else:
            # Endpoints coincide (or nearly so): try a plain wire first.
            done = False
            try:
                wire=Part.Wire(path)
                #if not close or wire.isClosed or outerd > 0.0001:
                wirelist.append(Part.Wire(path))
                done = True
            except Part.OCCError:
                pass
            if not done:
                # Fall back to OCC's own edge-to-wire connection.
                comp=Part.Compound(path)
                wirelist.append(comp.connectEdgesToWires(False,eps).Wires[0])
    return wirelist
def subtractfaces(faces):
    '''Cut every smaller face out of the biggest one.

    The faces are ordered by decreasing area; all but the first are fused
    into a single tool which is then subtracted from the largest face.
    Only makes sense if all faces overlap.
    '''
    if len(faces) == 1:
        return faces[0]
    ordered = sorted(faces, key=lambda shape: shape.Area, reverse=True)
    tool = reduce(lambda acc, face: acc.fuse(face), ordered[1:])
    return ordered[0].cut(tool)
def fusefaces(faces):
    '''Boolean-fuse a list of faces into a single shape.'''
    if len(faces) == 1:
        return faces[0]
    return reduce(lambda acc, face: acc.fuse(face), faces)
def subtractfaces2(faces):
    '''Sort faces, check if they overlap. Subtract overlapping face and fuse
    nonoverlapping groups.

    NOTE(review): relies on a module-level ``findoverlappingfaces`` helper
    that is not defined in the visible part of this file -- confirm it
    exists elsewhere before calling this with algo==2.
    '''
    groups = findoverlappingfaces(faces)
    return fusefaces([subtractfaces(group) for group in groups])
def edgestofaces(edges,algo=3,eps=0.001):
    '''Convert a loose list of edges into faces.

    :param edges: list of Part edges (e.g. collected from imported DXF shapes)
    :param algo: overlap-resolution strategy:
        None - return the raw list of faces,
        0 - compound of all faces,
        1 - subtract all smaller faces from the biggest one,
        2 - group overlapping faces, subtract within groups, fuse groups,
        3 - full containment-tree resolution via Overlappingfaces (default)
    :param eps: tolerance used when joining edges into wires
    '''
    import Part
    wires = edgestowires(edges,eps)
    facel=[]
    for w in wires:
        if not w.isClosed():
            # Close an open wire with a straight segment from end to start.
            p0 = w.Vertexes[0].Point
            p1 = w.Vertexes[-1].Point
            edges2 = w.Edges[:]
            try:
                edges2.append(Part.Line(p1,p0).toShape())
                w = Part.Wire(edges2)
            except Part.OCCError:
                # BUGFIX: was bare `except OCCError:` -- an undefined name
                # that raised NameError whenever this handler was reached.
                comp=Part.Compound(edges2)
                w = comp.connectEdgesToWires(False,eps).Wires[0]
        facel.append(Part.Face(w))
    if algo is None:
        return facel
    elif algo == 1: #stable behavior
        return subtractfaces(facel)
    elif algo == 0: #return all faces
        return Part.Compound(facel)
    elif algo == 2:
        return subtractfaces2(facel)
    elif algo == 3:
        return Overlappingfaces(facel).makeshape()
def superWireReverse(debuglist,closed=False):
    '''superWireReverse(debuglist,[closed]): forces a wire between edges
    that don't necessarily have coincident endpoints. If closed=True, wire
    will always be closed. debuglist has a tuple for every edge.The first
    entry is the edge, the second is the flag 'does not need to be inverted'

    Lines and arcs are rebuilt from endpoint to endpoint; where two
    neighboring edges do not meet exactly, the midpoint between their
    endpoints is used instead.  Returns None for edge types other than
    lines and arcs.
    '''
    #taken from draftlibs
    def median(v1,v2):
        # Midpoint between two vectors.
        vd = v2.sub(v1)
        vd.scale(.5,.5,.5)
        return v1.add(vd)
    try:
        from DraftGeomUtils import findMidpoint
    except ImportError: #workaround for Version 0.12
        from draftlibs.fcgeo import findMidpoint #workaround for Version 0.12
    import Part
    #edges = sortEdges(edgeslist)
    print debuglist
    newedges = []
    for i in range(len(debuglist)):
        curr = debuglist[i]
        # Neighboring entries; wrap around when the wire is to be closed.
        if i == 0:
            if closed:
                prev = debuglist[-1]
            else:
                prev = None
        else:
            prev = debuglist[i-1]
        if i == (len(debuglist)-1):
            if closed:
                nexte = debuglist[0]
            else:
                nexte = None
        else:
            nexte = debuglist[i+1]
        print i,prev,curr,nexte
        # Index arithmetic: ``-1*flag`` selects Vertexes[-1] when the flag
        # is truthy and Vertexes[0] otherwise, honoring edge orientation.
        if prev:
            if curr[0].Vertexes[-1*(not curr[1])].Point == \
                    prev[0].Vertexes[-1*prev[1]].Point:
                p1 = curr[0].Vertexes[-1*(not curr[1])].Point
            else:
                p1 = median(curr[0].Vertexes[-1*(not curr[1])].Point,\
                    prev[0].Vertexes[-1*prev[1]].Point)
        else:
            p1 = curr[0].Vertexes[-1*(not curr[1])].Point
        if nexte:
            if curr[0].Vertexes[-1*curr[1]].Point == \
                    nexte[0].Vertexes[-1*(not nexte[1])].Point:
                p2 = nexte[0].Vertexes[-1*(not nexte[1])].Point
            else:
                p2 = median(curr[0].Vertexes[-1*(curr[1])].Point,\
                    nexte[0].Vertexes[-1*(not nexte[1])].Point)
        else:
            p2 = curr[0].Vertexes[-1*(curr[1])].Point
        if isinstance(curr[0].Curve,Part.Line):
            print "line",p1,p2
            newedges.append(Part.Line(p1,p2).toShape())
        elif isinstance(curr[0].Curve,Part.Circle):
            p3 = findMidpoint(curr[0])
            print "arc",p1,p3,p2
            newedges.append(Part.Arc(p1,p3,p2).toShape())
        else:
            print "Cannot superWire edges that are not lines or arcs"
            return None
    print newedges
    return Part.Wire(newedges)
def importDXFface(filename,layer=None,doc=None):
    '''Import a DXF file and convert one layer's edges into faces.

    :param filename: path to the DXF file
    :param layer: label of the layer to convert; None matches every layer
        (the first group found is used)
    :param doc: target FreeCAD document; defaults to the active document
    :return: the face shape built by edgestofaces()
    :raises ValueError: if no imported layer matches *layer*
    '''
    import FreeCAD,importDXF
    importDXF.readPreferences()
    importDXF.getDXFlibs()
    importDXF.dxfMakeBlocks = False
    doc = doc or FreeCAD.activeDocument()
    layers = importDXF.processdxf(doc,filename) or importDXF.layers
    for l in layers:
        if FreeCAD.GuiUp:
            # Hide the imported helper objects so they don't clutter the view.
            for o in l.Group:
                o.ViewObject.hide()
            l.ViewObject.hide()
    groupobj=[go for go in layers if (not layer) or go.Label == layer]
    edges=[]
    if not groupobj:
        # BUGFIX: was the Python-2-only statement form
        # ``raise ValueError, 'import of layer %s failed' % layer``.
        raise ValueError('import of layer %s failed' % layer)
    for shapeobj in groupobj[0].Group:
        edges.extend(shapeobj.Shape.Edges)
    faces = edgestofaces(edges)
    # In order to allow multiple imports with the same layer name
    # we need to remove used objects from the layer group.
    for layer in layers: #remove everything that has been imported
        layer.removeObjectsFromDocument()
        layer.Document.removeObject(layer.Name)
    return faces
|
lgpl-2.1
|
leki75/ansible
|
lib/ansible/modules/network/netvisor/pn_show.py
|
59
|
5460
|
#!/usr/bin/python
""" PN CLI show commands """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_show
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: Run show commands on nvOS device.
description:
- Execute show command in the nodes and returns the results
read from the device.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
pn_command:
description:
- The C(pn_command) takes a CLI show command as value.
required: true
pn_parameters:
description:
- Display output using a specific parameter. Use 'all' to display possible
output. List of comma separated parameters.
pn_options:
description:
- Specify formatting options.
"""
EXAMPLES = """
- name: run the vlan-show command
pn_show:
pn_command: 'vlan-show'
pn_parameters: id,scope,ports
pn_options: 'layout vertical'
- name: run the vlag-show command
pn_show:
pn_command: 'vlag-show'
pn_parameters: 'id,name,cluster,mode'
pn_options: 'no-show-headers'
- name: run the cluster-show command
pn_show:
pn_command: 'cluster-show'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the show command.
returned: always
type: list
stderr:
description: The set of error responses from the show command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused any change on the target.
returned: always(False)
type: bool
"""
import shlex
def pn_cli(module):
    """
    Assemble the base Netvisor CLI invocation string.

    Credentials and the target switch are read from the module parameters;
    the returned prefix is later extended with the actual show command.
    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    params = module.params
    username = params['pn_cliusername']
    password = params['pn_clipassword']
    cliswitch = params['pn_cliswitch']
    cli = '/usr/bin/cli --quiet '
    if username and password:
        cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
    if cliswitch:
        cli += ' switch-local ' if cliswitch == 'local' else ' switch ' + cliswitch
    return cli
def run_cli(module, cli):
    """
    This method executes the cli command on the target node(s) and returns the
    output. The module then exits based on the output.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    cliswitch = module.params['pn_cliswitch']
    command = module.params['pn_command']
    cmd = shlex.split(cli)
    # 'out' contains the output
    # 'err' contains the error messages
    result, out, err = module.run_command(cmd)
    # NOTE(review): assumes cliswitch occurs literally in `cli`; with
    # cliswitch unset (None) this splits on whitespace instead -- confirm
    # the intended behavior against pn_cli().
    print_cli = cli.split(cliswitch)[1]
    # Response in JSON format
    if result != 0:
        # Non-zero exit code: report stderr; nothing was changed.
        module.exit_json(
            command=print_cli,
            msg='%s: ' % command,
            stderr=err.strip(),
            changed=False
        )
    if out:
        # Success with output: report the trimmed stdout.
        module.exit_json(
            command=print_cli,
            msg='%s: ' % command,
            stdout=out.strip(),
            changed=False
        )
    else:
        # Success but empty output.
        module.exit_json(
            command=cli,
            msg='%s: Nothing to display!!!' % command,
            changed=False
        )
def main():
    """Entry point: parse module arguments, compose the show command, run it."""
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=True, type='str'),
            pn_clipassword=dict(required=True, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str'),
            pn_command=dict(required=True, type='str'),
            pn_parameters=dict(default='all', type='str'),
            pn_options=dict(type='str')
        )
    )
    params = module.params
    # Build the CLI: base prefix + show command + output format parameters.
    cli = pn_cli(module)
    cli += ' %s format %s ' % (params['pn_command'], params['pn_parameters'])
    if params['pn_options']:
        cli += params['pn_options']
    run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
gpl-3.0
|
SaranyaKarthikeyan/boto
|
boto/services/service.py
|
170
|
6632
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.message import ServiceMessage
from boto.services.servicedef import ServiceDef
from boto.pyami.scriptbase import ScriptBase
from boto.utils import get_ts
import time
import os
import mimetypes
class Service(ScriptBase):
    """SQS-driven worker service.

    Polls an input queue for messages describing S3 objects, downloads
    each object, lets a subclass process it (override ``process_file``),
    uploads the results to S3 and reports status to an output queue
    and/or an SDB domain.  Configuration comes from a ServiceDef file.
    """

    # Default time (seconds) a message stays invisible while processed;
    # the effective value is read from the config key 'processing_time'.
    ProcessingTime = 60

    def __init__(self, config_file=None, mimetype_files=None):
        super(Service, self).__init__(config_file)
        self.name = self.__class__.__name__
        self.working_dir = boto.config.get('Pyami', 'working_dir')
        self.sd = ServiceDef(config_file)
        self.retry_count = self.sd.getint('retry_count', 5)
        self.loop_delay = self.sd.getint('loop_delay', 30)
        self.processing_time = self.sd.getint('processing_time', 60)
        self.input_queue = self.sd.get_obj('input_queue')
        self.output_queue = self.sd.get_obj('output_queue')
        self.output_domain = self.sd.get_obj('output_domain')
        if mimetype_files:
            mimetypes.init(mimetype_files)

    @staticmethod
    def split_key(key):
        """Split an output key of the form 'name;type=mimetype'.

        Returns (name, mimetype); mimetype is '' when the key has no
        type annotation.

        BUGFIX: was declared as an instance method without ``self``, so
        calling it on an instance raised TypeError; it is now a
        staticmethod and works on both the class and instances.
        """
        if key.find(';') < 0:
            return (key, '')
        name, type_spec = key.split(';')
        label, mtype = type_spec.split('=')
        return (name, mtype)

    def read_message(self):
        """Read one message from the input queue.

        Returns the message with a 'Service-Read' timestamp added, or
        None when the queue is empty.
        """
        boto.log.info('read_message')
        message = self.input_queue.read(self.processing_time)
        if message:
            boto.log.info(message.get_body())
            message['Service-Read'] = get_ts()
        return message

    # retrieve the source file from S3
    def get_file(self, message):
        """Download the input object named in *message* into working_dir.

        Returns the local path of the downloaded file.
        """
        bucket_name = message['Bucket']
        key_name = message['InputKey']
        file_name = os.path.join(self.working_dir,
                                 message.get('OriginalFileName', 'in_file'))
        boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name))
        bucket = boto.lookup('s3', bucket_name)
        key = bucket.new_key(key_name)
        # BUGFIX: file_name already contains working_dir; joining it with
        # working_dir again doubled the path when working_dir is relative.
        key.get_contents_to_filename(file_name)
        return file_name

    # process source file, return list of output files
    def process_file(self, in_file_name, msg):
        """Process the source file; subclasses override.

        Must return a list of (file_path, mimetype) tuples.
        """
        return []

    # store result file in S3
    def put_file(self, bucket_name, file_path, key_name=None):
        """Upload a result file to S3 and return the new key."""
        boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name))
        bucket = boto.lookup('s3', bucket_name)
        key = bucket.new_key(key_name)
        key.set_contents_from_filename(file_path)
        return key

    def save_results(self, results, input_message, output_message):
        """Upload each (path, mimetype) result and record its output key."""
        output_keys = []
        # Destination bucket is constant for the whole message.
        if 'OutputBucket' in input_message:
            output_bucket = input_message['OutputBucket']
        else:
            output_bucket = input_message['Bucket']
        for file_path, mime_type in results:
            key_name = os.path.split(file_path)[1]
            key = self.put_file(output_bucket, file_path, key_name)
            output_keys.append('%s;type=%s' % (key.name, mime_type))
        output_message['OutputKey'] = ','.join(output_keys)

    # write message to each output queue
    def write_message(self, message):
        """Report a completed transaction to the output queue/domain."""
        message['Service-Write'] = get_ts()
        message['Server'] = self.name
        # HOSTNAME may be unset outside a typical EC2 environment.
        message['Host'] = os.environ.get('HOSTNAME', 'unknown')
        message['Instance-ID'] = self.instance_id
        if self.output_queue:
            boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id)
            self.output_queue.write(message)
        if self.output_domain:
            boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name)
            item_name = '/'.join([message['Service-Write'],
                                  message['Bucket'], message['InputKey']])
            self.output_domain.put_attributes(item_name, message)

    # delete message from input queue
    def delete_message(self, message):
        """Remove the processed message from the input queue."""
        boto.log.info('deleting message from %s' % self.input_queue.id)
        self.input_queue.delete_message(message)

    # to clean up any files, etc. after each iteration
    def cleanup(self):
        """Per-iteration cleanup hook; default is a no-op."""
        pass

    def shutdown(self):
        """Terminate this EC2 instance if 'on_completion' says so."""
        on_completion = self.sd.get('on_completion', 'shutdown')
        if on_completion == 'shutdown':
            if self.instance_id:
                # Give pending log/status writes a chance to flush first.
                time.sleep(60)
                c = boto.connect_ec2()
                c.terminate_instances([self.instance_id])

    def main(self, notify=False):
        """Main service loop.

        Processes messages until 'retry_count' consecutive empty reads
        occur; a negative retry_count loops forever.  *notify* is kept
        for backward compatibility but is unused -- start/stop
        notifications are always sent.
        """
        self.notify('Service: %s Starting' % self.name)
        empty_reads = 0
        while self.retry_count < 0 or empty_reads < self.retry_count:
            try:
                input_message = self.read_message()
                if input_message:
                    empty_reads = 0
                    output_message = ServiceMessage(None, input_message.get_body())
                    input_file = self.get_file(input_message)
                    results = self.process_file(input_file, output_message)
                    self.save_results(results, input_message, output_message)
                    self.write_message(output_message)
                    self.delete_message(input_message)
                    self.cleanup()
                else:
                    empty_reads += 1
                    time.sleep(self.loop_delay)
            except Exception:
                # Keep the service alive; count the failure as an empty read
                # so persistent errors eventually stop the loop.
                boto.log.exception('Service Failed')
                empty_reads += 1
        self.notify('Service: %s Shutting Down' % self.name)
        self.shutdown()
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.