repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
Liuer/three.js | utils/exporters/blender/addons/io_three/exporter/api/camera.py | 125 | 1823 | from bpy import data, types, context
from .. import logger
def _camera(func):
    """Decorator that resolves its first argument to a bpy camera.

    Lets the wrapped function be called either with an actual
    ``types.Camera`` data block or with the name of a camera found in
    ``data.cameras``.

    :param func: function expecting a camera data block as first argument
    """
    def inner(name, *args, **kwargs):
        """Resolve ``name`` to a camera, then delegate to ``func``."""
        cam = name if isinstance(name, types.Camera) else data.cameras[name]
        return func(cam, *args, **kwargs)
    return inner
@_camera
def aspect(camera):
    """Return the render aspect ratio (width / height) of the current scene.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.aspect(%s)", camera)
    render = context.scene.render
    # NOTE(review): relies on Python 3 true division (Blender ships py3);
    # resolution_x / resolution_y are ints.
    return render.resolution_x/render.resolution_y
@_camera
def bottom(camera):
    """Return the bottom frustum plane of an orthographic camera.

    Negative mirror of the value returned for the top plane, i.e. the
    frustum is assumed symmetric about the view axis.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.bottom(%s)", camera)
    # NOTE(review): derived from angle_y * ortho_scale -- confirm this is
    # the convention the three.js importer expects for ortho frames.
    return -(camera.angle_y * camera.ortho_scale)
@_camera
def far(camera):
    """Return the camera's far clipping plane distance.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.far(%s)", camera)
    return camera.clip_end
@_camera
def fov(camera):
    """Return the camera lens value.

    NOTE(review): this returns ``camera.lens`` (a focal length), not an
    angular field of view -- presumably converted downstream; confirm
    against the consumer.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.fov(%s)", camera)
    return camera.lens
@_camera
def left(camera):
    """Return the left frustum plane of an orthographic camera.

    Negative mirror of the right plane (symmetric frustum).

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.left(%s)", camera)
    return -(camera.angle_x * camera.ortho_scale)
@_camera
def near(camera):
    """Return the camera's near clipping plane distance.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.near(%s)", camera)
    return camera.clip_start
@_camera
def right(camera):
    """Return the right frustum plane of an orthographic camera.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.right(%s)", camera)
    # NOTE(review): angle_x * ortho_scale -- see note on bottom(); confirm
    # against the importer's expectations.
    return camera.angle_x * camera.ortho_scale
@_camera
def top(camera):
    """Return the top frustum plane of an orthographic camera.

    :param camera: camera name or bpy camera data block
    :rtype: float
    """
    logger.debug("camera.top(%s)", camera)
    return camera.angle_y * camera.ortho_scale
| mit |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/whoosh/query/ranges.py | 92 | 13420 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
from whoosh.compat import b, u
from whoosh.query import qcore, terms, compound, wrappers
from whoosh.util.times import datetime_to_long
class RangeMixin(object):
    """Methods shared by :class:`TermRange` and :class:`NumericRange`.

    Subclasses are expected to define ``fieldname``, ``start``, ``end``,
    ``startexcl``, ``endexcl``, ``boost`` and ``constantscore``.
    """

    def __repr__(self):
        return ('%s(%r, %r, %r, %s, %s, boost=%s, constantscore=%s)'
                % (self.__class__.__name__, self.fieldname, self.start,
                   self.end, self.startexcl, self.endexcl, self.boost,
                   self.constantscore))

    def __unicode__(self):
        # Query-parser syntax: "{"/"}" mark exclusive bounds, "["/"]"
        # inclusive ones; a missing bound renders as an empty string.
        startchar = "{" if self.startexcl else "["
        endchar = "}" if self.endexcl else "]"
        start = '' if self.start is None else self.start
        end = '' if self.end is None else self.end
        return u("%s:%s%s TO %s%s") % (self.fieldname, startchar, start, end,
                                       endchar)

    __str__ = __unicode__

    def __eq__(self, other):
        # FIX: always return a bool. The previous version returned the
        # falsy ``other`` itself when ``other`` was falsy, so e.g.
        # ``query == None`` evaluated to ``None`` instead of ``False``.
        return bool(other and self.__class__ is other.__class__
                    and self.fieldname == other.fieldname
                    and self.start == other.start and self.end == other.end
                    and self.startexcl == other.startexcl
                    and self.endexcl == other.endexcl
                    and self.boost == other.boost
                    and self.constantscore == other.constantscore)

    def __hash__(self):
        # ``constantscore`` is deliberately left out of the hash; equal
        # objects still hash equal, which satisfies the hash contract.
        return (hash(self.fieldname) ^ hash(self.start) ^ hash(self.startexcl)
                ^ hash(self.end) ^ hash(self.endexcl) ^ hash(self.boost))

    def is_range(self):
        return True

    def _comparable_start(self):
        """Return a sortable ``(value, tiebreak)`` tuple for the lower bound.

        ``None`` maps to ``qcore.Lowest`` so open ranges sort first; an
        exclusive bound sorts just after the equal inclusive bound.
        """
        if self.start is None:
            return (qcore.Lowest, 0)
        else:
            second = 1 if self.startexcl else 0
            return (self.start, second)

    def _comparable_end(self):
        """Return a sortable ``(value, tiebreak)`` tuple for the upper bound.

        ``None`` maps to ``qcore.Highest``; an exclusive bound sorts just
        before the equal inclusive bound.
        """
        if self.end is None:
            return (qcore.Highest, 0)
        else:
            second = -1 if self.endexcl else 0
            return (self.end, second)

    def overlaps(self, other):
        """Return True if this range overlaps ``other`` on the same field.

        NOTE(review): only ``TermRange`` instances are considered, so any
        other query type (including ``NumericRange``) never overlaps --
        confirm this restriction is intended.
        """
        if not isinstance(other, TermRange):
            return False
        if self.fieldname != other.fieldname:
            return False

        start1 = self._comparable_start()
        start2 = other._comparable_start()
        end1 = self._comparable_end()
        end2 = other._comparable_end()

        return ((start1 >= start2 and start1 <= end2)
                or (end1 >= start2 and end1 <= end2)
                or (start2 >= start1 and start2 <= end1)
                or (end2 >= start1 and end2 <= end1))

    def merge(self, other, intersect=True):
        """Merge this range with ``other`` into a single range query.

        :param other: another range query on the same field.
        :param intersect: if True, merge to the intersection of the two
            ranges, otherwise to their union.
        :returns: a new instance of ``self.__class__``.
        """
        assert self.fieldname == other.fieldname

        start1 = self._comparable_start()
        start2 = other._comparable_start()
        end1 = self._comparable_end()
        end2 = other._comparable_end()

        if start1 >= start2 and end1 <= end2:
            # self lies within other
            # NOTE(review): the containing range is returned even when
            # intersect=True (union-like result) -- preserved as-is.
            start = start2
            end = end2
        elif start2 >= start1 and end2 <= end1:
            # other lies within self
            start = start1
            end = end1
        elif intersect:
            start = max(start1, start2)
            end = min(end1, end2)
        else:
            start = min(start1, start2)
            end = max(end1, end2)

        # Convert the comparable tuples back into constructor arguments
        startval = None if start[0] is qcore.Lowest else start[0]
        startexcl = start[1] == 1
        endval = None if end[0] is qcore.Highest else end[0]
        endexcl = end[1] == -1

        boost = max(self.boost, other.boost)
        constantscore = self.constantscore or other.constantscore

        return self.__class__(self.fieldname, startval, endval, startexcl,
                              endexcl, boost=boost,
                              constantscore=constantscore)
class TermRange(RangeMixin, terms.MultiTerm):
    """Matches documents containing any terms in a given range.

    >>> # Match documents where the indexed "id" field is greater than or equal
    >>> # to 'apple' and less than or equal to 'pear'.
    >>> TermRange("id", u"apple", u"pear")
    """

    def __init__(self, fieldname, start, end, startexcl=False, endexcl=False,
                 boost=1.0, constantscore=True):
        """
        :param fieldname: The name of the field to search.
        :param start: Match terms equal to or greater than this.
        :param end: Match terms equal to or less than this.
        :param startexcl: If True, the range start is exclusive. If False, the
            range start is inclusive.
        :param endexcl: If True, the range end is exclusive. If False, the
            range end is inclusive.
        :param boost: Boost factor that should be applied to the raw score of
            results matched by this query.
        :param constantscore: If True (the default), matched terms score the
            boost value instead of being scored individually.
        """
        self.fieldname = fieldname
        self.start = start
        self.end = end
        self.startexcl = startexcl
        self.endexcl = endexcl
        self.boost = boost
        self.constantscore = constantscore

    def normalize(self):
        # A range open at both ends (or spanning the whole term space)
        # matches everything.
        if self.start in ('', None) and self.end in (u('\uffff'), None):
            from whoosh.query import Every
            return Every(self.fieldname, boost=self.boost)
        elif self.start == self.end:
            # Degenerate range: a single value. Exclusive on either side
            # means nothing can match.
            if self.startexcl or self.endexcl:
                return qcore.NullQuery
            return terms.Term(self.fieldname, self.start, boost=self.boost)
        else:
            # NOTE(review): ``constantscore`` is not forwarded here, so the
            # normalized copy falls back to the default -- confirm intended.
            return TermRange(self.fieldname, self.start, self.end,
                             self.startexcl, self.endexcl,
                             boost=self.boost)

    #def replace(self, fieldname, oldtext, newtext):
    #    q = self.copy()
    #    if q.fieldname == fieldname:
    #        if q.start == oldtext:
    #            q.start = newtext
    #        if q.end == oldtext:
    #            q.end = newtext
    #    return q

    def _btexts(self, ixreader):
        """Yield the encoded (byte) terms of ``fieldname`` within the range.

        Generator; returns early (yields nothing) when a bound cannot be
        encoded for the field.
        """
        fieldname = self.fieldname
        field = ixreader.schema[fieldname]
        startexcl = self.startexcl
        endexcl = self.endexcl

        if self.start is None:
            start = b("")
        else:
            try:
                start = field.to_bytes(self.start)
            except ValueError:
                # Bound not encodable for this field: no terms can match
                return

        if self.end is None:
            # Sentinel greater than any real encoded term
            end = b("\xFF\xFF\xFF\xFF")
        else:
            try:
                end = field.to_bytes(self.end)
            except ValueError:
                return

        for fname, t in ixreader.terms_from(fieldname, start):
            if fname != fieldname:
                break
            if t == start and startexcl:
                continue
            if t == end and endexcl:
                break
            if t > end:
                break
            yield t
class NumericRange(RangeMixin, qcore.Query):
    """A range query for NUMERIC fields. Takes advantage of tiered indexing
    to speed up large ranges by matching at a high resolution at the edges of
    the range and a low resolution in the middle.

    >>> # Match numbers from 10 to 5925 in the "number" field.
    >>> nr = NumericRange("number", 10, 5925)
    """

    def __init__(self, fieldname, start, end, startexcl=False, endexcl=False,
                 boost=1.0, constantscore=True):
        """
        :param fieldname: The name of the field to search.
        :param start: Match terms equal to or greater than this number. This
            should be a number type, not a string.
        :param end: Match terms equal to or less than this number. This should
            be a number type, not a string.
        :param startexcl: If True, the range start is exclusive. If False, the
            range start is inclusive.
        :param endexcl: If True, the range end is exclusive. If False, the
            range end is inclusive.
        :param boost: Boost factor that should be applied to the raw score of
            results matched by this query.
        :param constantscore: If True, the compiled query returns a constant
            score (the value of the ``boost`` keyword argument) instead of
            actually scoring the matched terms. This gives a nice speed boost
            and won't affect the results in most cases since numeric ranges
            will almost always be used as a filter.
        """
        self.fieldname = fieldname
        self.start = start
        self.end = end
        self.startexcl = startexcl
        self.endexcl = endexcl
        self.boost = boost
        self.constantscore = constantscore

    # The following methods all delegate to the query produced by
    # _compile_query().

    def simplify(self, ixreader):
        return self._compile_query(ixreader).simplify(ixreader)

    def estimate_size(self, ixreader):
        return self._compile_query(ixreader).estimate_size(ixreader)

    def estimate_min_size(self, ixreader):
        return self._compile_query(ixreader).estimate_min_size(ixreader)

    def docs(self, searcher):
        q = self._compile_query(searcher.reader())
        return q.docs(searcher)

    def _compile_query(self, ixreader):
        """Translate this numeric range into term queries over the tiered
        numeric encoding: an Or of Term/TermRange subqueries (one per
        resolution band), or NullQuery if the range is empty.

        :raises Exception: if the field is not a NUMERIC field.
        """
        from whoosh.fields import NUMERIC
        from whoosh.util.numeric import tiered_ranges

        field = ixreader.schema[self.fieldname]
        if not isinstance(field, NUMERIC):
            raise Exception("NumericRange: field %r is not numeric"
                            % self.fieldname)

        start = self.start
        if start is not None:
            start = field.prepare_number(start)
        end = self.end
        if end is not None:
            end = field.prepare_number(end)

        subqueries = []
        stb = field.sortable_to_bytes
        # Get the term ranges for the different resolutions
        ranges = tiered_ranges(field.numtype, field.bits, field.signed,
                               start, end, field.shift_step,
                               self.startexcl, self.endexcl)
        for startnum, endnum, shift in ranges:
            if startnum == endnum:
                subq = terms.Term(self.fieldname, stb(startnum, shift))
            else:
                startbytes = stb(startnum, shift)
                endbytes = stb(endnum, shift)
                subq = TermRange(self.fieldname, startbytes, endbytes)
            subqueries.append(subq)

        if len(subqueries) == 1:
            q = subqueries[0]
        elif subqueries:
            q = compound.Or(subqueries, boost=self.boost)
        else:
            return qcore.NullQuery

        if self.constantscore:
            q = wrappers.ConstantScoreQuery(q, self.boost)
        return q

    def matcher(self, searcher, context=None):
        q = self._compile_query(searcher.reader())
        return q.matcher(searcher, context)
class DateRange(NumericRange):
    """A ``NumericRange`` facade that accepts ``datetime`` objects.

    The ``start`` and ``end`` datetimes are converted to long integers
    with :func:`whoosh.util.times.datetime_to_long` before being handed
    to ``NumericRange``; only ``__init__`` and ``__repr__`` differ.

    >>> DateRange("date", datetime(2010, 11, 3, 3, 0),
    ...           datetime(2010, 11, 3, 17, 59))
    """

    def __init__(self, fieldname, start, end, startexcl=False, endexcl=False,
                 boost=1.0, constantscore=True):
        # Keep the original datetimes around for __repr__()
        self.startdate = start
        self.enddate = end

        numstart = datetime_to_long(start) if start else start
        numend = datetime_to_long(end) if end else end
        super(DateRange, self).__init__(fieldname, numstart, numend,
                                        startexcl=startexcl,
                                        endexcl=endexcl, boost=boost,
                                        constantscore=constantscore)

    def __repr__(self):
        return '%s(%r, %r, %r, %s, %s, boost=%s)' % (self.__class__.__name__,
                                                     self.fieldname,
                                                     self.startdate,
                                                     self.enddate,
                                                     self.startexcl,
                                                     self.endexcl,
                                                     self.boost)
| bsd-3-clause |
sysadminmatmoz/OCB | addons/sale/sale.py | 2 | 43709 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from openerp import SUPERUSER_ID
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.exceptions import UserError
from openerp.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
class res_company(models.Model):
    """Extend res.company with a default terms-and-conditions text that is
    used to prefill new sale orders (see SaleOrder._default_note)."""
    _inherit = "res.company"

    sale_note = fields.Text(string='Default Terms and Conditions', translate=True)
class SaleOrder(models.Model):
    """Quotation / sales order.

    Lifecycle: draft (quotation) -> sent -> sale -> done, or cancel.
    Monetary totals and the invoicing status are computed from the order
    lines; invoices are created from the lines via action_invoice_create().
    """
    _name = "sale.order"
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    _description = "Sales Order"
    _order = 'date_order desc, id desc'

    @api.depends('order_line.price_total')
    def _amount_all(self):
        """
        Compute the total amounts of the SO (untaxed, tax, total), rounded
        in the pricelist currency.
        """
        for order in self:
            amount_untaxed = amount_tax = 0.0
            for line in order.order_line:
                amount_untaxed += line.price_subtotal
                amount_tax += line.price_tax
            order.update({
                'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),
                'amount_tax': order.pricelist_id.currency_id.round(amount_tax),
                'amount_total': amount_untaxed + amount_tax,
            })

    @api.depends('state', 'order_line.invoice_status')
    def _get_invoiced(self):
        """
        Compute the invoice status of a SO. Possible statuses:
        - no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions of no other status is met.
        - to invoice: if any SO line is 'to invoice', the whole SO is 'to invoice'
        - invoiced: if all SO lines are invoiced, the SO is invoiced.
        - upselling: if all SO lines are invoiced or upselling, the status is upselling.

        The invoice_ids are obtained thanks to the invoice lines of the SO lines, and we also search
        for possible refunds created directly from existing invoices. This is necessary since such a
        refund is not directly linked to the SO.
        """
        for order in self:
            invoice_ids = order.order_line.mapped('invoice_lines').mapped('invoice_id')
            # Search for invoices which have been 'cancelled' (filter_refund = 'modify' in
            # 'account.invoice.refund')
            # use like as origin may contains multiple references (e.g. 'SO01, SO02')
            refunds = invoice_ids.search([('origin', 'like', order.name)])
            invoice_ids |= refunds.filtered(lambda r: order.name in [origin.strip() for origin in r.origin.split(',')])
            # Search for refunds as well
            refund_ids = self.env['account.invoice'].browse()
            if invoice_ids:
                refund_ids = refund_ids.search([('type', '=', 'out_refund'), ('origin', 'in', invoice_ids.mapped('number')), ('origin', '!=', False)])

            line_invoice_status = [line.invoice_status for line in order.order_line]

            if order.state not in ('sale', 'done'):
                invoice_status = 'no'
            elif any(invoice_status == 'to invoice' for invoice_status in line_invoice_status):
                invoice_status = 'to invoice'
            elif all(invoice_status == 'invoiced' for invoice_status in line_invoice_status):
                invoice_status = 'invoiced'
            elif all(invoice_status in ['invoiced', 'upselling'] for invoice_status in line_invoice_status):
                invoice_status = 'upselling'
            else:
                invoice_status = 'no'

            order.update({
                'invoice_count': len(set(invoice_ids.ids + refund_ids.ids)),
                'invoice_ids': invoice_ids.ids + refund_ids.ids,
                'invoice_status': invoice_status
            })

    @api.model
    def _default_note(self):
        # Default terms and conditions come from the user's company
        return self.env.user.company_id.sale_note

    @api.model
    def _get_default_team(self):
        # Default CRM sales team for new orders
        default_team_id = self.env['crm.team']._get_default_team_id()
        return self.env['crm.team'].browse(default_team_id)

    @api.onchange('fiscal_position_id')
    def _compute_tax_id(self):
        """
        Trigger the recompute of the taxes if the fiscal position is changed on the SO.
        """
        for order in self:
            order.order_line._compute_tax_id()

    # ------------------------------------------------------------------
    # Field definitions
    # ------------------------------------------------------------------
    name = fields.Char(string='Order Reference', required=True, copy=False, readonly=True, index=True, default=lambda self: _('New'))
    origin = fields.Char(string='Source Document', help="Reference of the document that generated this sales order request.")
    client_order_ref = fields.Char(string='Customer Reference', copy=False)
    state = fields.Selection([
        ('draft', 'Quotation'),
        ('sent', 'Quotation Sent'),
        ('sale', 'Sale Order'),
        ('done', 'Done'),
        ('cancel', 'Cancelled'),
        ], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
    date_order = fields.Datetime(string='Order Date', required=True, readonly=True, index=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False, default=fields.Datetime.now)
    validity_date = fields.Date(string='Expiration Date', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
    create_date = fields.Datetime(string='Creation Date', readonly=True, index=True, help="Date on which sales order is created.")
    user_id = fields.Many2one('res.users', string='Salesperson', index=True, track_visibility='onchange', default=lambda self: self.env.user)
    partner_id = fields.Many2one('res.partner', string='Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, index=True, track_visibility='always')
    partner_invoice_id = fields.Many2one('res.partner', string='Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order.")
    partner_shipping_id = fields.Many2one('res.partner', string='Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order.")
    pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order.")
    currency_id = fields.Many2one("res.currency", related='pricelist_id.currency_id', string="Currency", readonly=True, required=True)
    project_id = fields.Many2one('account.analytic.account', 'Analytic Account', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order.", copy=False, domain=[('account_type', '=', 'normal')])
    order_line = fields.One2many('sale.order.line', 'order_id', string='Order Lines', states={'cancel': [('readonly', True)], 'done': [('readonly', True)]}, copy=True)
    invoice_count = fields.Integer(string='# of Invoices', compute='_get_invoiced', readonly=True)
    invoice_ids = fields.Many2many("account.invoice", string='Invoices', compute="_get_invoiced", readonly=True, copy=False)
    invoice_status = fields.Selection([
        ('upselling', 'Upselling Opportunity'),
        ('invoiced', 'Fully Invoiced'),
        ('to invoice', 'To Invoice'),
        ('no', 'Nothing to Invoice')
        ], string='Invoice Status', compute='_get_invoiced', store=True, readonly=True, default='no')
    note = fields.Text('Terms and conditions', default=_default_note)
    amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, compute='_amount_all', track_visibility='always')
    amount_tax = fields.Monetary(string='Taxes', store=True, readonly=True, compute='_amount_all', track_visibility='always')
    amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all', track_visibility='always')
    payment_term_id = fields.Many2one('account.payment.term', string='Payment Term', oldname='payment_term')
    fiscal_position_id = fields.Many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position')
    company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('sale.order'))
    team_id = fields.Many2one('crm.team', 'Sales Team', change_default=True, default=_get_default_team, oldname='section_id')
    procurement_group_id = fields.Many2one('procurement.group', 'Procurement Group', copy=False)
    # NOTE(review): a related field through a One2many -- presumably only
    # meaningful for search/filter purposes; confirm before relying on it.
    product_id = fields.Many2one('product.product', related='order_line.product_id', string='Product')

    @api.model
    def _get_customer_lead(self, product_tmpl_id):
        # Hook: delivery lead time per product template; overridden in
        # other modules (e.g. sale_stock). Base implementation: none.
        return False

    @api.multi
    def button_dummy(self):
        # No-op button used by the form view to force a save/recompute
        return True

    @api.multi
    def unlink(self):
        """Prevent deleting any order that is not a draft quotation."""
        for order in self:
            if order.state != 'draft':
                raise UserError(_('You can only delete draft quotations!'))
        return super(SaleOrder, self).unlink()

    @api.multi
    def _track_subtype(self, init_values):
        # Select the mail subtype used for chatter notifications based on
        # the state transition recorded in init_values.
        self.ensure_one()
        if 'state' in init_values and self.state == 'sale':
            return 'sale.mt_order_confirmed'
        elif 'state' in init_values and self.state == 'sent':
            return 'sale.mt_order_sent'
        return super(SaleOrder, self)._track_subtype(init_values)

    @api.multi
    @api.onchange('partner_shipping_id')
    def onchange_partner_shipping_id(self):
        """
        Trigger the change of fiscal position when the shipping address is modified.
        """
        fiscal_position = self.env['account.fiscal.position'].get_fiscal_position(self.partner_id.id, self.partner_shipping_id.id)
        if fiscal_position:
            self.fiscal_position_id = fiscal_position
        return {}

    @api.multi
    @api.onchange('partner_id')
    def onchange_partner_id(self):
        """
        Update the following fields when the partner is changed:
        - Pricelist
        - Payment term
        - Invoice address
        - Delivery address
        """
        if not self.partner_id:
            self.update({
                'partner_invoice_id': False,
                'partner_shipping_id': False,
                'payment_term_id': False,
                'fiscal_position_id': False,
            })
            return

        addr = self.partner_id.address_get(['delivery', 'invoice'])
        values = {
            'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,
            'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,
            'partner_invoice_id': addr['invoice'],
            'partner_shipping_id': addr['delivery'],
            'note': self.with_context(lang=self.partner_id.lang).env.user.company_id.sale_note,
        }
        if self.partner_id.user_id:
            values['user_id'] = self.partner_id.user_id.id
        if self.partner_id.team_id:
            values['team_id'] = self.partner_id.team_id.id
        self.update(values)

    @api.model
    def create(self, vals):
        """Assign the sequence reference and fill in partner-derived
        defaults (addresses, pricelist) that were not provided."""
        if vals.get('name', 'New') == 'New':
            vals['name'] = self.env['ir.sequence'].next_by_code('sale.order') or 'New'

        # Makes sure partner_invoice_id', 'partner_shipping_id' and 'pricelist_id' are defined
        if any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id']):
            partner = self.env['res.partner'].browse(vals.get('partner_id'))
            addr = partner.address_get(['delivery', 'invoice'])
            vals['partner_invoice_id'] = vals.setdefault('partner_invoice_id', addr['invoice'])
            vals['partner_shipping_id'] = vals.setdefault('partner_shipping_id', addr['delivery'])
            vals['pricelist_id'] = vals.setdefault('pricelist_id', partner.property_product_pricelist and partner.property_product_pricelist.id)
        result = super(SaleOrder, self).create(vals)
        return result

    @api.multi
    def _prepare_invoice(self):
        """
        Prepare the dict of values to create the new invoice for a sales order. This method may be
        overridden to implement custom invoice generation (making sure to call super() to establish
        a clean extension chain).

        :raises UserError: if no sale journal is configured for the company.
        """
        self.ensure_one()
        journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']
        if not journal_id:
            raise UserError(_('Please define an accounting sale journal for this company.'))
        invoice_vals = {
            'name': self.client_order_ref or '',
            'origin': self.name,
            'type': 'out_invoice',
            'account_id': self.partner_invoice_id.property_account_receivable_id.id,
            'partner_id': self.partner_invoice_id.id,
            'journal_id': journal_id,
            'currency_id': self.pricelist_id.currency_id.id,
            'comment': self.note,
            'payment_term_id': self.payment_term_id.id,
            'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,
            'company_id': self.company_id.id,
            'user_id': self.user_id and self.user_id.id,
            'team_id': self.team_id.id
        }
        return invoice_vals

    @api.multi
    def print_quotation(self):
        # Printing a draft quotation marks it as sent, then returns the
        # report action for the quotation PDF.
        self.filtered(lambda s: s.state == 'draft').write({'state': 'sent'})
        return self.env['report'].get_action(self, 'sale.report_saleorder')

    @api.multi
    def action_view_invoice(self):
        """Return the window action showing this order's invoice(s):
        a form view for a single invoice, a list otherwise."""
        invoice_ids = self.mapped('invoice_ids')
        imd = self.env['ir.model.data']
        action = imd.xmlid_to_object('account.action_invoice_tree1')
        list_view_id = imd.xmlid_to_res_id('account.invoice_tree')
        form_view_id = imd.xmlid_to_res_id('account.invoice_form')

        result = {
            'name': action.name,
            'help': action.help,
            'type': action.type,
            'views': [[list_view_id, 'tree'], [form_view_id, 'form'], [False, 'graph'], [False, 'kanban'], [False, 'calendar'], [False, 'pivot']],
            'target': action.target,
            'context': action.context,
            'res_model': action.res_model,
        }
        if len(invoice_ids) > 1:
            result['domain'] = "[('id','in',%s)]" % invoice_ids.ids
        elif len(invoice_ids) == 1:
            result['views'] = [(form_view_id, 'form')]
            result['res_id'] = invoice_ids.ids[0]
        else:
            result = {'type': 'ir.actions.act_window_close'}
        return result

    @api.multi
    def action_invoice_create(self, grouped=False, final=False):
        """
        Create the invoice associated to the SO.
        :param grouped: if True, invoices are grouped by SO id. If False, invoices are grouped by
                        (partner_invoice_id, currency)
        :param final: if True, refunds will be generated if necessary
        :returns: list of created invoices
        """
        inv_obj = self.env['account.invoice']
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        invoices = {}

        for order in self:
            group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)
            # Lines with positive qty first so the invoice is created from
            # an invoiceable line, not a refundable one
            for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):
                if float_is_zero(line.qty_to_invoice, precision_digits=precision):
                    continue
                if group_key not in invoices:
                    inv_data = order._prepare_invoice()
                    invoice = inv_obj.create(inv_data)
                    invoices[group_key] = invoice
                elif group_key in invoices:
                    vals = {}
                    if order.name not in invoices[group_key].origin.split(', '):
                        vals['origin'] = invoices[group_key].origin + ', ' + order.name
                    if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(', '):
                        vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref
                    invoices[group_key].write(vals)
                if line.qty_to_invoice > 0:
                    line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)
                elif line.qty_to_invoice < 0 and final:
                    line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)

        for invoice in invoices.values():
            if not invoice.invoice_line_ids:
                raise UserError(_('There is no invoicable line.'))
            # If invoice is negative, do a refund invoice instead
            if invoice.amount_untaxed < 0:
                invoice.type = 'out_refund'
                for line in invoice.invoice_line_ids:
                    line.quantity = -line.quantity
            # Use additional field helper function (for account extensions)
            for line in invoice.invoice_line_ids:
                line._set_additional_fields(invoice)
            # Necessary to force computation of taxes. In account_invoice, they are triggered
            # by onchanges, which are not triggered when doing a create.
            invoice.compute_taxes()
        return [inv.id for inv in invoices.values()]

    @api.multi
    def action_draft(self):
        """Reset cancelled/sent orders back to draft and detach them from
        their procurements."""
        orders = self.filtered(lambda s: s.state in ['cancel', 'sent'])
        orders.write({
            'state': 'draft',
            'procurement_group_id': False,
        })
        orders.mapped('order_line').mapped('procurement_ids').write({'sale_line_id': False})

    @api.multi
    def action_cancel(self):
        self.write({'state': 'cancel'})

    @api.multi
    def action_quotation_send(self):
        '''
        This function opens a window to compose an email, with the edi sale template message loaded by default
        '''
        self.ensure_one()
        ir_model_data = self.env['ir.model.data']
        try:
            template_id = ir_model_data.get_object_reference('sale', 'email_template_edi_sale')[1]
        except ValueError:
            template_id = False
        try:
            compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
        except ValueError:
            compose_form_id = False
        ctx = dict()
        ctx.update({
            'default_model': 'sale.order',
            'default_res_id': self.ids[0],
            'default_use_template': bool(template_id),
            'default_template_id': template_id,
            'default_composition_mode': 'comment',
            'mark_so_as_sent': True
        })
        return {
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'mail.compose.message',
            'views': [(compose_form_id, 'form')],
            'view_id': compose_form_id,
            'target': 'new',
            'context': ctx,
        }

    @api.multi
    def force_quotation_send(self):
        """Send the quotation email immediately (no composer window),
        using the template resolved by action_quotation_send()."""
        for order in self:
            email_act = order.action_quotation_send()
            if email_act and email_act.get('context'):
                email_ctx = email_act['context']
                email_ctx.update(default_email_from=order.company_id.email)
                order.with_context(email_ctx).message_post_with_template(email_ctx.get('default_template_id'))
        return True

    @api.multi
    def action_done(self):
        self.write({'state': 'done'})

    @api.model
    def _prepare_procurement_group(self):
        # Hook: values for the procurement.group created on confirmation
        return {'name': self.name}

    @api.multi
    def action_confirm(self):
        """Confirm the quotation(s): set state to 'sale', create the
        procurements, create an analytic account when a line is invoiced
        on cost, and optionally auto-lock the order."""
        for order in self:
            order.state = 'sale'
            if self.env.context.get('send_email'):
                self.force_quotation_send()
            order.order_line._action_procurement_create()
            if not order.project_id:
                for line in order.order_line:
                    if line.product_id.invoice_policy == 'cost':
                        order._create_analytic_account()
                        break
        if self.env['ir.values'].get_default('sale.config.settings', 'auto_done_setting'):
            self.action_done()
        return True

    @api.multi
    def _create_analytic_account(self, prefix=None):
        """Create an analytic account per order (named after the order,
        optionally prefixed) and link it as project_id."""
        for order in self:
            name = order.name
            if prefix:
                name = prefix + ": " + order.name
            analytic = self.env['account.analytic.account'].create({
                'name': name,
                'code': order.client_order_ref,
                'company_id': order.company_id.id,
                'partner_id': order.partner_id.id
            })
            order.project_id = analytic

    @api.multi
    def _notification_group_recipients(self, message, recipients, done_ids, group_data):
        # Route chatter notifications: recipients without a linked user
        # are grouped as 'partner', the rest as 'user'.
        group_user = self.env.ref('base.group_user')
        for recipient in recipients:
            if recipient.id in done_ids:
                continue
            if not recipient.user_ids:
                group_data['partner'] |= recipient
            else:
                group_data['user'] |= recipient
            done_ids.add(recipient.id)
        return super(SaleOrder, self)._notification_group_recipients(message, recipients, done_ids, group_data)
class SaleOrderLine(models.Model):
_name = 'sale.order.line'
_description = 'Sales Order Line'
_order = 'order_id desc, sequence, id'
    @api.depends('state', 'product_uom_qty', 'qty_delivered', 'qty_to_invoice', 'qty_invoiced')
    def _compute_invoice_status(self):
        """
        Compute the invoice status of a SO line. Possible statuses:
        - no: if the SO is not in status 'sale' or 'done', we consider that there is nothing to
          invoice. This is also the default value if the conditions of no other status is met.
        - to invoice: we refer to the quantity to invoice of the line. Refer to method
          `_get_to_invoice_qty()` for more information on how this quantity is calculated.
        - upselling: this is possible only for a product invoiced on ordered quantities for which
          we delivered more than expected. This could arise if, for example, a project took more
          time than expected but we decided not to invoice the extra cost to the client. This
          occurs only in state 'sale', so that when a SO is set to done, the upselling opportunity
          is removed from the list.
        - invoiced: the quantity invoiced is larger or equal to the quantity ordered.
        """
        precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
        for line in self:
            if line.state not in ('sale', 'done'):
                line.invoice_status = 'no'
            elif not float_is_zero(line.qty_to_invoice, precision_digits=precision):
                line.invoice_status = 'to invoice'
            elif line.state == 'sale' and line.product_id.invoice_policy == 'order' and\
                    float_compare(line.qty_delivered, line.product_uom_qty, precision_digits=precision) == 1:
                # Delivered more than ordered on an order-based policy
                line.invoice_status = 'upselling'
            elif float_compare(line.qty_invoiced, line.product_uom_qty, precision_digits=precision) >= 0:
                line.invoice_status = 'invoiced'
            else:
                line.invoice_status = 'no'
@api.depends('product_uom_qty', 'discount', 'price_unit', 'tax_id')
def _compute_amount(self):
"""
Compute the amounts of the SO line.
"""
for line in self:
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_id)
line.update({
'price_tax': taxes['total_included'] - taxes['total_excluded'],
'price_total': taxes['total_included'],
'price_subtotal': taxes['total_excluded'],
})
@api.depends('product_id.invoice_policy', 'order_id.state')
def _compute_qty_delivered_updateable(self):
for line in self:
line.qty_delivered_updateable = line.product_id.invoice_policy in ('order', 'delivery') and line.order_id.state == 'sale' and line.product_id.track_service == 'manual'
@api.depends('qty_invoiced', 'qty_delivered', 'product_uom_qty', 'order_id.state')
def _get_to_invoice_qty(self):
"""
Compute the quantity to invoice. If the invoice policy is order, the quantity to invoice is
calculated from the ordered quantity. Otherwise, the quantity delivered is used.
"""
for line in self:
if line.order_id.state in ['sale', 'done']:
if line.product_id.invoice_policy == 'order':
line.qty_to_invoice = line.product_uom_qty - line.qty_invoiced
else:
line.qty_to_invoice = line.qty_delivered - line.qty_invoiced
else:
line.qty_to_invoice = 0
@api.depends('invoice_lines.invoice_id.state', 'invoice_lines.quantity')
def _get_invoice_qty(self):
"""
Compute the quantity invoiced. If case of a refund, the quantity invoiced is decreased. Note
that this is the case only if the refund is generated from the SO and that is intentional: if
a refund made would automatically decrease the invoiced quantity, then there is a risk of reinvoicing
it automatically, which may not be wanted at all. That's why the refund has to be created from the SO
"""
for line in self:
qty_invoiced = 0.0
for invoice_line in line.invoice_lines:
if invoice_line.invoice_id.state != 'cancel':
if invoice_line.invoice_id.type == 'out_invoice':
qty_invoiced += self.env['product.uom']._compute_qty_obj(invoice_line.uom_id, invoice_line.quantity, line.product_uom)
elif invoice_line.invoice_id.type == 'out_refund':
qty_invoiced -= self.env['product.uom']._compute_qty_obj(invoice_line.uom_id, invoice_line.quantity, line.product_uom)
line.qty_invoiced = qty_invoiced
@api.depends('price_subtotal', 'product_uom_qty')
def _get_price_reduce(self):
for line in self:
line.price_reduce = line.price_subtotal / line.product_uom_qty if line.product_uom_qty else 0.0
@api.multi
def _compute_tax_id(self):
for line in self:
fpos = line.order_id.fiscal_position_id or line.order_id.partner_id.property_account_position_id
if fpos:
# The superuser is used by website_sale in order to create a sale order. We need to make
# sure we only select the taxes related to the company of the partner. This should only
# apply if the partner is linked to a company.
if self.env.uid == SUPERUSER_ID and line.order_id.company_id:
taxes = fpos.map_tax(line.product_id.taxes_id).filtered(lambda r: r.company_id == line.order_id.company_id)
else:
taxes = fpos.map_tax(line.product_id.taxes_id)
line.tax_id = taxes
else:
line.tax_id = line.product_id.taxes_id if line.product_id.taxes_id else False
@api.multi
def _prepare_order_line_procurement(self, group_id=False):
self.ensure_one()
return {
'name': self.name,
'origin': self.order_id.name,
'date_planned': datetime.strptime(self.order_id.date_order, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=self.customer_lead),
'product_id': self.product_id.id,
'product_qty': self.product_uom_qty,
'product_uom': self.product_uom.id,
'company_id': self.order_id.company_id.id,
'group_id': group_id,
'sale_line_id': self.id
}
@api.multi
def _action_procurement_create(self):
"""
Create procurements based on quantity ordered. If the quantity is increased, new
procurements are created. If the quantity is decreased, no automated action is taken.
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
new_procs = self.env['procurement.order'] #Empty recordset
for line in self:
if line.state != 'sale' or not line.product_id._need_procurement():
continue
qty = 0.0
for proc in line.procurement_ids:
qty += proc.product_qty
if float_compare(qty, line.product_uom_qty, precision_digits=precision) >= 0:
continue
if not line.order_id.procurement_group_id:
vals = line.order_id._prepare_procurement_group()
line.order_id.procurement_group_id = self.env["procurement.group"].create(vals)
vals = line._prepare_order_line_procurement(group_id=line.order_id.procurement_group_id.id)
vals['product_qty'] = line.product_uom_qty - qty
new_proc = self.env["procurement.order"].create(vals)
new_procs += new_proc
new_procs.run()
return new_procs
@api.model
def _get_analytic_invoice_policy(self):
return ['cost']
@api.model
def _get_analytic_track_service(self):
return []
@api.model
def create(self, values):
onchange_fields = ['name', 'price_unit', 'product_uom', 'tax_id']
if values.get('order_id') and values.get('product_id') and any(f not in values for f in onchange_fields):
line = self.new(values)
line.product_id_change()
for field in onchange_fields:
if field not in values:
values[field] = line._fields[field].convert_to_write(line[field])
line = super(SaleOrderLine, self).create(values)
if line.state == 'sale':
if (not line.order_id.project_id and
(line.product_id.track_service in self._get_analytic_track_service() or
line.product_id.invoice_policy in self._get_analytic_invoice_policy())):
line.order_id._create_analytic_account()
line._action_procurement_create()
return line
@api.multi
def write(self, values):
lines = False
if 'product_uom_qty' in values:
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
lines = self.filtered(
lambda r: r.state == 'sale' and float_compare(r.product_uom_qty, values['product_uom_qty'], precision_digits=precision) == -1)
result = super(SaleOrderLine, self).write(values)
if lines:
lines._action_procurement_create()
return result
order_id = fields.Many2one('sale.order', string='Order Reference', required=True, ondelete='cascade', index=True, copy=False)
name = fields.Text(string='Description', required=True)
sequence = fields.Integer(string='Sequence', default=10)
invoice_lines = fields.Many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_line_id', string='Invoice Lines', copy=False)
invoice_status = fields.Selection([
('upselling', 'Upselling Opportunity'),
('invoiced', 'Fully Invoiced'),
('to invoice', 'To Invoice'),
('no', 'Nothing to Invoice')
], string='Invoice Status', compute='_compute_invoice_status', store=True, readonly=True, default='no')
price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price'), default=0.0)
price_subtotal = fields.Monetary(compute='_compute_amount', string='Subtotal', readonly=True, store=True)
price_tax = fields.Monetary(compute='_compute_amount', string='Taxes', readonly=True, store=True)
price_total = fields.Monetary(compute='_compute_amount', string='Total', readonly=True, store=True)
price_reduce = fields.Monetary(compute='_get_price_reduce', string='Price Reduce', readonly=True, store=True)
tax_id = fields.Many2many('account.tax', string='Taxes')
discount = fields.Float(string='Discount (%)', digits=dp.get_precision('Discount'), default=0.0)
product_id = fields.Many2one('product.product', string='Product', domain=[('sale_ok', '=', True)], change_default=True, ondelete='restrict', required=True)
product_uom_qty = fields.Float(string='Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0)
product_uom = fields.Many2one('product.uom', string='Unit of Measure', required=True)
qty_delivered_updateable = fields.Boolean(compute='_compute_qty_delivered_updateable', string='Can Edit Delivered', readonly=True, default=True)
qty_delivered = fields.Float(string='Delivered', copy=False, digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_to_invoice = fields.Float(
compute='_get_to_invoice_qty', string='To Invoice', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'), default=0.0)
qty_invoiced = fields.Float(
compute='_get_invoice_qty', string='Invoiced', store=True, readonly=True,
digits=dp.get_precision('Product Unit of Measure'), default=0.0)
salesman_id = fields.Many2one(related='order_id.user_id', store=True, string='Salesperson', readonly=True)
currency_id = fields.Many2one(related='order_id.currency_id', store=True, string='Currency', readonly=True)
company_id = fields.Many2one(related='order_id.company_id', string='Company', store=True, readonly=True)
order_partner_id = fields.Many2one(related='order_id.partner_id', store=True, string='Customer')
state = fields.Selection([
('draft', 'Quotation'),
('sent', 'Quotation Sent'),
('sale', 'Sale Order'),
('done', 'Done'),
('cancel', 'Cancelled'),
], related='order_id.state', string='Order Status', readonly=True, copy=False, store=True, default='draft')
customer_lead = fields.Float(
'Delivery Lead Time', required=True, default=0.0,
help="Number of days between the order confirmation and the shipping of the products to the customer", oldname="delay")
procurement_ids = fields.One2many('procurement.order', 'sale_line_id', string='Procurements')
@api.multi
def _prepare_invoice_line(self, qty):
"""
Prepare the dict of values to create the new invoice line for a sales order line.
:param qty: float quantity to invoice
"""
self.ensure_one()
res = {}
account = self.product_id.property_account_income_id or self.product_id.categ_id.property_account_income_categ_id
if not account:
raise UserError(_('Please define income account for this product: "%s" (id:%d) - or for its category: "%s".') % \
(self.product_id.name, self.product_id.id, self.product_id.categ_id.name))
fpos = self.order_id.fiscal_position_id or self.order_id.partner_id.property_account_position_id
if fpos:
account = fpos.map_account(account)
res = {
'name': self.name,
'sequence': self.sequence,
'origin': self.order_id.name,
'account_id': account.id,
'price_unit': self.price_unit,
'quantity': qty,
'discount': self.discount,
'uom_id': self.product_uom.id,
'product_id': self.product_id.id or False,
'invoice_line_tax_ids': [(6, 0, self.tax_id.ids)],
'account_analytic_id': self.order_id.project_id.id,
}
return res
@api.multi
def invoice_line_create(self, invoice_id, qty):
"""
Create an invoice line. The quantity to invoice can be positive (invoice) or negative
(refund).
:param invoice_id: integer
:param qty: float quantity to invoice
"""
precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
for line in self:
if not float_is_zero(qty, precision_digits=precision):
vals = line._prepare_invoice_line(qty=qty)
vals.update({'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
self.env['account.invoice.line'].create(vals)
@api.multi
@api.onchange('product_id')
def product_id_change(self):
if not self.product_id:
return {'domain': {'product_uom': []}}
vals = {}
domain = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
if not self.product_uom or (self.product_id.uom_id.category_id.id != self.product_uom.category_id.id):
vals['product_uom'] = self.product_id.uom_id
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id
)
name = product.name_get()[0][1]
if product.description_sale:
name += '\n' + product.description_sale
vals['name'] = name
self._compute_tax_id()
if self.order_id.pricelist_id and self.order_id.partner_id:
vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)
self.update(vals)
return {'domain': domain}
@api.onchange('product_uom', 'product_uom_qty')
def product_uom_change(self):
if not self.product_uom:
self.price_unit = 0.0
return
if self.order_id.pricelist_id and self.order_id.partner_id:
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=self.product_uom_qty,
date_order=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id,
fiscal_position=self.env.context.get('fiscal_position')
)
self.price_unit = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)
@api.multi
def unlink(self):
if self.filtered(lambda x: x.state in ('sale', 'done')):
raise UserError(_('You can not remove a sale order line.\nDiscard changes and try setting the quantity to 0.'))
return super(SaleOrderLine, self).unlink()
@api.multi
def _get_delivered_qty(self):
'''
Intended to be overridden in sale_stock and sale_mrp
:return: the quantity delivered
:rtype: float
'''
return 0.0
class MailComposeMessage(models.TransientModel):
    _inherit = 'mail.compose.message'

    @api.multi
    def send_mail(self, auto_commit=False):
        """Mark the related quotation as 'sent' when the composer is opened
        from a sale order with the 'mark_so_as_sent' context key, then
        delegate to the standard mail sending (with auto-follow enabled)."""
        ctx = self._context
        is_so_sending = (ctx.get('default_model') == 'sale.order'
                         and ctx.get('default_res_id')
                         and ctx.get('mark_so_as_sent'))
        if is_so_sending:
            order = self.env['sale.order'].browse([ctx['default_res_id']])
            if order.state == 'draft':
                order.state = 'sent'
        composer = self.with_context(mail_post_autofollow=True)
        return super(MailComposeMessage, composer).send_mail(auto_commit=auto_commit)
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'
    @api.model
    def _get_default_team(self):
        # Default sales team for a new invoice, as decided by crm.team.
        default_team_id = self.env['crm.team']._get_default_team_id()
        return self.env['crm.team'].browse(default_team_id)
    team_id = fields.Many2one('crm.team', string='Sales Team', default=_get_default_team, oldname='section_id')
    @api.multi
    def confirm_paid(self):
        # After the standard payment confirmation, post an "Invoice ... paid"
        # message on every sale order that contributed a line to the paid
        # invoices.  The set of (order, invoice number) pairs de-duplicates
        # postings when several lines share the same order.
        res = super(AccountInvoice, self).confirm_paid()
        todo = set()
        for invoice in self:
            for line in invoice.invoice_line_ids:
                for sale_line in line.sale_line_ids:
                    todo.add((sale_line.order_id, invoice.number))
        for (order, name) in todo:
            order.message_post(body=_("Invoice %s paid") % (name))
        return res
    @api.model
    def _refund_cleanup_lines(self, lines):
        # In 'modify' refund mode, carry the sale-order-line link over to the
        # new lines as a (6, 0, ids) m2m command and clear it on the original
        # lines -- presumably so the SO's invoiced quantities are not counted
        # twice (TODO confirm against account's refund workflow).
        # NOTE(review): Python 2 idioms (xrange/iteritems) -- this module
        # targets a Python 2 based Odoo release; do not "modernize" blindly.
        result = super(AccountInvoice, self)._refund_cleanup_lines(lines)
        if self.env.context.get('mode') == 'modify':
            for i in xrange(0, len(lines)):
                for name, field in lines[i]._fields.iteritems():
                    if name == 'sale_line_ids':
                        result[i][2][name] = [(6, 0, lines[i][name].ids)]
                        lines[i][name] = False
        return result
class AccountInvoiceLine(models.Model):
    _inherit = 'account.invoice.line'
    # Back-link to the sale order line(s) that generated this invoice line;
    # same relation table as 'invoice_lines' on sale.order.line.
    sale_line_ids = fields.Many2many('sale.order.line', 'sale_order_line_invoice_rel', 'invoice_line_id', 'order_line_id', string='Sale Order Lines', readonly=True, copy=False)
class ProcurementOrder(models.Model):
    _inherit = 'procurement.order'
    # Sale order line that spawned this procurement (set by
    # SaleOrderLine._prepare_order_line_procurement()).
    sale_line_id = fields.Many2one('sale.order.line', string='Sale Order Line')
class ProductProduct(models.Model):
    _inherit = 'product.product'

    @api.multi
    def _sales_count(self):
        """Set sales_count on each variant to the total quantity sold
        (confirmed/done orders), aggregated from sale.report."""
        domain = [
            ('state', 'in', ['sale', 'done']),
            ('product_id', 'in', self.ids),
        ]
        grouped = self.env['sale.report'].read_group(domain, ['product_id', 'product_uom_qty'], ['product_id'])
        qty_by_product = dict(
            (entry['product_id'][0], entry['product_uom_qty']) for entry in grouped)
        for product in self:
            product.sales_count = qty_by_product.get(product.id, 0)
        return qty_by_product

    sales_count = fields.Integer(compute='_sales_count', string='# Sales')
class ProductTemplate(models.Model):
    _inherit = 'product.template'

    track_service = fields.Selection([('manual', 'Manually set quantities on order')], string='Track Service', default='manual')

    @api.multi
    @api.depends('product_variant_ids.sales_count')
    def _sales_count(self):
        """Aggregate the sales counters of all variants on the template."""
        for template in self:
            template.sales_count = sum(variant.sales_count for variant in template.product_variant_ids)

    @api.multi
    def action_view_sales(self):
        """Return the window action listing confirmed sale lines of this
        template's variants."""
        self.ensure_one()
        action = self.env.ref('sale.action_product_sale_list')
        product_ids = self.product_variant_ids.ids
        # Copy the static attributes straight from the reference action ...
        result = dict(
            (key, getattr(action, key))
            for key in ('name', 'help', 'type', 'view_type', 'view_mode', 'target', 'res_model'))
        # ... and override context/domain for this template.
        result['context'] = "{'default_product_id': " + str(product_ids[0]) + "}"
        result['domain'] = [('state', 'in', ['sale', 'done']), ('product_id.product_tmpl_id', '=', self.id)]
        return result

    sales_count = fields.Integer(compute='_sales_count', string='# Sales')
    invoice_policy = fields.Selection(
        [('order', 'Ordered quantities'),
         ('delivery', 'Delivered quantities'),
         ('cost', 'Invoice based on time and material')],
        string='Invoicing Policy', default='order')
| agpl-3.0 |
protoplanet/raytracing | ray_main.py | 1 | 8253 | # ------------------------------------------------------------------------------------------ #
# Description : Python implementation of ray tracing equations stated in PhD thesis of Rice (1997)
# Electron/proton stratification according to Yabroff (1961) + IRI model available
# Geometry is 2D polar
#
# Author : Miroslav Mocak
# Date : 14/October/2016
# Usage : run ray_main.py (this code is OPEN-SOURCE and free to be used/modified by anyone)
# References : Rice W.K.M, 1997, "A ray tracing study of VLF phenomena", PhD thesis,
# : Space Physics Research Institute, Department of Physics, University of Natal
# : Yabroff (1961), Kimura (1966)
# ------------------------------------------------------------------------------------------ #
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import ode
import warnings
import re # python regular expressions
import ray_cmks
import ray_fncts
import ray_plot
import sys
warnings.filterwarnings('ignore')  # NOTE(review): silences ALL warnings globally; consider narrowing
# ---------------------------------------------- #
# READ INPUT PARAMETERS AND INITIAL CONDITIONS
# ---------------------------------------------- #
# Each parameter line of ray_param.dat carries its value in [brackets].
# 'param_file'/'raw_values' replace the original names 'file'/'input', which
# shadowed Python builtins; the with-statement also guarantees the file is
# closed even if a line fails to parse.
with open('ray_param.dat', 'r') as param_file:
    next(param_file)  # skip header line
    next(param_file)  # skip header line
    raw_values = []
    for line in param_file:
        prsvalue = re.search(r'\[(.*)\]', line).group(1)  # parse out values from square brackets
        raw_values.append(prsvalue)
freq_in = float(raw_values[0])    # first ray frequency [Hz]
freq_en = float(raw_values[1])    # last ray frequency [Hz]
freq_de = float(raw_values[2])    # frequency step [Hz]
orbit = float(raw_values[3])      # limiting altitude [km]
frr0 = float(raw_values[4])       # initial radius [Earth radii]
dth0 = float(raw_values[5])       # initial latitude [deg]
dchi0 = float(raw_values[6])      # initial wave normal angle [deg]
tt0 = float(raw_values[7])        # initial group delay time
tstop = float(raw_values[8])      # integration stop time
nsteps = float(raw_values[9])     # number of integration steps
pvode = float(raw_values[10])     # integrator selection flags (0/1), see main loop
pzvode = float(raw_values[11])
plsoda = float(raw_values[12])
pdopri5 = float(raw_values[13])
pdop853 = float(raw_values[14])
iono_el = float(raw_values[15])   # ionosphere model flags: exactly one may be 1
iono_np = float(raw_values[16])
iono_ir = float(raw_values[17])
iri_fna = raw_values[18]          # IRI data file name (kept as string)
pwhist = float(raw_values[19])    # plot group-delay-vs-frequency figure flag
if (iono_el == 1. and iono_np == 1.) or (iono_el == 1. and iono_ir == 1.) or (iono_np == 1. and iono_ir == 1.):
    print('STOP (in ray_main.py): choose only one ionospheric model')
    sys.exit()
# load constants
ray_cmks.pconstants()
rr0 = frr0*ray_cmks.Re # initial altitude :: approx 300 km = 1.0471*Re
th0 = dth0*np.pi/180.
chi0 = dchi0*np.pi/180.
G0 = tt0
t0 = 0.
dt = tstop/nsteps # calculate integration step
# bundle initial conditions
rtcG0 = [rr0,th0,chi0,G0]
# introduce some error handling !!
# select chosen ionospheric model
# ion = [model_tag, data_file_name]; the "0","0" sentinel means "not chosen"
ion = ["0","0"]
# initialize ionosphere profile arrays (heights and per-species densities)
height = []
ne = []
H_ions_per = []
O_ions_per = []
He_ions_per = []
O2_ions_per = []
NO_ions_per = []
N_ions_per = []
# packed profile handed to ray_fncts: [height, ne, O+, H+, He+, O2+, NO+, N+]
ionosphere = [height,ne,O_ions_per,H_ions_per,He_ions_per,O2_ions_per,NO_ions_per,N_ions_per]
if iono_el == 1:
    # analytic electron-only stratification; no data file needed
    fname = ""
    ion = ["iono_el",fname]
if iono_np == 1:
    # analytic electron/proton stratification; no data file needed
    fname = ""
    ion = ["iono_np",fname]
if iono_ir == 1:
    # IRI model: the profile is read from a tabulated .lst file
    # fname = "iri_2012_24537_night.lst"
    # fname = "iri_2012_25962_day.lst"
    fname = iri_fna
    # print(fname)
    ion = ["iono_ir",fname]
    # reset the profile arrays before (re)loading from file
    height = []
    ne = []
    O_ions_per = []
    H_ions_per = []
    He_ions_per = []
    O2_ions_per = []
    NO_ions_per = []
    N_ions_per = []
    # Open file
    # fname = 'iri_2012_22651_night.lst'
    fname = ion[1]
    f = open('IRI/'+fname, 'r')
    # Loop over lines and extract variables of interest
    for line in f:
        line = line.strip()
        columns = line.split()
        # if float(columns[5]) = -1.:
        # columns[5] = 0.
        # IRI marks missing values with negatives; clamp them to 0 so the
        # densities/percentages stay physical.
        if float(columns[8]) < 0.:
            columns[8] = 0.
        if float(columns[9]) < 0.:
            columns[9] = 0.
        if float(columns[10]) < 0.:
            columns[10] = 0.
        if float(columns[11]) < 0.:
            columns[11] = 0.
        if float(columns[12]) < 0.:
            columns[12] = 0.
        if float(columns[13]) < 0.:
            columns[13] = 0.
        if float(columns[14]) < 0.:
            columns[14] = 0.
        height.append(float(columns[5])) # height in km
        ne.append(float(columns[8])) # electron density in m-3
        O_ions_per.append(float(columns[9])) # atomic oxygen O+ ions percentage
        H_ions_per.append(float(columns[10])) # atomic hydrogen H+ ions percentage
        He_ions_per.append(float(columns[11])) # atomic helium He+ ions percentage
        O2_ions_per.append(float(columns[12])) # molecular oxygen O2+ ions percentage
        NO_ions_per.append(float(columns[13])) # nitric oxide ions NO+ percentage
        N_ions_per.append(float(columns[14])) # atomic nitrogen N+ ions percentage
    f.close()
    # the ray must stay below the top of the tabulated profile
    if np.asarray(height[-1]) < orbit:
        print('STOP (in ray_main.py): limiting orbit exceeds max altitude of IRI model')
        sys.exit()
    # repack with the freshly loaded arrays (same layout as above)
    ionosphere = [height,ne,O_ions_per,H_ions_per,He_ions_per,O2_ions_per,NO_ions_per,N_ions_per]
    #print(height[0],rr0-6371200.0,height[-1])
if ion == ["0","0"]:
    print("Error in ionospheric model")
# ---------------------------------------------- #
# CALCULATE RHS OF RAY TRACING EQUATIONS
# ---------------------------------------------- #
def f(t, rtcG, freq):
    """Right-hand side of the 2-D polar ray-tracing ODE system
    (Haselgrove-type equations as used in Rice 1997 / Yabroff 1961,
    per the file header references).

    t    : independent variable, required by the scipy.integrate.ode API
    rtcG : state vector [r, theta, chi, G] = radius, latitude,
           wave-normal angle, group delay time
    freq : wave frequency [Hz]

    Returns [dr/dt, dtheta/dt, dchi/dt, dG/dt].
    """
    rr, th, chi, G = rtcG # unpack current values
    c = 2.99792458e8  # speed of light [m/s]
    # phase refractive index and its partial derivatives; n is a tuple whose
    # element [0] is the index itself and [1] relates to the chi-derivative
    # (exact layout defined in ray_fncts, not visible here -- see dndch below)
    n = ray_fncts.phase_refractive_index(rr,th,chi,freq,ion,ionosphere)
    dndrr = ray_fncts.deriv_rr(rr,th,chi,freq,ion,ionosphere)   # dn/dr
    dndth = ray_fncts.deriv_th(rr,th,chi,freq,ion,ionosphere)   # dn/dtheta
    dndfr = ray_fncts.deriv_fr(rr,th,chi,freq,ion,ionosphere)   # dn/df
    dndch = -n[1]  # dn/dchi (sign convention from ray_fncts -- TODO confirm)
    # ngroup = n[0]+freq*dndfr
    derivs = [(1./n[0]**2)*(n[0]*np.cos(chi) + dndch*np.sin(chi)),
              (1./(rr*n[0]**2))*(n[0]*np.sin(chi) - dndch*np.cos(chi)),
              (1./(rr*n[0]**2))*(dndth*np.cos(chi) - (rr*dndrr+n[0])*np.sin(chi)),
              (1./c)*(1.+(freq/n[0])*dndfr)]
    return derivs
# ---------------------------------------------- #
# MAIN CALLS ODE SOLVER AND STORES RESULTS
# ---------------------------------------------- #
# Map the 0/1 integrator flags from ray_param.dat onto a
# scipy.integrate.ode integrator name.  Default to "vode" so that a
# parameter file with no flag set can no longer raise a NameError below.
intype = "vode"
if pvode == 1:
    intype="vode"
if pzvode == 1:
    intype="zvode"
if plsoda == 1:
    intype="lsoda"
if pdopri5 == 1:
    intype="dopri5"
if pdop853 == 1:
    intype="dop853"
print('Using ODE integrator: '+str(intype))
print('Limiting height: '+str(orbit)+str(' km'))
# set parameters for plotting
ray_plot.SetMatplotlibParams()
fd = freq_de
fend = int((freq_en-freq_in)/freq_de)  # number of frequencies to trace
# initial array for frequency and group delay time at chosen orbit
freqb = []
gdtb = []
nphaseb = []
for ii in range(1,fend+1):
    freq = freq_in+(ii-1)*fd # vary frequency
    print('Calculating ray path: '+str("%.2g" % freq)+' Hz')
    psoln = ode(f).set_integrator(intype,method='bdf')
    psoln.set_initial_value(rtcG0,t0).set_f_params(freq)
    radius = []
    latitude = []
    gdt = []
    nphase = []
    # Integrate until tstop, or until the ray leaves the shell between the
    # Earth's surface and the limiting orbit altitude.
    while psoln.successful() and psoln.t < tstop and psoln.y[0] > ray_cmks.Re and psoln.y[0] < (ray_cmks.Re+orbit*1.e3):
        psoln.integrate(psoln.t+dt)
        radius.append(psoln.y[0])
        latitude.append(psoln.y[1])
        gdt.append(psoln.y[3])
        nphase_single = ray_fncts.phase_refractive_index(psoln.y[0],psoln.y[1],psoln.y[2],freq,ion,ionosphere)
        nphase.append(nphase_single[0])
        # print(ray_fncts.phase_refractive_index(psoln.y[0],psoln.y[1],psoln.y[2],freq,ion,ionosphere))
        # print(psoln.y[2],(180./np.pi)*psoln.y[2])
    # (unused Cartesian xx/yy conversion removed; ray_plot works in polar coords)
    freqb.append(freq)
    # NOTE(review): gdt[-1] raises IndexError if the very first step already
    # left the allowed shell; the intended fallback is unclear, so left as is.
    gdtb.append(gdt[-1])
    nphaseb.append(nphase)
    ray_plot.finPlot(radius,latitude,gdt,freq,dth0,dchi0,ion,ii)
# ---------------------------------------------- #
# ray_plot RESULTS
# ---------------------------------------------- #
if pwhist == 1:
    ray_plot.finGdt(radius,latitude,gdtb,freqb,dth0,dchi0,ion)
    # ray_plot.finNphase(radius,latitude,gdtb,freqb,nphase,dth0,dchi0,ion)
plt.show()
plt.clf()
# ---------------------------------------------- #
# END
# ---------------------------------------------- #
| gpl-3.0 |
40223202/2015cdb_g2 | 2015scrum-3a6fed94d45237e506e5e1539e27a2c9e89e6740/static/Brython3.1.1-20150328-091302/Lib/platform.py | 620 | 51006 | #!/usr/bin/env python3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field emtpy)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invokation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import collections
import sys, os, re, subprocess
### Globals & Constants
# Determine the platform's /dev/null device
try:
    DEV_NULL = os.devnull
except AttributeError:
    # os.devnull was added in Python 2.4, so emulate it for earlier
    # Python versions (modern interpreters always take the branch above)
    if sys.platform in ('dos','win32','win16','os2'):
        # Use the old CP/M NUL as device name
        DEV_NULL = 'NUL'
    else:
        # Standard Unix uses /dev/null
        DEV_NULL = '/dev/null'
### Platform specific APIs
# Matches the three symbol families a gcc-linked binary may contain:
# __libc_init, GLIBC_x.y version tags, and libc*.so.N sonames.
_libc_search = re.compile(b'(__libc_init)'
                          b'|'
                          b'(GLIBC_([0-9.]+))'
                          b'|'
                          br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)

def libc_ver(executable=sys.executable,lib='',version='',
             chunksize=16384):
    """ Tries to determine the libc version that the file executable
        (which defaults to the Python interpreter) is linked against.

        Returns a tuple of strings (lib,version) which default to the
        given parameters in case the lookup fails.

        Note that the function has intimate knowledge of how different
        libc versions add symbols to the executable and thus is probably
        only useable for executables compiled using gcc.

        The file is read and scanned in chunks of chunksize bytes.
    """
    if hasattr(os.path, 'realpath'):
        # Python 2.2 introduced os.path.realpath(); it is used
        # here to work around problems with Cygwin not being
        # able to open symlinks for reading
        executable = os.path.realpath(executable)
    # The with-statement guarantees the file is closed even if reading or
    # decoding raises (the original leaked the handle on exceptions).
    with open(executable, 'rb') as f:
        binary = f.read(chunksize)
        pos = 0
        while 1:
            # Cheap substring test before running the (expensive) regex.
            if b'libc' in binary or b'GLIBC' in binary:
                m = _libc_search.search(binary,pos)
            else:
                m = None
            if not m:
                binary = f.read(chunksize)
                if not binary:
                    break
                pos = 0
                continue
            # Group layout mirrors _libc_search above.
            libcinit,glibc,glibcversion,so,threads,soversion = [
                s.decode('latin1') if s is not None else s
                for s in m.groups()]
            if libcinit and not lib:
                lib = 'libc'
            elif glibc:
                if lib != 'glibc':
                    lib = 'glibc'
                    version = glibcversion
                elif glibcversion > version:
                    # NOTE: string comparison of dotted versions (historic
                    # behaviour, kept as is)
                    version = glibcversion
            elif so:
                if lib != 'glibc':
                    lib = 'libc'
                    if soversion and soversion > version:
                        version = soversion
                    if threads and version[-len(threads):] != threads:
                        version = version + threads
            pos = m.end()
    return lib,version
def _dist_try_harder(distname,version,id):
    """ Tries some special tricks to get the distribution
        information in case the default method fails.
        Currently supports older SuSE Linux, Caldera OpenLinux and
        Slackware Linux distributions.
    """
    # NOTE(review): the open() handles below are never closed explicitly;
    # kept as in the original (CPython-era code relying on GC).
    if os.path.exists('/var/adm/inst-log/info'):
        # SuSE Linux stores distribution information in that file
        distname = 'SuSE'
        for line in open('/var/adm/inst-log/info'):
            tv = line.split()
            if len(tv) == 2:
                tag,value = tv
            else:
                continue
            if tag == 'MIN_DIST_VERSION':
                version = value.strip()
            elif tag == 'DIST_IDENT':
                # DIST_IDENT looks like "<name>-<arch>-<id>" -- keep field 2
                values = value.split('-')
                id = values[2]
        return distname,version,id
    if os.path.exists('/etc/.installed'):
        # Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
        for line in open('/etc/.installed'):
            pkg = line.split('-')
            if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
                # XXX does Caldera support non Intel platforms ? If yes,
                # where can we find the needed id ?
                return 'OpenLinux',pkg[1],id
    if os.path.isdir('/usr/lib/setup'):
        # Check for slackware version tag file (thanks to Greg Andruk)
        verfiles = os.listdir('/usr/lib/setup')
        # drop everything that is not a slack-version-* file
        for n in range(len(verfiles)-1, -1, -1):
            if verfiles[n][:14] != 'slack-version-':
                del verfiles[n]
        if verfiles:
            # lexicographically largest tag is taken as the current version
            verfiles.sort()
            distname = 'slackware'
            version = verfiles[-1][14:]
            return distname,version,id
    # nothing found: hand back the caller's defaults unchanged
    return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = firstline.strip().split()
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
def linux_distribution(distname='', version='', id='',

                       supported_dists=_supported_dists,
                       full_distribution_name=1):

    """ Tries to determine the name of the Linux OS distribution name.

        The function first looks for a distribution release file in
        /etc and then reverts to _dist_try_harder() in case no
        suitable files are found.

        supported_dists may be given to define the set of Linux
        distributions to look for. It defaults to a list of currently
        supported Linux distributions identified by their release file
        name.

        If full_distribution_name is true (default), the full
        distribution read from the OS is returned. Otherwise the short
        name taken from supported_dists is used.

        Returns a tuple (distname,version,id) which default to the
        args given as parameters.

    """
    try:
        etc = os.listdir('/etc')
    except os.error:
        # Probably not a Unix system
        return distname,version,id
    etc.sort()

    # Find the first (alphabetically) /etc release file that belongs
    # to one of the supported distributions.
    release_file = None
    for entry in etc:
        m = _release_filename.match(entry)
        if m is not None and m.group(1) in supported_dists:
            distname = m.group(1)
            release_file = entry
            break
    if release_file is None:
        return _dist_try_harder(distname,version,id)

    # Read the first line and parse it.
    with open('/etc/'+release_file, 'r') as f:
        firstline = f.readline()
    _distname, _version, _id = _parse_release_file(firstline)

    # Only override the caller's defaults with non-empty findings.
    if _distname and full_distribution_name:
        distname = _distname
    if _version:
        version = _version
    if _id:
        id = _id
    return distname, version, id
# To maintain backwards compatibility:

def dist(distname='',version='',id='',

         supported_dists=_supported_dists):

    """ Tries to determine the name of the Linux OS distribution name.

        The function first looks for a distribution release file in
        /etc and then reverts to _dist_try_harder() in case no
        suitable files are found.

        Returns a tuple (distname,version,id) which default to the
        args given as parameters.

    """
    # Same as linux_distribution(), but reporting the short dist name
    # (full_distribution_name=0) for backwards compatibility.
    return linux_distribution(
        distname, version, id,
        supported_dists=supported_dists,
        full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):

    """ Portable popen() interface.

        Deprecated: delegates straight to os.popen().
    """
    import warnings
    warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
    return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = version.split('.')
if build:
l.append(build)
try:
ints = map(int,l)
except ValueError:
strings = l
else:
strings = list(map(str,ints))
version = '.'.join(strings[:3])
return version
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',

               supported_platforms=('win32','win16','dos','os2')):

    """ Tries to figure out the OS version used and returns
        a tuple (system,release,version).

        It uses the "ver" shell command for this which is known
        to exists on Windows, DOS and OS/2. XXX Others too ?

        In case this fails, the given parameters are used as
        defaults.

    """
    if sys.platform not in supported_platforms:
        return system,release,version

    # Try some common cmd strings; the first one that runs and exits
    # cleanly wins (note: popen() here is this module's deprecated
    # wrapper, which emits a DeprecationWarning).
    for cmd in ('ver','command /c ver','cmd /c ver'):
        try:
            pipe = popen(cmd)
            info = pipe.read()
            if pipe.close():
                # Non-zero exit status -- treat like a spawn failure.
                raise os.error('command failed')
            # XXX How can I suppress shell errors from being written
            #     to stderr ?
        except os.error as why:
            #print 'Command %s failed: %s' % (cmd,why)
            continue
        except IOError as why:
            #print 'Command %s failed: %s' % (cmd,why)
            continue
        else:
            break
    else:
        # No candidate command worked; hand back the caller's defaults.
        return system,release,version

    # Parse the output
    info = info.strip()
    m = _ver_output.match(info)
    if m is not None:
        system,release,version = m.groups()
        # Strip trailing dots from version and release
        if release[-1] == '.':
            release = release[:-1]
        if version[-1] == '.':
            version = version[:-1]
        # Normalize the version and build strings (eliminating additional
        # zeros)
        version = _norm_version(version)
    return system,release,version
def _win32_getvalue(key,name,default=''):

    """ Read a value for name from the registry key.

        In case this fails, default is returned.

    """
    try:
        # Prefer the win32api package when it is installed...
        from win32api import RegQueryValueEx
    except ImportError:
        # ...otherwise emulate it with the stdlib winreg module.
        import winreg
        RegQueryValueEx = winreg.QueryValueEx
    try:
        return RegQueryValueEx(key,name)
    except:
        # Deliberately broad: any registry failure yields the default.
        return default
def win32_ver(release='',version='',csd='',ptype=''):

    """ Get additional version information from the Windows Registry
        and return a tuple (version,csd,ptype) referring to version
        number, CSD level (service pack), and OS type (multi/single
        processor).

        As a hint: ptype returns 'Uniprocessor Free' on single
        processor NT machines and 'Multiprocessor Free' on multi
        processor machines. The 'Free' refers to the OS version being
        free of debugging code. It could also state 'Checked' which
        means the OS version uses debugging code, i.e. code that
        checks arguments, ranges, etc. (Thomas Heller).

        Note: this function works best with Mark Hammond's win32
        package installed, but also on Python 2.3 and later. It
        obviously only runs on Win32 compatible platforms.

    """
    # XXX Is there any way to find out the processor type on WinXX ?
    # XXX Is win32 available on Windows CE ?
    #
    # Adapted from code posted by Karl Putland to comp.lang.python.
    #
    # The mappings between reg. values and release names can be found
    # here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp

    # Import the needed APIs
    try:
        import win32api
        from win32api import RegQueryValueEx, RegOpenKeyEx, \
             RegCloseKey, GetVersionEx
        from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
             VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
    except ImportError:
        # Emulate the win32api module using Python APIs
        try:
            sys.getwindowsversion
        except AttributeError:
            # No emulation possible, so return the defaults...
            # (this is the path taken on every non-Windows platform)
            return release,version,csd,ptype
        else:
            # Emulation using winreg (added in Python 2.0) and
            # sys.getwindowsversion() (added in Python 2.3)
            import winreg
            GetVersionEx = sys.getwindowsversion
            RegQueryValueEx = winreg.QueryValueEx
            RegOpenKeyEx = winreg.OpenKeyEx
            RegCloseKey = winreg.CloseKey
            HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE
            VER_PLATFORM_WIN32_WINDOWS = 1
            VER_PLATFORM_WIN32_NT = 2
            VER_NT_WORKSTATION = 1
            VER_NT_SERVER = 3
            REG_SZ = 1

    # Find out the registry key and some general version infos
    # (sys.getwindowsversion() unpacks like a 5-tuple here).
    # NOTE(review): 'min' shadows the builtin; kept for compatibility.
    winver = GetVersionEx()
    maj,min,buildno,plat,csd = winver
    version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
    if hasattr(winver, "service_pack"):
        # OSVERSIONINFOEX-capable getwindowsversion(): prefer the
        # numeric service pack field.
        if winver.service_pack != "":
            csd = 'SP%s' % winver.service_pack_major
    else:
        # Fall back to abbreviating the "Service Pack N" string.
        if csd[:13] == 'Service Pack ':
            csd = 'SP' + csd[13:]

    if plat == VER_PLATFORM_WIN32_WINDOWS:
        # Windows 9x/Me line.
        regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
        # Try to guess the release name
        if maj == 4:
            if min == 0:
                release = '95'
            elif min == 10:
                release = '98'
            elif min == 90:
                release = 'Me'
            else:
                release = 'postMe'
        elif maj == 5:
            release = '2000'

    elif plat == VER_PLATFORM_WIN32_NT:
        # Windows NT line (NT/2000/XP/Vista/7/8/...).
        regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
        if maj <= 4:
            release = 'NT'
        elif maj == 5:
            if min == 0:
                release = '2000'
            elif min == 1:
                release = 'XP'
            elif min == 2:
                release = '2003Server'
            else:
                release = 'post2003'
        elif maj == 6:
            if hasattr(winver, "product_type"):
                product_type = winver.product_type
            else:
                product_type = VER_NT_WORKSTATION
                # Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
                # or help from the registry, we cannot properly identify
                # non-workstation versions.
                try:
                    key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
                    name, type = RegQueryValueEx(key, "ProductName")
                    # Discard any type that isn't REG_SZ
                    if type == REG_SZ and name.find("Server") != -1:
                        product_type = VER_NT_SERVER
                except WindowsError:
                    # Use default of VER_NT_WORKSTATION
                    pass

            if min == 0:
                if product_type == VER_NT_WORKSTATION:
                    release = 'Vista'
                else:
                    release = '2008Server'
            elif min == 1:
                if product_type == VER_NT_WORKSTATION:
                    release = '7'
                else:
                    release = '2008ServerR2'
            elif min == 2:
                if product_type == VER_NT_WORKSTATION:
                    release = '8'
                else:
                    release = '2012Server'
            else:
                release = 'post2012Server'

    else:
        if not release:
            # E.g. Win3.1 with win32s
            release = '%i.%i' % (maj,min)
        # Unknown platform id: no registry to consult, return early.
        return release,version,csd,ptype

    # Open the registry key
    try:
        keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
        # Get a value to make sure the key exists...
        RegQueryValueEx(keyCurVer, 'SystemRoot')
    except:
        return release,version,csd,ptype

    # Parse values
    #subversion = _win32_getvalue(keyCurVer,
    #                            'SubVersionNumber',
    #                            ('',1))[0]
    #if subversion:
    #   release = release + subversion # 95a, 95b, etc.
    build = _win32_getvalue(keyCurVer,
                            'CurrentBuildNumber',
                            ('',1))[0]
    ptype = _win32_getvalue(keyCurVer,
                           'CurrentType',
                           (ptype,1))[0]

    # Normalize version
    version = _norm_version(version,build)

    # Close key
    RegCloseKey(keyCurVer)
    return release,version,csd,ptype
def _mac_ver_lookup(selectors,default=None):

    # Query each gestalt selector in turn, substituting *default*
    # for selectors that cannot be resolved.
    from _gestalt import gestalt
    values = []
    for selector in selectors:
        try:
            values.append(gestalt(selector))
        except (RuntimeError, OSError):
            values.append(default)
    return values
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
    """
        Mac OS version lookup via the legacy gestalt() API; returns
        (release, versioninfo, machine) or None when _gestalt is missing.

        Thanks to Mark R. Levinson for mailing documentation links and
        code examples for this function. Documentation for the
        gestalt() API is available online at:

           http://www.rgaros.nl/gestalt/
    """
    # Check whether the version info module is available
    try:
        import _gestalt
    except ImportError:
        return None
    # Get the infos
    sysv, sysa = _mac_ver_lookup(('sysv','sysa'))
    # Decode the infos
    if sysv:
        # 'sysv' packs the version as BCD: 0xMMmp -> major/minor/patch.
        major = (sysv & 0xFF00) >> 8
        minor = (sysv & 0x00F0) >> 4
        patch = (sysv & 0x000F)

        if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
            # higher than 9. Apple introduced 3 new
            # gestalt codes in 10.4 to deal with this
            # issue (needed because patch levels can
            # run higher than 9, such as 10.4.11)
            major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
            release = '%i.%i.%i' %(major, minor, patch)
        else:
            release = '%s.%i.%i' % (_bcd2str(major),minor,patch)

    # NOTE(review): if the 'sysv' lookup yields a falsy value, 'release'
    # is never bound and the return below raises NameError; likewise
    # 'machine' when 'sysa' is falsy. Presumably both selectors always
    # resolve on the Macs this runs on -- confirm before reuse.
    if sysa:
        machine = {0x1: '68k',
                   0x2: 'PowerPC',
                   0xa: 'i386'}.get(sysa,'')

    versioninfo=('', '', '')
    return release,versioninfo,machine
def _mac_ver_xml():
    # Read the OS release from the SystemVersion property list; returns
    # (release, versioninfo, machine) or None when unavailable.
    plist_path = '/System/Library/CoreServices/SystemVersion.plist'
    if not os.path.exists(plist_path):
        return None

    try:
        import plistlib
    except ImportError:
        return None

    info = plistlib.readPlist(plist_path)
    release = info['ProductVersion']
    machine = os.uname().machine
    if machine in ('ppc', 'Power Macintosh'):
        # for compatibility with the gestalt based code
        machine = 'PowerPC'

    return release, ('', '', ''), machine
def mac_ver(release='',versioninfo=('','',''),machine=''):

    """ Get MacOS version information and return it as tuple (release,
        versioninfo, machine) with versioninfo being a tuple (version,
        dev_stage, non_release_version).

        Entries which cannot be determined are set to the parameter values
        which default to ''. All tuple entries are strings.
    """
    # Prefer the XML property list (always present on modern OS X);
    # fall back to the gestalt() API, then to the caller's defaults.
    for probe in (_mac_ver_xml, _mac_ver_gestalt):
        info = probe()
        if info is not None:
            return info

    return release,versioninfo,machine
def _java_getprop(name,default):
    # Read a Java system property, falling back to *default* when the
    # property is unset or the API is unavailable.
    from java.lang import System
    try:
        value = System.getProperty(name)
    except AttributeError:
        return default
    return default if value is None else value
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):

    """ Version interface for Jython.

        Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
        a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
        tuple (os_name,os_version,os_arch).

        Values which cannot be determined are set to the defaults
        given as parameters (which all default to '').

    """
    # Only meaningful under Jython; on CPython the import fails and the
    # defaults are returned untouched.
    try:
        import java.lang
    except ImportError:
        return release,vendor,vminfo,osinfo

    vendor = _java_getprop('java.vendor', vendor)
    release = _java_getprop('java.version', release)

    vm_name, vm_release, vm_vendor = vminfo
    vminfo = (_java_getprop('java.vm.name', vm_name),
              _java_getprop('java.vm.version', vm_release),
              _java_getprop('java.vm.vendor', vm_vendor))

    os_name, os_version, os_arch = osinfo
    osinfo = (_java_getprop('java.os.name', os_name),
              _java_getprop('java.os.version', os_version),
              _java_getprop('java.os.arch', os_arch))

    return release, vendor, vminfo, osinfo
### System name aliasing

def system_alias(system,release,version):

    """ Returns (system,release,version) aliased to common
        marketing names used for some systems.

        It also does some reordering of the information in some cases
        where it would otherwise cause confusion.

    """
    if system == 'Rhapsody':
        # Apple's BSD derivative
        # XXX How can we determine the marketing release number ?
        return 'MacOS X Server',system+release,version

    if system == 'SunOS':
        # Sun's OS
        if release < '5':
            # These releases use the old name SunOS
            return system,release,version
        # Modify release (marketing release = SunOS release - 3),
        # e.g. SunOS 5.8 -> Solaris 2.8.
        parts = release.split('.')
        if parts:
            try:
                major = int(parts[0]) - 3
            except ValueError:
                pass
            else:
                parts[0] = str(major)
                release = '.'.join(parts)
        # (The original code branched on release < '6' here, but both
        # branches assigned the same name.)
        system = 'Solaris'

    elif system == 'IRIX64':
        # IRIX reports IRIX64 on platforms with 64-bit support; yet it
        # is really a version and not a different platform, since 32-bit
        # apps are also supported..
        system = 'IRIX'
        version = (version + ' (64bit)') if version else '64bit'

    elif system in ('win32','win16'):
        # In case one of the other tricks
        system = 'Windows'

    return system,release,version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ','_')
platform = platform.replace('/','-')
platform = platform.replace('\\','-')
platform = platform.replace(':','-')
platform = platform.replace(';','-')
platform = platform.replace('"','-')
platform = platform.replace('(','-')
platform = platform.replace(')','-')
# No need to report 'unknown' information...
platform = platform.replace('unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):

    """ Interface to the system's uname command.
    """
    if sys.platform in ('dos','win32','win16','os2'):
        # No uname command on these platforms (XXX others too?)
        return default
    try:
        pipe = os.popen('uname %s 2> %s' % (option, DEV_NULL))
    except (AttributeError,os.error):
        return default
    output = pipe.read().strip()
    rc = pipe.close()
    # An empty result or non-zero exit status means failure.
    return default if (not output or rc) else output
def _syscmd_file(target,default=''):

    """ Interface to the system's file command.

        The function uses the -b option of the file command to have it
        omit the filename in its output. Follow the symlinks. It returns
        default in case the command should fail.

    """
    if sys.platform in ('dos','win32','win16','os2'):
        # No 'file' command on these platforms (XXX others too?)
        return default
    target = _follow_symlinks(target)
    try:
        proc = subprocess.Popen(['file', target],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except (AttributeError,os.error):
        return default
    output = proc.communicate()[0].decode('latin-1')
    rc = proc.wait()
    # Empty output or a non-zero exit status both count as failure.
    return default if (not output or rc) else output
### Information about the used architecture

# Default values for architecture; non-empty strings override the
# defaults given as parameters.  Maps sys.platform -> (bits, linkage)
# fallbacks used by architecture() when the 'file' command gives no
# usable output.
_default_architecture = {
    'win32': ('','WindowsPE'),
    'win16': ('','Windows'),
    'dos': ('','MSDOS'),
}
def architecture(executable=sys.executable,bits='',linkage=''):

    """ Queries the given executable (defaults to the Python interpreter
        binary) for various architecture information.

        Returns a tuple (bits,linkage) which contains information about
        the bit architecture and the linkage format used for the
        executable. Both values are returned as strings.

        Values that cannot be determined are returned as given by the
        parameter presets. If bits is given as '', the sizeof(pointer)
        (or sizeof(long) on Python version < 1.5.2) is used as
        indicator for the supported pointer size.

        The function relies on the system's "file" command to do the
        actual work. This is available on most if not all Unix
        platforms. On some non-Unix platforms where the "file" command
        does not exist and the executable is set to the Python interpreter
        binary defaults from _default_architecture are used.

    """
    # Use the sizeof(pointer) as default number of bits if nothing
    # else is given as default.
    if not bits:
        import struct
        try:
            size = struct.calcsize('P')
        except struct.error:
            # Older installations can only query longs
            size = struct.calcsize('l')
        bits = str(size*8) + 'bit'

    # Get data from the 'file' system command
    if executable:
        fileout = _syscmd_file(executable, '')
    else:
        fileout = ''

    if not fileout and \
       executable == sys.executable:
        # "file" command did not return anything; we'll try to provide
        # some sensible defaults then...
        if sys.platform in _default_architecture:
            b,l = _default_architecture[sys.platform]
            if b:
                bits = b
            if l:
                linkage = l
        return bits,linkage

    if 'executable' not in fileout:
        # Format not supported
        return bits,linkage

    # Bits -- sniffed from substrings of the 'file' output; the order of
    # these checks is significant.
    if '32-bit' in fileout:
        bits = '32bit'
    elif 'N32' in fileout:
        # On Irix only
        bits = 'n32bit'
    elif '64-bit' in fileout:
        bits = '64bit'

    # Linkage -- note 'PE' is checked before 'COFF' because PE output
    # typically contains both markers.
    if 'ELF' in fileout:
        linkage = 'ELF'
    elif 'PE' in fileout:
        # E.g. Windows uses this format
        if 'Windows' in fileout:
            linkage = 'WindowsPE'
        else:
            linkage = 'PE'
    elif 'COFF' in fileout:
        linkage = 'COFF'
    elif 'MS-DOS' in fileout:
        linkage = 'MSDOS'
    else:
        # XXX the A.OUT format also falls under this class...
        pass

    return bits,linkage
### Portable uname() interface

# Result type returned (and cached) by uname(); the field order matches
# the historical 6-tuple interface.
uname_result = collections.namedtuple("uname_result",
                    "system node release version machine processor")

# Module-level cache -- uname() is computed at most once per process.
_uname_cache = None
def uname():

    """ Fairly portable uname interface. Returns a tuple
        of strings (system,node,release,version,machine,processor)
        identifying the underlying platform.

        Note that unlike the os.uname function this also returns
        possible processor information as an additional tuple entry.

        Entries which cannot be determined are set to ''.

    """
    global _uname_cache
    no_os_uname = 0

    if _uname_cache is not None:
        return _uname_cache

    processor = ''

    # Get some infos from the builtin os.uname API...
    try:
        system,node,release,version,machine = os.uname()
    except AttributeError:
        # os.uname is unavailable (e.g. Windows); probe by other means.
        no_os_uname = 1

    if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
        # Hmm, no there is either no uname or uname has returned
        #'unknowns'... we'll have to poke around the system then.
        if no_os_uname:
            system = sys.platform
            release = ''
            version = ''
            node = _node()
            machine = ''

        use_syscmd_ver = 1

        # Try win32_ver() on win32 platforms
        if system == 'win32':
            release,version,csd,ptype = win32_ver()
            if release and version:
                use_syscmd_ver = 0
            # Try to use the PROCESSOR_* environment variables
            # available on Win XP and later; see
            # http://support.microsoft.com/kb/888731 and
            # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
            if not machine:
                # WOW64 processes mask the native architecture
                if "PROCESSOR_ARCHITEW6432" in os.environ:
                    machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
                else:
                    machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
            if not processor:
                processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)

        # Try the 'ver' system command available on some
        # platforms
        if use_syscmd_ver:
            system,release,version = _syscmd_ver(system)
            # Normalize system to what win32_ver() normally returns
            # (_syscmd_ver() tends to return the vendor name as well)
            if system == 'Microsoft Windows':
                system = 'Windows'
            elif system == 'Microsoft' and release == 'Windows':
                # Under Windows Vista and Windows Server 2008,
                # Microsoft changed the output of the ver command. The
                # release is no longer printed.  This causes the
                # system and release to be misidentified.
                system = 'Windows'
                if '6.0' == version[:3]:
                    release = 'Vista'
                else:
                    release = ''

        # In case we still don't know anything useful, we'll try to
        # help ourselves
        if system in ('win32','win16'):
            if not version:
                if system == 'win32':
                    version = '32bit'
                else:
                    version = '16bit'
            system = 'Windows'

        elif system[:4] == 'java':
            release,vendor,vminfo,osinfo = java_ver()
            system = 'Java'
            version = ', '.join(vminfo)
            if not version:
                version = vendor

    # System specific extensions
    if system == 'OpenVMS':
        # OpenVMS seems to have release and version mixed up
        if not release or release == '0':
            release = version
            version = ''
        # Get processor information
        try:
            import vms_lib
        except ImportError:
            pass
        else:
            csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
            if (cpu_number >= 128):
                processor = 'Alpha'
            else:
                processor = 'VAX'
    if not processor:
        # Get processor information from the uname system command
        processor = _syscmd_uname('-p','')

    #If any unknowns still exist, replace them with ''s, which are more portable
    if system == 'unknown':
        system = ''
    if node == 'unknown':
        node = ''
    if release == 'unknown':
        release = ''
    if version == 'unknown':
        version = ''
    if machine == 'unknown':
        machine = ''
    if processor == 'unknown':
        processor = ''

    #  normalize name (ver-command output under Vista, see above)
    if system == 'Microsoft' and release == 'Windows':
        system = 'Windows'
        release = 'Vista'

    # Cache the fully-normalized result for subsequent calls.
    _uname_cache = uname_result(system,node,release,version,machine,processor)
    return _uname_cache
### Direct interfaces to some of the uname() return values

def system():

    """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info.system

def node():

    """ Returns the computer's network name (which may not be fully
        qualified).

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info.node

def release():

    """ Returns the system's release, e.g. '2.2.0' or 'NT'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info.release

def version():

    """ Returns the system's release version, e.g. '#3 on degas'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info.version

def machine():

    """ Returns the machine type, e.g. 'i386'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info.machine

def processor():

    """ Returns the (true) processor name, e.g. 'amdk6'.

        An empty string is returned if the value cannot be
        determined. Note that many platforms do not provide this
        information or simply return the same value as for machine(),
        e.g.  NetBSD does this.
    """
    info = uname()
    return info.processor
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[([^\]]+)\]?', re.ASCII)
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
'([\d\.]+)'
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)', re.ASCII)
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
r'([\d.]+)\s*'
'\(IronPython\s*'
'[\d.]+\s*'
'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):

    """ Returns a parsed version of Python's sys.version as tuple
        (name, version, branch, revision, buildno, builddate, compiler)
        referring to the Python implementation name, version, branch,
        revision, build number, build date/time as string and the compiler
        identification string.

        Note that unlike the Python sys.version, the returned value
        for the Python version will always include the patchlevel (it
        defaults to '.0').

        The function returns empty strings for tuple entries that
        cannot be determined.

        sys_version may be given to parse an alternative version
        string, e.g. if the version was read from a different Python
        interpreter.

    """
    # Get the Python version
    if sys_version is None:
        sys_version = sys.version

    # Try the cache first
    result = _sys_version_cache.get(sys_version, None)
    if result is not None:
        return result

    # Parse it -- implementation is detected by substring / platform
    # probes, so the branch order below matters.
    if 'Brython' in sys_version:
        # Brython (browser Python); no build number/date available.
        name = 'Brython'
        _parser=re.compile("^(\d+\.\d+\.\d+)[^[]+\[(.*)\]")
        match=_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse Brython sys.version: %s' %
                repr(sys_version))
        #version, alt_version, compiler = match.groups()
        version, compiler = match.groups()
        alt_version = ''
        buildno = ''
        builddate = ''
    elif 'IronPython' in sys_version:
        # IronPython
        name = 'IronPython'
        if sys_version.startswith('IronPython'):
            match = _ironpython_sys_version_parser.match(sys_version)
        else:
            # IronPython 2.6/2.7 uses a different sys.version layout.
            match = _ironpython26_sys_version_parser.match(sys_version)

        if match is None:
            raise ValueError(
                'failed to parse IronPython sys.version: %s' %
                repr(sys_version))

        version, alt_version, compiler = match.groups()
        buildno = ''
        builddate = ''

    elif sys.platform.startswith('java'):
        # Jython
        name = 'Jython'
        match = _sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse Jython sys.version: %s' %
                repr(sys_version))
        version, buildno, builddate, buildtime, _ = match.groups()
        compiler = sys.platform

    elif "PyPy" in sys_version:
        # PyPy
        name = "PyPy"
        match = _pypy_sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError("failed to parse PyPy sys.version: %s" %
                             repr(sys_version))
        version, buildno, builddate, buildtime = match.groups()
        compiler = ""

    else:
        # CPython
        match = _sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse CPython sys.version: %s' %
                repr(sys_version))
        version, buildno, builddate, buildtime, compiler = \
              match.groups()
        name = 'CPython'
        builddate = builddate + ' ' + buildtime

    # Branch/revision come from interpreter build metadata, not from
    # the version string itself.
    if hasattr(sys, '_mercurial'):
        _, branch, revision = sys._mercurial
    elif hasattr(sys, 'subversion'):
        # sys.subversion was added in Python 2.5
        _, branch, revision = sys.subversion
    else:
        branch = ''
        revision = ''

    # Add the patchlevel version if missing
    l = version.split('.')
    if len(l) == 2:
        l.append('0')
        version = '.'.join(l)

    # Build and cache the result
    result = (name, version, branch, revision, buildno, builddate, compiler)
    _sys_version_cache[sys_version] = result
    return result
def python_implementation():

    """ Returns a string identifying the Python implementation.

        Currently, the following implementations are identified:
          'CPython' (C implementation of Python),
          'IronPython' (.NET implementation of Python),
          'Jython' (Java implementation of Python),
          'PyPy' (Python implementation of Python).

    """
    impl_name = _sys_version()[0]
    return impl_name

def python_version():

    """ Returns the Python version as string 'major.minor.patchlevel'.

        Note that unlike the Python sys.version, the returned value
        will always include the patchlevel (it defaults to 0).

    """
    version_string = _sys_version()[1]
    return version_string

def python_version_tuple():

    """ Returns the Python version as tuple (major, minor, patchlevel)
        of strings.

        Note that unlike the Python sys.version, the returned value
        will always include the patchlevel (it defaults to 0).

    """
    parts = _sys_version()[1].split('.')
    return tuple(parts)

def python_branch():

    """ Returns a string identifying the Python implementation
        branch.

        For CPython this is the Subversion branch from which the
        Python binary was built.

        If not available, an empty string is returned.

    """
    branch = _sys_version()[2]
    return branch

def python_revision():

    """ Returns a string identifying the Python implementation
        revision.

        For CPython this is the Subversion revision from which the
        Python binary was built.

        If not available, an empty string is returned.

    """
    revision = _sys_version()[3]
    return revision

def python_build():

    """ Returns a tuple (buildno, builddate) stating the Python
        build number and date as strings.

    """
    build_info = _sys_version()[4:6]
    return build_info

def python_compiler():

    """ Returns a string identifying the compiler used for compiling
        Python.

    """
    compiler = _sys_version()[6]
    return compiler
### The Opus Magnum of platform strings :-)

# Cache keyed by the (aliased, terse) flag combination.
_platform_cache = {}

def platform(aliased=0, terse=0):

    """ Returns a single string identifying the underlying platform
        with as much useful information as possible (but no more :).

        The output is intended to be human readable rather than
        machine parseable. It may look different on different
        platforms and this is intended.

        If "aliased" is true, the function will use aliases for
        various platforms that report system names which differ from
        their common names, e.g. SunOS will be reported as
        Solaris. The system_alias() function is used to implement
        this.

        Setting terse to true causes the function to return only the
        absolute minimum information needed to identify the platform.

    """
    result = _platform_cache.get((aliased, terse), None)
    if result is not None:
        return result

    # Get uname information and then apply platform specific cosmetics
    # to it...
    system,node,release,version,machine,processor = uname()
    if machine == processor:
        processor = ''
    if aliased:
        system,release,version = system_alias(system,release,version)

    if system == 'Windows':
        # MS platforms
        rel,vers,csd,ptype = win32_ver(version)
        if terse:
            platform = _platform(system,release)
        else:
            platform = _platform(system,release,version,csd)

    elif system in ('Linux',):
        # Linux based systems
        distname,distversion,distid = dist('')
        if distname and not terse:
            platform = _platform(system,release,machine,processor,
                                 'with',
                                 distname,distversion,distid)
        else:
            # If the distribution name is unknown check for libc vs. glibc
            # (libc_ver() is defined earlier in this module, outside this
            # excerpt).
            libcname,libcversion = libc_ver(sys.executable)
            platform = _platform(system,release,machine,processor,
                                 'with',
                                 libcname+libcversion)
    elif system == 'Java':
        # Java platforms
        r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
        if terse or not os_name:
            platform = _platform(system,release,version)
        else:
            platform = _platform(system,release,version,
                                 'on',
                                 os_name,os_version,os_arch)

    elif system == 'MacOS':
        # MacOS platforms
        if terse:
            platform = _platform(system,release)
        else:
            platform = _platform(system,release,machine)

    else:
        # Generic handler
        if terse:
            platform = _platform(system,release)
        else:
            bits,linkage = architecture(sys.executable)
            platform = _platform(system,release,machine,processor,bits,linkage)

    _platform_cache[(aliased, terse)] = platform
    return platform
### Command line interface

if __name__ == '__main__':
    # Default is to print the aliased verbose platform string.
    # 'terse'/'--terse' prints the minimal form; 'nonaliased'/
    # '--nonaliased' disables marketing-name aliasing (system_alias()).
    terse = ('terse' in sys.argv or '--terse' in sys.argv)
    aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
    print(platform(aliased,terse))
    sys.exit(0)
| gpl-3.0 |
PeterWangIntel/chromium-crosswalk | tools/telemetry/telemetry/core/platform/win_platform_backend.py | 5 | 14006 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import collections
import contextlib
import ctypes
import logging
import os
import platform
import re
import socket
import struct
import subprocess
import sys
import time
import zipfile
from telemetry.core import exceptions
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import platform_backend
from telemetry.core.platform.power_monitor import msr_power_monitor
from telemetry.core import util
from telemetry import decorators
from telemetry.util import cloud_storage
from telemetry.util import path
try:
import pywintypes # pylint: disable=F0401
import win32api # pylint: disable=F0401
from win32com.shell import shell # pylint: disable=F0401,E0611
from win32com.shell import shellcon # pylint: disable=F0401,E0611
import win32con # pylint: disable=F0401
import win32gui # pylint: disable=F0401
import win32process # pylint: disable=F0401
import win32security # pylint: disable=F0401
except ImportError:
pywintypes = None
shell = None
shellcon = None
win32api = None
win32con = None
win32gui = None
win32process = None
win32security = None
def _InstallWinRing0():
  """WinRing0 is used for reading MSRs.

  Installs the WinRing0 user-mode DLL and kernel driver next to the Python
  executable if they are not already present, downloading the zipped binaries
  from cloud storage. The DLL bitness follows the Python interpreter, while
  the driver bitness follows the OS.
  """
  executable_dir = os.path.dirname(sys.executable)
  python_is_64_bit = sys.maxsize > 2 ** 32
  dll_file_name = 'WinRing0x64.dll' if python_is_64_bit else 'WinRing0.dll'
  dll_path = os.path.join(executable_dir, dll_file_name)
  os_is_64_bit = platform.machine().endswith('64')
  driver_file_name = 'WinRing0x64.sys' if os_is_64_bit else 'WinRing0.sys'
  driver_path = os.path.join(executable_dir, driver_file_name)
  # Check for WinRing0 and download if needed.
  if not (os.path.exists(dll_path) and os.path.exists(driver_path)):
    win_binary_dir = os.path.join(
        path.GetTelemetryDir(), 'bin', 'win', 'AMD64')
    zip_path = os.path.join(win_binary_dir, 'winring0.zip')
    cloud_storage.GetIfChanged(zip_path, bucket=cloud_storage.PUBLIC_BUCKET)
    try:
      with zipfile.ZipFile(zip_path, 'r') as zip_file:
        error_message = (
            'Failed to extract %s into %s. If python claims that '
            'the zip file is locked, this may be a lie. The problem may be '
            'that python does not have write permissions to the destination '
            'directory.'
        )
        # Install DLL.
        if not os.path.exists(dll_path):
          try:
            zip_file.extract(dll_file_name, executable_dir)
          except:
            # Log a hint for the most common failure mode, then re-raise.
            logging.error(error_message % (dll_file_name, executable_dir))
            raise
        # Install kernel driver.
        if not os.path.exists(driver_path):
          try:
            zip_file.extract(driver_file_name, executable_dir)
          except:
            logging.error(error_message % (driver_file_name, executable_dir))
            raise
    finally:
      # The zip is removed even on failure so a corrupt download is re-fetched.
      os.remove(zip_path)
def TerminateProcess(process_handle):
  """Forcibly ends the process behind |process_handle| if it is still running.

  A falsy handle is a no-op. A valid handle is closed in either case.
  """
  if not process_handle:
    return
  exit_code = win32process.GetExitCodeProcess(process_handle)
  if exit_code == win32con.STILL_ACTIVE:
    win32process.TerminateProcess(process_handle, 0)
  process_handle.close()
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
  """Windows implementation of the desktop platform backend.

  Process/CPU/memory queries go through the pywin32 modules imported at the
  top of this file; MSR reads are delegated to an elevated helper server
  process started lazily (see _StartMsrServerIfNeeded).
  """
  def __init__(self):
    super(WinPlatformBackend, self).__init__()
    # Handle and TCP port of the elevated MSR-reading helper process.
    self._msr_server_handle = None
    self._msr_server_port = None
    self._power_monitor = msr_power_monitor.MsrPowerMonitor(self)
  @classmethod
  def IsPlatformBackendForHost(cls):
    return sys.platform == 'win32'
  def __del__(self):
    self.close()
  def close(self):
    self.CloseMsrServer()
  def CloseMsrServer(self):
    """Terminates the MSR helper server process, if one was started."""
    if not self._msr_server_handle:
      return
    TerminateProcess(self._msr_server_handle)
    self._msr_server_handle = None
    self._msr_server_port = None
  def IsThermallyThrottled(self):
    raise NotImplementedError()
  def HasBeenThermallyThrottled(self):
    raise NotImplementedError()
  def GetSystemCommitCharge(self):
    # CommitTotal is in pages; result is in kB.
    performance_info = self._GetPerformanceInfo()
    return performance_info.CommitTotal * performance_info.PageSize / 1024
  @decorators.Cache
  def GetSystemTotalPhysicalMemory(self):
    # PhysicalTotal is in pages; result is in kB.
    performance_info = self._GetPerformanceInfo()
    return performance_info.PhysicalTotal * performance_info.PageSize / 1024
  def GetCpuStats(self, pid):
    """Returns user+kernel CPU time consumed by |pid|, in seconds."""
    cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
    # Convert 100 nanosecond units to seconds
    cpu_time = (cpu_info['UserTime'] / 1e7 +
                cpu_info['KernelTime'] / 1e7)
    return {'CpuProcessTime': cpu_time}
  def GetCpuTimestamp(self):
    """Return current timestamp in seconds."""
    return {'TotalTime': time.time()}
  def GetMemoryStats(self, pid):
    """Returns pagefile and working-set usage (current and peak) for |pid|."""
    memory_info = self._GetWin32ProcessInfo(
        win32process.GetProcessMemoryInfo, pid)
    return {'VM': memory_info['PagefileUsage'],
            'VMPeak': memory_info['PeakPagefileUsage'],
            'WorkingSetSize': memory_info['WorkingSetSize'],
            'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
  def KillProcess(self, pid, kill_process_tree=False):
    # os.kill for Windows is Python 2.7.
    cmd = ['taskkill', '/F', '/PID', str(pid)]
    if kill_process_tree:
      cmd.append('/T')
    subprocess.Popen(cmd, stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT).communicate()
  def GetSystemProcessInfo(self):
    """Returns a list of dicts describing every process, parsed from wmic."""
    # [3:] To skip 2 blank lines and header.
    lines = subprocess.Popen(
        ['wmic', 'process', 'get',
         'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
         '/format:csv'],
        stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
    process_info = []
    for line in lines:
      if not line:
        continue
      parts = line.split(',')
      # Fixed columns are parsed from the right because CommandLine (the
      # second CSV column) may itself contain commas.
      pi = {}
      pi['ProcessId'] = int(parts[-1])
      pi['ParentProcessId'] = int(parts[-2])
      pi['Name'] = parts[-3]
      creation_date = None
      if parts[-4]:
        # Strip the UTC-offset suffix (e.g. '+120') from the WMI timestamp.
        creation_date = float(re.split('[+-]', parts[-4])[0])
      pi['CreationDate'] = creation_date
      pi['CommandLine'] = ','.join(parts[1:-4])
      process_info.append(pi)
    return process_info
  def GetChildPids(self, pid):
    """Returns a list of child pids of |pid| (recursively)."""
    ppid_map = collections.defaultdict(list)
    creation_map = {}
    for pi in self.GetSystemProcessInfo():
      ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
      if pi['CreationDate']:
        creation_map[pi['ProcessId']] = pi['CreationDate']
    def _InnerGetChildPids(pid):
      if not pid or pid not in ppid_map:
        return []
      # Only keep children created no earlier than the parent -- presumably a
      # guard against stale parent links caused by PID reuse; TODO confirm.
      ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
      for child in ret:
        if child == pid:
          continue
        ret.extend(_InnerGetChildPids(child))
      return ret
    return _InnerGetChildPids(pid)
  def GetCommandLine(self, pid):
    """Returns the command line of |pid|; raises if the process is gone."""
    for pi in self.GetSystemProcessInfo():
      if pid == pi['ProcessId']:
        return pi['CommandLine']
    raise exceptions.ProcessGoneException()
  @decorators.Cache
  def GetArchName(self):
    return platform.machine()
  def GetOSName(self):
    return 'win'
  @decorators.Cache
  def GetOSVersionName(self):
    """Maps the NT kernel version prefix to a named Windows release."""
    os_version = platform.uname()[3]
    if os_version.startswith('5.1.'):
      return platform_backend.XP
    if os_version.startswith('6.0.'):
      return platform_backend.VISTA
    if os_version.startswith('6.1.'):
      return platform_backend.WIN7
    if os_version.startswith('6.2.'):
      return platform_backend.WIN8
    raise NotImplementedError('Unknown win version %s.' % os_version)
  def CanFlushIndividualFilesFromSystemCache(self):
    return True
  def _GetWin32ProcessInfo(self, func, pid):
    """Opens |pid| and applies win32 query |func| to the process handle."""
    mask = (win32con.PROCESS_QUERY_INFORMATION |
            win32con.PROCESS_VM_READ)
    handle = None
    try:
      handle = win32api.OpenProcess(mask, False, pid)
      return func(handle)
    except pywintypes.error, e:
      errcode = e[0]
      if errcode == 87:
        # Error 87 (ERROR_INVALID_PARAMETER): the process no longer exists.
        raise exceptions.ProcessGoneException()
      raise
    finally:
      if handle:
        win32api.CloseHandle(handle)
  def _GetPerformanceInfo(self):
    """Returns a filled PERFORMANCE_INFORMATION struct from psapi."""
    class PerformanceInfo(ctypes.Structure):
      """Struct for GetPerformanceInfo() call
      http://msdn.microsoft.com/en-us/library/ms683210
      """
      _fields_ = [('size', ctypes.c_ulong),
                  ('CommitTotal', ctypes.c_size_t),
                  ('CommitLimit', ctypes.c_size_t),
                  ('CommitPeak', ctypes.c_size_t),
                  ('PhysicalTotal', ctypes.c_size_t),
                  ('PhysicalAvailable', ctypes.c_size_t),
                  ('SystemCache', ctypes.c_size_t),
                  ('KernelTotal', ctypes.c_size_t),
                  ('KernelPaged', ctypes.c_size_t),
                  ('KernelNonpaged', ctypes.c_size_t),
                  ('PageSize', ctypes.c_size_t),
                  ('HandleCount', ctypes.c_ulong),
                  ('ProcessCount', ctypes.c_ulong),
                  ('ThreadCount', ctypes.c_ulong)]
      def __init__(self):
        self.size = ctypes.sizeof(self)
        # pylint: disable=bad-super-call
        super(PerformanceInfo, self).__init__()
    performance_info = PerformanceInfo()
    ctypes.windll.psapi.GetPerformanceInfo(
        ctypes.byref(performance_info), performance_info.size)
    return performance_info
  def IsCurrentProcessElevated(self):
    """Returns True iff this process runs with elevated privileges."""
    if self.GetOSVersionName() < platform_backend.VISTA:
      # TOKEN_QUERY is not defined before Vista. All processes are elevated.
      return True
    handle = win32process.GetCurrentProcess()
    with contextlib.closing(
        win32security.OpenProcessToken(handle, win32con.TOKEN_QUERY)) as token:
      return bool(win32security.GetTokenInformation(
          token, win32security.TokenElevation))
  def LaunchApplication(
      self, application, parameters=None, elevate_privilege=False):
    """Launch an application. Returns a PyHANDLE object."""
    parameters = ' '.join(parameters) if parameters else ''
    if elevate_privilege and not self.IsCurrentProcessElevated():
      # Use ShellExecuteEx() instead of subprocess.Popen()/CreateProcess() to
      # elevate privileges. A new console will be created if the new process has
      # different permissions than this process.
      proc_info = shell.ShellExecuteEx(
          fMask=shellcon.SEE_MASK_NOCLOSEPROCESS | shellcon.SEE_MASK_NO_CONSOLE,
          lpVerb='runas' if elevate_privilege else '',
          lpFile=application,
          lpParameters=parameters,
          nShow=win32con.SW_HIDE)
      # Per ShellExecuteEx docs, hInstApp values <= 32 indicate an error.
      if proc_info['hInstApp'] <= 32:
        raise Exception('Unable to launch %s' % application)
      return proc_info['hProcess']
    else:
      handle, _, _, _ = win32process.CreateProcess(
          None, application + ' ' + parameters, None, None, False,
          win32process.CREATE_NO_WINDOW, None, None, win32process.STARTUPINFO())
      return handle
  def CanMonitorPower(self):
    return self._power_monitor.CanMonitorPower()
  def CanMeasurePerApplicationPower(self):
    return self._power_monitor.CanMeasurePerApplicationPower()
  def StartMonitoringPower(self, browser):
    self._power_monitor.StartMonitoringPower(browser)
  def StopMonitoringPower(self):
    return self._power_monitor.StopMonitoringPower()
  def _StartMsrServerIfNeeded(self):
    """Lazily launches the elevated MSR-reading helper server."""
    if self._msr_server_handle:
      return
    _InstallWinRing0()
    self._msr_server_port = util.GetUnreservedAvailableLocalPort()
    # It might be flaky to get a port number without reserving it atomically,
    # but if the server process chooses a port, we have no way of getting it.
    # The stdout of the elevated process isn't accessible.
    parameters = (
        os.path.join(os.path.dirname(__file__), 'msr_server_win.py'),
        str(self._msr_server_port),
    )
    self._msr_server_handle = self.LaunchApplication(
        sys.executable, parameters, elevate_privilege=True)
    # Wait for server to start.
    try:
      socket.create_connection(('127.0.0.1', self._msr_server_port), 5).close()
    except socket.error:
      self.CloseMsrServer()
    # NOTE(review): if the connection above failed, CloseMsrServer() has reset
    # _msr_server_handle to None, so this registers a no-op (TerminateProcess
    # ignores falsy handles) and ReadMsr() will then raise OSError.
    atexit.register(TerminateProcess, self._msr_server_handle)
  def ReadMsr(self, msr_number, start=0, length=64):
    """Reads 64-bit MSR |msr_number| via the helper server and returns the
    bit field [start, start + length)."""
    self._StartMsrServerIfNeeded()
    if not self._msr_server_handle:
      raise OSError('Unable to start MSR server.')
    sock = socket.create_connection(('127.0.0.1', self._msr_server_port), 0.1)
    try:
      sock.sendall(struct.pack('I', msr_number))
      response = sock.recv(8)
    finally:
      sock.close()
    return struct.unpack('Q', response)[0] >> start & ((1 << length) - 1)
  def IsCooperativeShutdownSupported(self):
    return True
  def CooperativelyShutdown(self, proc, app_name):
    """Asks the app's top-level windows to close via WM_CLOSE; returns
    whether any matching window was found."""
    pid = proc.pid
    # http://timgolden.me.uk/python/win32_how_do_i/
    # find-the-window-for-my-subprocess.html
    #
    # It seems that intermittently this code manages to find windows
    # that don't belong to Chrome -- for example, the cmd.exe window
    # running slave.bat on the tryservers. Try to be careful about
    # finding only Chrome's windows. This works for both the browser
    # and content_shell.
    #
    # It seems safest to send the WM_CLOSE messages after discovering
    # all of the sub-process's windows.
    def find_chrome_windows(hwnd, hwnds):
      _, win_pid = win32process.GetWindowThreadProcessId(hwnd)
      if (pid == win_pid and
          win32gui.IsWindowVisible(hwnd) and
          win32gui.IsWindowEnabled(hwnd) and
          win32gui.GetClassName(hwnd).lower().startswith(app_name)):
        hwnds.append(hwnd)
      # Returning True keeps EnumWindows iterating over all windows.
      return True
    hwnds = []
    win32gui.EnumWindows(find_chrome_windows, hwnds)
    if hwnds:
      for hwnd in hwnds:
        win32gui.SendMessage(hwnd, win32con.WM_CLOSE, 0, 0)
      return True
    else:
      logging.info('Did not find any windows owned by target process')
      return False
| bsd-3-clause |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/gis/management/commands/inspectdb.py | 315 | 1466 | from django.core.management.commands.inspectdb import Command as InspectDBCommand
class Command(InspectDBCommand):
    """inspectdb variant aware of GeoDjango geometry columns.

    Geometry columns are resolved to their specific field types via the
    spatial backend's introspection, and tables containing them get a
    GeoManager inserted into the generated model.
    """
    db_module = 'django.contrib.gis.db'
    # NOTE(review): class-level mutable dict, shared by all instances of this
    # command class; fine for one-shot management-command usage, but state
    # persists across instances in the same process.
    gis_tables = {}
    def get_field_type(self, connection, table_name, row):
        """Resolves the field type for a column, specializing GeometryField.

        Returns the same (field_type, field_params, field_notes) triple as
        the base implementation.
        """
        field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
        if field_type == 'GeometryField':
            geo_col = row[0]
            # Getting a more specific field type and any additional parameters
            # from the `get_geometry_type` routine for the spatial backend.
            field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
            field_params.update(geo_params)
            # Track geometry columns per table so get_meta() knows which
            # models need a GeoManager. setdefault() replaces the previous
            # explicit membership-test-then-append dance.
            self.gis_tables.setdefault(table_name, []).append(geo_col)
        return field_type, field_params, field_notes
    def get_meta(self, table_name):
        """Returns the Meta lines for a model, prepending a GeoManager for
        geographic tables."""
        meta_lines = super(Command, self).get_meta(table_name)
        if table_name in self.gis_tables:
            # If the table is a geographic one, then we need make
            # GeoManager the default manager for the model.
            meta_lines.insert(0, ' objects = models.GeoManager()')
        return meta_lines
| apache-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/tests/regressiontests/i18n/contenttypes/tests.py | 26 | 1080 | # coding: utf-8
from __future__ import with_statement
import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import translation
class ContentTypeTests(TestCase):
    """Tests that ContentType verbose names respect translation overrides."""
    def test_verbose_name(self):
        # The same ContentType renders with the active language's translation.
        company_type = ContentType.objects.get(app_label='i18n', model='company')
        with translation.override('en'):
            self.assertEqual(unicode(company_type), u'Company')
        with translation.override('fr'):
            self.assertEqual(unicode(company_type), u'Société')
    def test_field_override(self):
        # An explicitly assigned name takes precedence over the translated one.
        company_type = ContentType.objects.get(app_label='i18n', model='company')
        company_type.name = 'Other'
        self.assertEqual(unicode(company_type), 'Other')
# Rebind the class wrapped with the i18n settings the tests above rely on
# (local locale dir, English default, French available).
ContentTypeTests = override_settings(
    USE_I18N=True,
    LOCALE_PATHS=(
        os.path.join(os.path.dirname(__file__), 'locale'),
    ),
    LANGUAGE_CODE='en',
    LANGUAGES=(
        ('en', 'English'),
        ('fr', 'French'),
    ),
)(ContentTypeTests)
| apache-2.0 |
brunojppb/javascript-the-basics | node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  """Replacement for xml.dom.minidom.Element.writexml.

  Identical to the stock implementation except that attribute values are
  escaped through _Replacement_write_data (which preserves CR/LF/TAB).

  indent: current indentation.
  addindent: indentation to add for each nested level.
  newl: newline string.
  """
  writer.write(indent + "<" + self.tagName)
  attrs = self._get_attributes()
  # Attributes are emitted in sorted name order, as minidom does.
  for attr_name in sorted(attrs.keys()):
    writer.write(" %s=\"" % attr_name)
    _Replacement_write_data(writer, attrs[attr_name].value, is_attrib=True)
    writer.write("\"")
  if not self.childNodes:
    # Leaf element: self-closing tag.
    writer.write("/>%s" % newl)
    return
  writer.write(">%s" % newl)
  for child in self.childNodes:
    child.writexml(writer, indent + addindent, addindent, newl)
  writer.write("%s</%s>%s" % (indent, self.tagName, newl))
class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom."""
  def __init__(self):
    # Preserve current xml.dom.minidom functions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    # Inject replacement versions of a function and a method.
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml
  def Cleanup(self):
    # Restore the saved originals. self.write_data doubles as a "still
    # patched" sentinel, making repeated Cleanup() calls safe.
    if self.write_data:
      xml.dom.minidom._write_data = self.write_data
      xml.dom.minidom.Element.writexml = self.writexml
      self.write_data = None
  def __del__(self):
    # Best-effort unpatch if the owner forgot to call Cleanup().
    self.Cleanup()
| mit |
mzszym/oedes | oedes/tests/test_poisson.py | 1 | 2032 | # -*- coding: utf-8; -*-
#
# oedes - organic electronic device simulator
# Copyright (C) 2017-2018 Marek Zdzislaw Szymanski (marek@marekszymanski.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This tests if Poisson's equation is solved correctly in most simple cases
from oedes import *
from oedes.fvm import mesh1d
from oedes.models import BaseModel, Poisson, AppliedVoltage
L = 100e-9
v0 = 1.
v1 = -1.
mesh = mesh1d(L)
def run_poisson(bc):
    """Builds a bare Poisson model on the module-level mesh, applies the
    given boundary conditions, solves it, and returns the output dict."""
    model = BaseModel()
    model.poisson = Poisson(mesh)
    model.poisson.bc = bc
    params = {
        'T': 300.,
        'electrode0.voltage': v0,
        'electrode1.voltage': v1,
        'electrode0.workfunction': 0.,
        'electrode1.workfunction': 0.,
        'epsilon_r': 3.,
    }
    model.setUp()
    solution = solve(model, model.X, params)
    return model.output(0., solution, 0. * model.X, params)
def test_poisson_DirichletDirichlet():
    # Dirichlet-Dirichlet: uniform field and a linear potential ramp v0 -> v1.
    out = run_poisson([AppliedVoltage('electrode0'),
                       AppliedVoltage('electrode1')])
    expected_field = (v0 - v1) / L
    assert np.allclose(out['E'], expected_field)
    expected_potential = v0 + (v1 - v0) * mesh.cells['center'] / L
    assert np.allclose(out['potential'], expected_potential)
def test_poisson_DirichletOpen():
    # Dirichlet-open: zero field, potential pinned everywhere to v0.
    result = run_poisson([AppliedVoltage('electrode0')])
    assert np.allclose(result['E'], 0.)
    assert np.allclose(result['potential'], v0)
def test_poisson_OpenDirichlet():
    # Open-Dirichlet: zero field, potential pinned everywhere to v1.
    result = run_poisson([AppliedVoltage('electrode1')])
    assert np.allclose(result['E'], 0.)
    assert np.allclose(result['potential'], v1)
| agpl-3.0 |
chaveiro/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte charset probers.

    Subclasses supply a coding state machine (_mCodingSM) and a character
    distribution analyzer (_mDistributionAnalyzer); feed() drives the state
    machine over the input bytes and feeds complete characters to the
    analyzer.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        # Both set by subclasses before feed() is called.
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Rolling two-byte window: [last byte of previous buffer, first byte
        # of current buffer], used at buffer boundaries.
        self._mLastChar = [0, 0]
    def reset(self):
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]
    def get_charset_name(self):
        # Abstract: overridden by concrete probers.
        pass
    def feed(self, aBuf):
        """Consumes a buffer of bytes and returns the updated probing state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence for this encoding: rule it out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character just completed; hand it to the analyzer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the previous buffer boundary.
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success once confidence is high enough.
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| lgpl-2.1 |
Samuc/Proyecto-IV | lib/python2.7/site-packages/pip/commands/zip.py | 393 | 14821 | import sys
import re
import fnmatch
import os
import shutil
import zipfile
from pip.util import display_path, backup_dir, rmtree
from pip.log import logger
from pip.exceptions import InstallationError
from pip.basecommand import Command
class ZipCommand(Command):
    """Zip individual packages.

    Deprecated pip command that converts an installed package directory into
    a zip importable via a generated .pth file, or unpacks such a zip back
    into a directory. The same class backs both 'pip zip' and 'pip unzip'
    (self.name decides which option set is installed).
    """
    name = 'zip'
    usage = """
      %prog [options] <package> ..."""
    summary = 'DEPRECATED. Zip individual packages.'
    def __init__(self, *args, **kw):
        super(ZipCommand, self).__init__(*args, **kw)
        # 'zip' and 'unzip' share this implementation; only the direction
        # flag differs between the two commands.
        if self.name == 'zip':
            self.cmd_opts.add_option(
                '--unzip',
                action='store_true',
                dest='unzip',
                help='Unzip (rather than zip) a package.')
        else:
            self.cmd_opts.add_option(
                '--zip',
                action='store_false',
                dest='unzip',
                default=True,
                help='Zip (rather than unzip) a package.')
        self.cmd_opts.add_option(
            '--no-pyc',
            action='store_true',
            dest='no_pyc',
            help='Do not include .pyc files in zip files (useful on Google App Engine).')
        self.cmd_opts.add_option(
            '-l', '--list',
            action='store_true',
            dest='list',
            help='List the packages available, and their zip status.')
        self.cmd_opts.add_option(
            '--sort-files',
            action='store_true',
            dest='sort_files',
            help='With --list, sort packages according to how many files they contain.')
        self.cmd_opts.add_option(
            '--path',
            action='append',
            dest='paths',
            help='Restrict operations to the given paths (may include wildcards).')
        self.cmd_opts.add_option(
            '-n', '--simulate',
            action='store_true',
            help='Do not actually perform the zip/unzip operation.')
        self.parser.insert_option_group(0, self.cmd_opts)
    def paths(self):
        """All the entries of sys.path, possibly restricted by --path"""
        if not self.select_paths:
            return sys.path
        result = []
        match_any = set()
        for path in sys.path:
            path = os.path.normcase(os.path.abspath(path))
            for match in self.select_paths:
                match = os.path.normcase(os.path.abspath(match))
                if '*' in match:
                    # Wildcard pattern: match as a fnmatch-style prefix.
                    if re.search(fnmatch.translate(match + '*'), path):
                        result.append(path)
                        match_any.add(match)
                        break
                else:
                    # Literal pattern: plain prefix match.
                    if path.startswith(match):
                        result.append(path)
                        match_any.add(match)
                        break
            else:
                logger.debug("Skipping path %s because it doesn't match %s"
                             % (path, ', '.join(self.select_paths)))
        for match in self.select_paths:
            if match not in match_any and '*' not in match:
                result.append(match)
                logger.debug("Adding path %s because it doesn't match "
                             "anything already on sys.path" % match)
        return result
    def run(self, options, args):
        """Entry point: dispatches to list/zip/unzip based on options."""
        logger.deprecated('1.7', "DEPRECATION: 'pip zip' and 'pip unzip` are deprecated, and will be removed in a future release.")
        self.select_paths = options.paths
        self.simulate = options.simulate
        if options.list:
            return self.list(options, args)
        if not args:
            raise InstallationError(
                'You must give at least one package to zip or unzip')
        packages = []
        # Validate all requested packages up front before touching anything.
        for arg in args:
            module_name, filename = self.find_package(arg)
            if options.unzip and os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a zip file; cannot be unzipped'
                    % (module_name, filename))
            elif not options.unzip and not os.path.isdir(filename):
                raise InstallationError(
                    'The module %s (in %s) is not a directory; cannot be zipped'
                    % (module_name, filename))
            packages.append((module_name, filename))
        last_status = None
        for module_name, filename in packages:
            if options.unzip:
                last_status = self.unzip_package(module_name, filename)
            else:
                last_status = self.zip_package(module_name, filename, options.no_pyc)
        return last_status
    def unzip_package(self, module_name, filename):
        """Extracts |module_name| out of its containing zip back onto disk,
        shrinking or deleting the zip and its .pth reference."""
        zip_filename = os.path.dirname(filename)
        # NOTE(review): this condition reads as 'not a file AND a zipfile';
        # presumably 'not (isfile and is_zipfile)' was intended -- confirm
        # against upstream pip before changing.
        if not os.path.isfile(zip_filename) and zipfile.is_zipfile(zip_filename):
            raise InstallationError(
                'Module %s (in %s) isn\'t located in a zip file in %s'
                % (module_name, filename, zip_filename))
        package_path = os.path.dirname(zip_filename)
        if not package_path in self.paths():
            logger.warn(
                'Unpacking %s into %s, but %s is not on sys.path'
                % (display_path(zip_filename), display_path(package_path),
                   display_path(package_path)))
        logger.notify('Unzipping %s (in %s)' % (module_name, display_path(zip_filename)))
        if self.simulate:
            logger.notify('Skipping remaining operations because of --simulate')
            return
        logger.indent += 2
        try:
            ## FIXME: this should be undoable:
            zip = zipfile.ZipFile(zip_filename)
            to_save = []
            for info in zip.infolist():
                name = info.filename
                if name.startswith(module_name + os.path.sep):
                    # Entry belongs to the requested module: extract to disk.
                    content = zip.read(name)
                    dest = os.path.join(package_path, name)
                    if not os.path.exists(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    if not content and dest.endswith(os.path.sep):
                        if not os.path.exists(dest):
                            os.makedirs(dest)
                    else:
                        f = open(dest, 'wb')
                        f.write(content)
                        f.close()
                else:
                    # Entry belongs to another module: keep it in the zip.
                    to_save.append((name, zip.read(name)))
            zip.close()
            if not to_save:
                logger.info('Removing now-empty zip file %s' % display_path(zip_filename))
                os.unlink(zip_filename)
                self.remove_filename_from_pth(zip_filename)
            else:
                # Rewrite the zip with only the remaining entries.
                logger.info('Removing entries in %s/ from zip file %s' % (module_name, display_path(zip_filename)))
                zip = zipfile.ZipFile(zip_filename, 'w')
                for name, content in to_save:
                    zip.writestr(name, content)
                zip.close()
        finally:
            logger.indent -= 2
    def zip_package(self, module_name, filename, no_pyc):
        """Packs the package directory |filename| into a sibling zip and adds
        a .pth file so it stays importable."""
        orig_filename = filename
        logger.notify('Zip %s (in %s)' % (module_name, display_path(filename)))
        logger.indent += 2
        if filename.endswith('.egg'):
            dest_filename = filename
        else:
            dest_filename = filename + '.zip'
        try:
            ## FIXME: I think this needs to be undoable:
            if filename == dest_filename:
                # .egg case: move the directory aside so the zip can take
                # its name.
                filename = backup_dir(orig_filename)
                logger.notify('Moving %s aside to %s' % (orig_filename, filename))
                if not self.simulate:
                    shutil.move(orig_filename, filename)
            try:
                logger.info('Creating zip file in %s' % display_path(dest_filename))
                if not self.simulate:
                    zip = zipfile.ZipFile(dest_filename, 'w')
                    zip.writestr(module_name + '/', '')
                    for dirpath, dirnames, filenames in os.walk(filename):
                        if no_pyc:
                            filenames = [f for f in filenames
                                         if not f.lower().endswith('.pyc')]
                        for fns, is_dir in [(dirnames, True), (filenames, False)]:
                            for fn in fns:
                                full = os.path.join(dirpath, fn)
                                dest = os.path.join(module_name, dirpath[len(filename):].lstrip(os.path.sep), fn)
                                if is_dir:
                                    zip.writestr(dest + '/', '')
                                else:
                                    zip.write(full, dest)
                    zip.close()
                logger.info('Removing old directory %s' % display_path(filename))
                if not self.simulate:
                    rmtree(filename)
            except:
                ## FIXME: need to do an undo here
                raise
            ## FIXME: should also be undone:
            self.add_filename_to_pth(dest_filename)
        finally:
            logger.indent -= 2
    def remove_filename_from_pth(self, filename):
        """Drops |filename| from whichever .pth file references it, deleting
        the .pth file if that leaves it empty."""
        for pth in self.pth_files():
            f = open(pth, 'r')
            lines = f.readlines()
            f.close()
            new_lines = [
                l for l in lines if l.strip() != filename]
            if lines != new_lines:
                logger.info('Removing reference to %s from .pth file %s'
                            % (display_path(filename), display_path(pth)))
                if not [line for line in new_lines if line]:
                    logger.info('%s file would be empty: deleting' % display_path(pth))
                    if not self.simulate:
                        os.unlink(pth)
                else:
                    if not self.simulate:
                        f = open(pth, 'wb')
                        f.writelines(new_lines)
                        f.close()
                return
        logger.warn('Cannot find a reference to %s in any .pth file' % display_path(filename))
    def add_filename_to_pth(self, filename):
        """Appends |filename| to a .pth file next to it so the zip is added
        to sys.path at interpreter startup."""
        path = os.path.dirname(filename)
        dest = filename + '.pth'
        if path not in self.paths():
            logger.warn('Adding .pth file %s, but it is not on sys.path' % display_path(dest))
        if not self.simulate:
            if os.path.exists(dest):
                f = open(dest)
                lines = f.readlines()
                f.close()
                if lines and not lines[-1].endswith('\n'):
                    lines[-1] += '\n'
                lines.append(filename + '\n')
            else:
                lines = [filename + '\n']
            f = open(dest, 'wb')
            f.writelines(lines)
            f.close()
    def pth_files(self):
        """Yields every .pth file found on the (possibly restricted) paths."""
        for path in self.paths():
            if not os.path.exists(path) or not os.path.isdir(path):
                continue
            for filename in os.listdir(path):
                if filename.endswith('.pth'):
                    yield os.path.join(path, filename)
    def find_package(self, package):
        """Locates |package| on the paths, as a directory or inside a zip.

        Returns (package, full_path); raises InstallationError if not found.
        """
        for path in self.paths():
            full = os.path.join(path, package)
            if os.path.exists(full):
                return package, full
            if not os.path.isdir(path) and zipfile.is_zipfile(path):
                # The path entry itself is a zip: look for the package inside.
                zip = zipfile.ZipFile(path, 'r')
                try:
                    zip.read(os.path.join(package, '__init__.py'))
                except KeyError:
                    pass
                else:
                    zip.close()
                    return package, full
                zip.close()
        ## FIXME: need special error for package.py case:
        raise InstallationError(
            'No package with the name %s found' % package)
    def list(self, options, args):
        """Prints zipped and unzipped packages for each site-packages-like
        path (implements --list)."""
        if args:
            raise InstallationError(
                'You cannot give an argument with --list')
        for path in sorted(self.paths()):
            if not os.path.exists(path):
                continue
            basename = os.path.basename(path.rstrip(os.path.sep))
            if os.path.isfile(path) and zipfile.is_zipfile(path):
                if os.path.dirname(path) not in self.paths():
                    logger.notify('Zipped egg: %s' % display_path(path))
                continue
            # Only report conventional package directories.
            if (basename != 'site-packages' and basename != 'dist-packages'
                    and not path.replace('\\', '/').endswith('lib/python')):
                continue
            logger.notify('In %s:' % display_path(path))
            logger.indent += 2
            zipped = []
            unzipped = []
            try:
                for filename in sorted(os.listdir(path)):
                    ext = os.path.splitext(filename)[1].lower()
                    if ext in ('.pth', '.egg-info', '.egg-link'):
                        continue
                    if ext == '.py':
                        logger.info('Not displaying %s: not a package' % display_path(filename))
                        continue
                    full = os.path.join(path, filename)
                    if os.path.isdir(full):
                        unzipped.append((filename, self.count_package(full)))
                    elif zipfile.is_zipfile(full):
                        zipped.append(filename)
                    else:
                        logger.info('Unknown file: %s' % display_path(filename))
                if zipped:
                    logger.notify('Zipped packages:')
                    logger.indent += 2
                    try:
                        for filename in zipped:
                            logger.notify(filename)
                    finally:
                        logger.indent -= 2
                else:
                    logger.notify('No zipped packages.')
                if unzipped:
                    if options.sort_files:
                        unzipped.sort(key=lambda x: -x[1])
                    logger.notify('Unzipped packages:')
                    logger.indent += 2
                    try:
                        for filename, count in unzipped:
                            logger.notify('%s (%i files)' % (filename, count))
                    finally:
                        logger.indent -= 2
                else:
                    logger.notify('No unzipped packages.')
            finally:
                logger.indent -= 2
    def count_package(self, path):
        """Counts the non-.pyc files under |path|, recursively."""
        total = 0
        for dirpath, dirnames, filenames in os.walk(path):
            filenames = [f for f in filenames
                         if not f.lower().endswith('.pyc')]
            total += len(filenames)
        return total
| gpl-2.0 |
quizlet/grpc | tools/run_tests/python_utils/filter_pull_request_tests.py | 1 | 7545 | #!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter out tests based on file differences compared to merge target branch"""
from __future__ import print_function
import re
import six
from subprocess import check_output
class TestSuite:
  """
  Groups tests under a set of identifying labels, together with the
  file-path regexes ("triggers") that mark a changed file as relevant
  to this suite.
  """
  def __init__(self, labels):
    """
    Build TestSuite to group tests based on labeling
    :param labels: strings that should match a job's platform, config, language, or test group
    """
    self.labels = labels
    self.triggers = []

  def add_trigger(self, trigger):
    """
    Register a regex; a changed file matching it means this suite must run
    :param trigger: regex matching file relevant to tests
    """
    self.triggers.append(trigger)
# Create test suites
# One TestSuite per language/platform.  A job belongs to a suite when the
# job's platform, config, language or test group matches one of the suite's
# labels (presumably the labels produced by run_tests_matrix.py -- confirm
# against that script's job definitions).
_CORE_TEST_SUITE = TestSuite(['c'])
_CPP_TEST_SUITE = TestSuite(['c++'])
_CSHARP_TEST_SUITE = TestSuite(['csharp'])
_NODE_TEST_SUITE = TestSuite(['node', 'grpc-node'])
_OBJC_TEST_SUITE = TestSuite(['objc'])
_PHP_TEST_SUITE = TestSuite(['php', 'php7'])
_PYTHON_TEST_SUITE = TestSuite(['python'])
_RUBY_TEST_SUITE = TestSuite(['ruby'])
_LINUX_TEST_SUITE = TestSuite(['linux'])
_WINDOWS_TEST_SUITE = TestSuite(['windows'])
_MACOS_TEST_SUITE = TestSuite(['macos'])
# Master list consulted by filter_tests() when deciding which suites are
# safe to skip for a given change set.
_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
                    _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
                    _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
                    _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
# Dictionary of whitelistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that
# the changed files should not trigger any tests. Any changed file that does not
# match any of these regexes will trigger all tests
# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
# Keys are uncompiled regexes applied with re.match(), i.e. anchored at the
# start of a path relative to the repository root.
_WHITELIST_DICT = {
  '^doc/': [],
  '^examples/': [],
  '^include/grpc\+\+/': [_CPP_TEST_SUITE],
  '^summerofcode/': [],
  '^src/cpp/': [_CPP_TEST_SUITE],
  '^src/csharp/': [_CSHARP_TEST_SUITE],
  '^src/node/': [_NODE_TEST_SUITE],
  '^src/objective\-c/': [_OBJC_TEST_SUITE],
  '^src/php/': [_PHP_TEST_SUITE],
  '^src/python/': [_PYTHON_TEST_SUITE],
  '^src/ruby/': [_RUBY_TEST_SUITE],
  '^templates/': [],
  '^test/core/': [_CORE_TEST_SUITE],
  '^test/cpp/': [_CPP_TEST_SUITE],
  '^test/distrib/cpp/': [_CPP_TEST_SUITE],
  '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
  '^test/distrib/node/': [_NODE_TEST_SUITE],
  '^test/distrib/php/': [_PHP_TEST_SUITE],
  '^test/distrib/python/': [_PYTHON_TEST_SUITE],
  '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
  '^vsprojects/': [_WINDOWS_TEST_SUITE],
  'binding\.gyp$': [_NODE_TEST_SUITE],
  'composer\.json$': [_PHP_TEST_SUITE],
  'config\.m4$': [_PHP_TEST_SUITE],
  'CONTRIBUTING\.md$': [],
  'Gemfile$': [_RUBY_TEST_SUITE],
  'grpc\.def$': [_WINDOWS_TEST_SUITE],
  'grpc\.gemspec$': [_RUBY_TEST_SUITE],
  'gRPC\.podspec$': [_OBJC_TEST_SUITE],
  'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
  'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
  'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
  'INSTALL\.md$': [],
  'LICENSE$': [],
  'MANIFEST\.md$': [],
  'package\.json$': [_PHP_TEST_SUITE],
  'package\.xml$': [_PHP_TEST_SUITE],
  'PATENTS$': [],
  'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
  'README\.md$': [],
  'requirements\.txt$': [_PYTHON_TEST_SUITE],
  'setup\.cfg$': [_PYTHON_TEST_SUITE],
  'setup\.py$': [_PYTHON_TEST_SUITE]
}
# Regex that combines all keys in _WHITELIST_DICT
# (a single alternation, so one re.match() call answers "is this file
# whitelisted at all?").
_ALL_TRIGGERS = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
# Add all triggers to their respective test suites
for trigger, test_suites in six.iteritems(_WHITELIST_DICT):
  for test_suite in test_suites:
    test_suite.add_trigger(trigger)
def _get_changed_files(base_branch):
  """
  Get list of changed files between current branch and base of target merge branch
  """
  # Find the merge base first, then diff against it.  Kept as two separate
  # git invocations so this also works on Windows shells.
  merge_base = check_output(["git", "merge-base", base_branch, "HEAD"]).rstrip()
  diff_command = ["git", "diff", merge_base, "--name-only", "HEAD"]
  return check_output(diff_command).splitlines()
def _can_skip_tests(file_names, triggers):
"""
Determines if tests are skippable based on if all files do not match list of regexes
:param file_names: list of changed files generated by _get_changed_files()
:param triggers: list of regexes matching file name that indicates tests should be run
:return: safe to skip tests
"""
for file_name in file_names:
if any(re.match(trigger, file_name) for trigger in triggers):
return False
return True
def _remove_irrelevant_tests(tests, skippable_labels):
"""
Filters out tests by config or language - will not remove sanitizer tests
:param tests: list of all tests generated by run_tests_matrix.py
:param skippable_labels: list of languages and platforms with skippable tests
:return: list of relevant tests
"""
# test.labels[0] is platform and test.labels[2] is language
# We skip a test if both are considered safe to skip
return [test for test in tests if test.labels[0] not in skippable_labels or \
test.labels[2] not in skippable_labels]
def affects_c_cpp(base_branch):
  """
  Determines if a pull request's changes affect C/C++. This function exists because
  there are pull request tests that only test C/C++ code
  :param base_branch: branch that a pull request is requesting to merge into
  :return: boolean indicating whether C/C++ changes are made in pull request
  """
  changed_files = _get_changed_files(base_branch)
  # Any file outside the whitelist forces the full run, C/C++ included.
  if any(not re.match(_ALL_TRIGGERS, changed_file)
         for changed_file in changed_files):
    return True
  relevant_triggers = _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers
  return not _can_skip_tests(changed_files, relevant_triggers)
def filter_tests(tests, base_branch):
  """
  Filters out tests that are safe to ignore
  :param tests: list of all tests generated by run_tests_matrix.py
  :return: list of relevant tests
  """
  print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
  changed_files = _get_changed_files(base_branch)
  for changed_file in changed_files:
    print(' %s' % changed_file)
  print('')
  # Run all tests if any changed file is not in the whitelist dictionary
  if any(not re.match(_ALL_TRIGGERS, changed_file)
         for changed_file in changed_files):
    return tests
  # Figure out which language and platform tests to run
  skippable_labels = []
  for suite in _ALL_TEST_SUITES:
    if _can_skip_tests(changed_files, suite.triggers):
      for label in suite.labels:
        print(' %s tests safe to skip' % label)
        skippable_labels.append(label)
  return _remove_irrelevant_tests(tests, skippable_labels)
| apache-2.0 |
WSDC-NITWarangal/django | django/core/serializers/base.py | 273 | 7678 | """
Module for abstract serializer/unserializer base classes.
"""
from django.db import models
from django.utils import six
class SerializerDoesNotExist(KeyError):
    """Raised when a serializer of the requested name is not registered."""
class SerializationError(Exception):
    """Raised when something goes wrong while serializing."""
class DeserializationError(Exception):
    """Raised when something goes wrong while deserializing."""

    @classmethod
    def WithData(cls, original_exc, model, fk, field_value):
        """
        Factory method: build a DeserializationError whose message carries
        the offending model, primary key and field value for easier debugging.
        """
        message = "%s: (%s:pk=%s) field_value was '%s'" % (
            original_exc, model, fk, field_value)
        return cls(message)
class ProgressBar(object):
    """
    Draws a textual progress bar to ``output`` as ``update(count)`` is
    called with a running object count.  A falsy ``output`` disables
    drawing entirely.
    """
    progress_width = 75

    def __init__(self, output, total_count):
        self.output = output
        self.total_count = total_count
        # Width (in characters) already drawn; avoids redundant redraws.
        self.prev_done = 0

    def update(self, count):
        if not self.output:
            return
        percent = count * 100 // self.total_count
        done = percent * self.progress_width // 100
        if done <= self.prev_done:
            # Bar has not visibly advanced; nothing to draw.
            return
        self.prev_done = done
        # A single-shot bar (total_count == 1) never needs the carriage
        # return used to overwrite previous renderings.
        prefix = '' if self.total_count == 1 else '\r'
        bar = '.' * done + ' ' * (self.progress_width - done)
        self.output.write('%s[%s]' % (prefix, bar))
        if done == self.progress_width:
            self.output.write('\n')
        self.output.flush()
class Serializer(object):
    """
    Abstract serializer base class.
    """
    # Indicates if the implemented serializer is only available for
    # internal Django use.
    internal_use_only = False
    # Progress-reporting class; swappable (e.g. for tests or silent runs).
    progress_class = ProgressBar
    def serialize(self, queryset, **options):
        """
        Serialize a queryset.
        Options consumed here: stream, fields, use_natural_foreign_keys,
        use_natural_primary_keys, progress_output, object_count; anything
        left over remains available to subclasses via self.options.
        """
        self.options = options
        self.stream = options.pop("stream", six.StringIO())
        self.selected_fields = options.pop("fields", None)
        self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False)
        self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
        progress_bar = self.progress_class(
            options.pop('progress_output', None), options.pop('object_count', 0)
        )
        self.start_serialization()
        # self.first is True only while the first object is being handled;
        # subclasses can consult it (e.g. to emit separators between objects).
        self.first = True
        for count, obj in enumerate(queryset, start=1):
            self.start_object(obj)
            # Use the concrete parent class' _meta instead of the object's _meta
            # This is to avoid local_fields problems for proxy models. Refs #17717.
            concrete_model = obj._meta.concrete_model
            for field in concrete_model._meta.local_fields:
                if field.serialize:
                    if field.remote_field is None:
                        if self.selected_fields is None or field.attname in self.selected_fields:
                            self.handle_field(obj, field)
                    else:
                        # A FK's attname ends in '_id' (presumably -- Django
                        # convention); [:-3] strips that suffix so it can be
                        # matched against user-supplied field names.
                        if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                            self.handle_fk_field(obj, field)
            for field in concrete_model._meta.many_to_many:
                if field.serialize:
                    if self.selected_fields is None or field.attname in self.selected_fields:
                        self.handle_m2m_field(obj, field)
            self.end_object(obj)
            progress_bar.update(count)
            if self.first:
                self.first = False
        self.end_serialization()
        return self.getvalue()
    def start_serialization(self):
        """
        Called when serializing of the queryset starts.
        """
        raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method')
    def end_serialization(self):
        """
        Called when serializing of the queryset ends.
        """
        pass
    def start_object(self, obj):
        """
        Called when serializing of an object starts.
        """
        raise NotImplementedError('subclasses of Serializer must provide a start_object() method')
    def end_object(self, obj):
        """
        Called when serializing of an object ends.
        """
        pass
    def handle_field(self, obj, field):
        """
        Called to handle each individual (non-relational) field on an object.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_field() method')
    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey field.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_fk_field() method')
    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_m2m_field() method')
    def getvalue(self):
        """
        Return the fully serialized queryset (or None if the output stream is
        not seekable).
        """
        if callable(getattr(self.stream, 'getvalue', None)):
            return self.stream.getvalue()
class Deserializer(six.Iterator):
    """
    Abstract base deserializer class.

    Subclasses are iterators yielding deserialized objects from a stream.
    """

    def __init__(self, stream_or_string, **options):
        """
        Init this serializer given a stream or a string
        """
        self.options = options
        # Normalize the input so subclasses can always read from a stream.
        if isinstance(stream_or_string, six.string_types):
            stream = six.StringIO(stream_or_string)
        else:
            stream = stream_or_string
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        """Iteration interface -- return the next item in the stream"""
        raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')
class DeserializedObject(object):
    """
    A deserialized model.

    Wraps a not-yet-saved model instance together with any many-to-many
    data deserialized alongside it.  Call ``save()`` to persist the object
    (with the many-to-many data), or ``save(save_m2m=False)`` to store only
    the plain fields.
    """

    def __init__(self, obj, m2m_data=None):
        self.object = obj
        self.m2m_data = m2m_data

    def __repr__(self):
        return "<DeserializedObject: %s(pk=%s)>" % (
            self.object._meta.label, self.object.pk)

    def save(self, save_m2m=True, using=None, **kwargs):
        # Call save on the Model baseclass directly, bypassing any custom
        # save() defined on the model.  The save is forced to be raw;
        # raw=True is passed on to any pre/post_save signals.
        models.Model.save_base(self.object, using=using, raw=True, **kwargs)
        if save_m2m and self.m2m_data:
            for accessor_name, object_list in self.m2m_data.items():
                setattr(self.object, accessor_name, object_list)
        # Drop the m2m data so a second (possibly accidental) call to save()
        # cannot apply it twice.
        self.m2m_data = None
def build_instance(Model, data, db):
    """
    Build a model instance.

    If the model instance doesn't have a primary key and the model supports
    natural keys, try to retrieve it from the database.
    """
    obj = Model(**data)
    if obj.pk is None:
        # Both hasattr checks stay inside the pk-is-None branch so models
        # without a _default_manager are never probed unnecessarily.
        supports_natural = (hasattr(Model, 'natural_key') and
                            hasattr(Model._default_manager, 'get_by_natural_key'))
        if supports_natural:
            natural_key = obj.natural_key()
            manager = Model._default_manager.db_manager(db)
            try:
                obj.pk = manager.get_by_natural_key(*natural_key).pk
            except Model.DoesNotExist:
                pass
    return obj
| bsd-3-clause |
denmojo/pygrow | grow/server/main_test.py | 1 | 3238 | from grow.pods import pods
from grow.server import main
from grow.testing import testing
import unittest
import webapp2
class PodHandlerTestCase(unittest.TestCase):
  """Integration-style tests serving a fixture pod through the WSGI app."""
  def test_request(self):
    # Build a pod from the bundled test fixture directory.
    self.dir_path = testing.create_test_pod_dir()
    pod = pods.Pod(self.dir_path)
    # When serving a pod, should 200.
    app = main.create_wsgi_app(pod)
    request = webapp2.Request.blank('/')
    response = request.get_response(app)
    self.assertEqual(200, response.status_int)
    # Verify 404 is sent for page not found.
    request = webapp2.Request.blank('/dummy/page/')
    response = request.get_response(app)
    self.assertEqual(404, response.status_int)
    # Verify 206 for partial content.
    headers = {'Range': 'bytes=0-4'}
    request = webapp2.Request.blank('/public/file.txt', headers=headers)
    response = request.get_response(app)
    self.assertEqual(206, response.status_int)
    self.assertEqual('bytes 0-4/13', response.headers['Content-Range'])
    self.assertEqual('Hello', response.body)
    # An end index past EOF (13 > 12) is clamped to the last byte.
    headers = {'Range': 'bytes=5-13'}
    request = webapp2.Request.blank('/public/file.txt', headers=headers)
    response = request.get_response(app)
    self.assertEqual('bytes 5-12/13', response.headers['Content-Range'])
    self.assertEqual(' World!\n', response.body)
    # Verify full response when headers omitted.
    request = webapp2.Request.blank('/public/file.txt')
    response = request.get_response(app)
    self.assertEqual('Hello World!\n', response.body)
    # Verify 304.
    url_path = '/public/file.txt'
    controller, params = pod.match(url_path)
    response_headers = controller.get_http_headers(params)
    # NOTE(review): the Last-Modified value is sent as If-None-Match
    # (normally paired with an ETag) -- presumably the handler compares
    # this header against Last-Modified; confirm in the server code.
    headers = {'If-None-Match': response_headers['Last-Modified']}
    request = webapp2.Request.blank(url_path, headers=headers)
    response = request.get_response(app)
    self.assertEqual(304, response.status_int)
    self.assertEqual('', response.body)
    # A second identical request must also be a 304 (cache state stable).
    response = request.get_response(app)
    self.assertEqual(304, response.status_int)
    self.assertEqual('', response.body)
    # Verify sitemap on server.
    path = '/root/sitemap.xml'
    # NOTE(review): `headers` still carries the If-None-Match value from the
    # 304 checks above, yet a 200 is expected here -- confirm this header
    # reuse is intentional.
    request = webapp2.Request.blank(path, headers=headers)
    response = request.get_response(app)
    self.assertEqual(200, response.status_int)
    self.assertEqual('application/xml', response.headers['Content-Type'])
  def test_ui(self):
    dir_path = testing.create_test_pod_dir()
    pod = pods.Pod(dir_path)
    app = main.create_wsgi_app(pod)
    # Verify JS and CSS are served.
    request = webapp2.Request.blank('/_grow/ui/js/ui.min.js')
    response = request.get_response(app)
    self.assertEqual(200, response.status_int)
    # Minified bundles start with an IIFE; use it as a cheap sentinel.
    js_sentinel = '!function'
    self.assertIn(js_sentinel, response.body)
    request = webapp2.Request.blank('/_grow/ui/css/ui.min.css')
    response = request.get_response(app)
    self.assertEqual(200, response.status_int)
    css_sentinel = '#grow'
    self.assertIn(css_sentinel, response.body)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
  unittest.main()
| mit |
wrouesnel/ansible | lib/ansible/playbook/role/__init__.py | 26 | 19263 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.six import iteritems, binary_type, text_type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
from ansible.plugins.loader import get_all_plugin_loaders
from ansible.utils.vars import combine_vars
__all__ = ['Role', 'hash_params']
# TODO: this should be a utility function, but can't be a member of
# the role due to the fact that it would require the use of self
# in a static method. This is also used in the base class for
# strategies (ansible/plugins/strategy/__init__.py)
def hash_params(params):
    """
    Construct a data structure of parameters that is hashable.

    This requires changing any mutable data structures into immutable ones.
    We chose a frozenset because role parameters have to be unique.

    .. warning:: this does not handle unhashable scalars. Two things
      mitigate that limitation:

      1) There shouldn't be any unhashable scalars specified in the yaml
      2) Our only choice would be to return an error anyway.

    :param params: role parameters (typically a dict, but any scalar or
        container is accepted)
    :returns: a frozenset usable as a key in play.ROLE_CACHE
    """
    # The container ABCs were importable straight from ``collections`` on
    # Python 2; those aliases were deprecated in Python 3.3 and removed in
    # Python 3.10, so prefer ``collections.abc`` and fall back for Python 2.
    try:
        from collections.abc import Container, Mapping, Sequence, Set
    except ImportError:  # Python 2
        from collections import Container, Mapping, Sequence, Set

    # Any container is unhashable if it contains unhashable items (for
    # instance, tuple() is a Hashable subclass but if it contains a dict, it
    # cannot be hashed)
    if isinstance(params, Container) and not isinstance(params, (text_type, binary_type)):
        if isinstance(params, Mapping):
            try:
                # Optimistically hope the contents are all hashable
                new_params = frozenset(params.items())
            except TypeError:
                new_params = set()
                for k, v in params.items():
                    # Hash each entry individually.
                    # NOTE(review): update() with a tuple adds the key and the
                    # hashed value as two independent members, so the pairing
                    # is lost; kept as-is because cache keys built this way
                    # must stay stable across the run.
                    new_params.update((k, hash_params(v)))
                new_params = frozenset(new_params)
        elif isinstance(params, (Set, Sequence)):
            try:
                # Optimistically hope the contents are all hashable
                new_params = frozenset(params)
            except TypeError:
                new_params = set()
                for v in params:
                    # Hash each entry individually
                    new_params.update(hash_params(v))
                new_params = frozenset(new_params)
        else:
            # This is just a guess.
            new_params = frozenset(params)
        return new_params

    # Note: We do not handle unhashable scalars but our only choice would be
    # to raise an error there anyway.
    return frozenset((params,))
class Role(Base, Become, Conditional, Taggable):
    """
    A playbook role: a reusable bundle of tasks, handlers, variables,
    defaults, metadata and dependencies loaded from a role directory.
    """
    _delegate_to = FieldAttribute(isa='string')
    _delegate_facts = FieldAttribute(isa='bool', default=False)
    def __init__(self, play=None, from_files=None):
        """
        :param play: the Play this role belongs to
        :param from_files: optional mapping of section name (e.g. 'tasks',
            'vars', 'defaults') to an alternate main file name to load
        """
        self._role_name = None
        self._role_path = None
        self._role_params = dict()
        self._loader = None
        self._metadata = None
        self._play = play
        self._parents = []
        self._dependencies = []
        self._task_blocks = []
        self._handler_blocks = []
        self._default_vars = dict()
        self._role_vars = dict()
        self._had_task_run = dict()
        self._completed = dict()
        if from_files is None:
            from_files = {}
        self._from_files = from_files
        super(Role, self).__init__()
    def __repr__(self):
        return self.get_name()
    def get_name(self):
        # Role name as specified in the play (set by _load_role_data).
        return self._role_name
    @staticmethod
    def load(role_include, play, parent_role=None, from_files=None):
        """
        Return a Role for the given role include, reusing a cached instance
        from play.ROLE_CACHE when one was already built with identical
        parameters; otherwise load and cache a new one.
        """
        if from_files is None:
            from_files = {}
        try:
            # The ROLE_CACHE is a dictionary of role names, with each entry
            # containing another dictionary corresponding to a set of parameters
            # specified for a role as the key and the Role() object itself.
            # We use frozenset to make the dictionary hashable.
            params = role_include.get_role_params()
            if role_include.when is not None:
                params['when'] = role_include.when
            if role_include.tags is not None:
                params['tags'] = role_include.tags
            if from_files is not None:
                params['from_files'] = from_files
            if role_include.vars:
                params['vars'] = role_include.vars
            hashed_params = hash_params(params)
            if role_include.role in play.ROLE_CACHE:
                for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.role]):
                    if hashed_params == entry:
                        # Cache hit: just record the new parent relationship.
                        if parent_role:
                            role_obj.add_parent(parent_role)
                        return role_obj
            r = Role(play=play, from_files=from_files)
            r._load_role_data(role_include, parent_role=parent_role)
            if role_include.role not in play.ROLE_CACHE:
                play.ROLE_CACHE[role_include.role] = dict()
            play.ROLE_CACHE[role_include.role][hashed_params] = r
            return r
        except RuntimeError:
            # Hitting the recursion limit here almost certainly means a
            # dependency cycle between roles.
            raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
                               obj=role_include._ds)
    def _load_role_data(self, role_include, parent_role=None):
        """
        Populate this role from its on-disk layout (meta/, tasks/, handlers/,
        vars/, defaults/) and register any plugins shipped in the role dir.
        """
        self._role_name = role_include.role
        self._role_path = role_include.get_role_path()
        self._role_params = role_include.get_role_params()
        self._variable_manager = role_include.get_variable_manager()
        self._loader = role_include.get_loader()
        if parent_role:
            self.add_parent(parent_role)
        # copy over all field attributes, except for when and tags, which
        # are special cases and need to preserve pre-existing values
        for (attr_name, _) in iteritems(self._valid_attrs):
            if attr_name not in ('when', 'tags'):
                setattr(self, attr_name, getattr(role_include, attr_name))
        current_when = getattr(self, 'when')[:]
        current_when.extend(role_include.when)
        setattr(self, 'when', current_when)
        current_tags = getattr(self, 'tags')[:]
        current_tags.extend(role_include.tags)
        setattr(self, 'tags', current_tags)
        # dynamically load any plugins from the role directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._role_path, obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)
        # load the role's other files, if they exist
        metadata = self._load_role_yaml('meta')
        if metadata:
            self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
            self._dependencies = self._load_dependencies()
        else:
            self._metadata = RoleMetadata()
        task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
        if task_data:
            try:
                self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
            except AssertionError as e:
                raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
                                         obj=task_data, orig_exc=e)
        handler_data = self._load_role_yaml('handlers')
        if handler_data:
            try:
                self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
                                                           variable_manager=self._variable_manager)
            except AssertionError as e:
                raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
                                         obj=handler_data, orig_exc=e)
        # vars and default vars are regular dictionaries
        self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'))
        if self._role_vars is None:
            self._role_vars = dict()
        elif not isinstance(self._role_vars, dict):
            raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
        self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'))
        if self._default_vars is None:
            self._default_vars = dict()
        elif not isinstance(self._default_vars, dict):
            raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
    def _load_role_yaml(self, subdir, main=None):
        """
        Load <role>/<subdir>/main.* (or the override named by ``main``);
        return the parsed data, or None when the subdirectory is absent.
        """
        file_path = os.path.join(self._role_path, subdir)
        if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
            main_file = self._resolve_main(file_path, main)
            if self._loader.path_exists(main_file):
                return self._loader.load_from_file(main_file)
            elif main is not None:
                # An explicitly requested override that is missing is an error.
                raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir, main))
        return None
    def _resolve_main(self, basepath, main=None):
        ''' flexibly handle variations in main filenames '''
        post = False
        # allow override if set, otherwise use default
        if main is None:
            main = 'main'
            post = True
        bare_main = os.path.join(basepath, main)
        possible_mains = (
            os.path.join(basepath, '%s.yml' % main),
            os.path.join(basepath, '%s.yaml' % main),
            os.path.join(basepath, '%s.json' % main),
        )
        # For the default name the extension-less file is tried last; an
        # explicit override is tried as given first, then with extensions.
        if post:
            possible_mains = possible_mains + (bare_main,)
        else:
            possible_mains = (bare_main,) + possible_mains
        if sum([self._loader.is_file(x) for x in possible_mains]) > 1:
            raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
        else:
            for m in possible_mains:
                if self._loader.is_file(m):
                    return m  # exactly one main file
            return possible_mains[0]  # zero mains (we still need to return something)
    def _load_dependencies(self):
        '''
        Recursively loads role dependencies from the metadata list of
        dependencies, if it exists
        '''
        deps = []
        if self._metadata:
            for role_include in self._metadata.dependencies:
                r = Role.load(role_include, play=self._play, parent_role=self)
                deps.append(r)
        return deps
    # other functions
    def add_parent(self, parent_role):
        ''' adds a role to the list of this roles parents '''
        if not isinstance(parent_role, Role):
            raise AnsibleAssertionError()
        if parent_role not in self._parents:
            self._parents.append(parent_role)
    def get_parents(self):
        # Roles (or dependency owners) through which this role was reached.
        return self._parents
    def get_default_vars(self, dep_chain=None):
        """
        Combine defaults from all dependencies, then the dependency chain,
        then this role itself (later sources win).
        """
        dep_chain = [] if dep_chain is None else dep_chain
        default_vars = dict()
        for dep in self.get_all_dependencies():
            default_vars = combine_vars(default_vars, dep.get_default_vars())
        if dep_chain:
            for parent in dep_chain:
                default_vars = combine_vars(default_vars, parent._default_vars)
        default_vars = combine_vars(default_vars, self._default_vars)
        return default_vars
    def get_inherited_vars(self, dep_chain=None):
        # Vars contributed by the roles in the dependency chain leading here.
        dep_chain = [] if dep_chain is None else dep_chain
        inherited_vars = dict()
        if dep_chain:
            for parent in dep_chain:
                inherited_vars = combine_vars(inherited_vars, parent._role_vars)
        return inherited_vars
    def get_role_params(self, dep_chain=None):
        # Role params from the dependency chain, overridden by this role's own.
        dep_chain = [] if dep_chain is None else dep_chain
        params = {}
        if dep_chain:
            for parent in dep_chain:
                params = combine_vars(params, parent._role_params)
        params = combine_vars(params, self._role_params)
        return params
    def get_vars(self, dep_chain=None, include_params=True):
        """
        Full variable view for this role: inherited vars, dependency vars,
        this role's own vars and (optionally) role params, in that order.
        """
        dep_chain = [] if dep_chain is None else dep_chain
        all_vars = self.get_inherited_vars(dep_chain)
        for dep in self.get_all_dependencies():
            all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params))
        all_vars = combine_vars(all_vars, self.vars)
        all_vars = combine_vars(all_vars, self._role_vars)
        if include_params:
            all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))
        return all_vars
    def get_direct_dependencies(self):
        # Shallow copy so callers cannot mutate our dependency list.
        return self._dependencies[:]
    def get_all_dependencies(self):
        '''
        Returns a list of all deps, built recursively from all child dependencies,
        in the proper order in which they should be executed or evaluated.
        '''
        child_deps = []
        for dep in self.get_direct_dependencies():
            for child_dep in dep.get_all_dependencies():
                child_deps.append(child_dep)
            child_deps.append(dep)
        return child_deps
    def get_task_blocks(self):
        # Shallow copy of this role's task blocks.
        return self._task_blocks[:]
    def get_handler_blocks(self, play, dep_chain=None):
        """
        Collect handler blocks from dependencies (depth first) and then this
        role, stamping each block copy with the dependency chain and play.
        """
        block_list = []
        # update the dependency chain here
        if dep_chain is None:
            dep_chain = []
        new_dep_chain = dep_chain + [self]
        for dep in self.get_direct_dependencies():
            dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
            block_list.extend(dep_blocks)
        for task_block in self._handler_blocks:
            new_task_block = task_block.copy()
            new_task_block._dep_chain = new_dep_chain
            new_task_block._play = play
            block_list.append(new_task_block)
        return block_list
    def has_run(self, host):
        '''
        Returns true if this role has been iterated over completely and
        at least one task was run
        '''
        return host.name in self._completed and not self._metadata.allow_duplicates
    def compile(self, play, dep_chain=None):
        '''
        Returns the task list for this role, which is created by first
        recursively compiling the tasks for all direct dependencies, and
        then adding on the tasks for this role.
        The role compile() also remembers and saves the dependency chain
        with each task, so tasks know by which route they were found, and
        can correctly take their parent's tags/conditionals into account.
        '''
        block_list = []
        # update the dependency chain here
        if dep_chain is None:
            dep_chain = []
        new_dep_chain = dep_chain + [self]
        deps = self.get_direct_dependencies()
        for dep in deps:
            dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
            block_list.extend(dep_blocks)
        for idx, task_block in enumerate(self._task_blocks):
            new_task_block = task_block.copy(exclude_parent=True)
            if task_block._parent:
                new_task_block._parent = task_block._parent.copy()
            new_task_block._dep_chain = new_dep_chain
            new_task_block._play = play
            if idx == len(self._task_blocks) - 1:
                # Flag the final block as "end of role" for the executor.
                new_task_block._eor = True
            block_list.append(new_task_block)
        return block_list
    def serialize(self, include_deps=True):
        """
        Serialize role state to a plain dict; parents are serialized without
        their own dependencies to avoid unbounded recursion.
        """
        res = super(Role, self).serialize()
        res['_role_name'] = self._role_name
        res['_role_path'] = self._role_path
        res['_role_vars'] = self._role_vars
        res['_role_params'] = self._role_params
        res['_default_vars'] = self._default_vars
        res['_had_task_run'] = self._had_task_run.copy()
        res['_completed'] = self._completed.copy()
        if self._metadata:
            res['_metadata'] = self._metadata.serialize()
        if include_deps:
            deps = []
            for role in self.get_direct_dependencies():
                deps.append(role.serialize())
            res['_dependencies'] = deps
        parents = []
        for parent in self._parents:
            parents.append(parent.serialize(include_deps=False))
        res['_parents'] = parents
        return res
    def deserialize(self, data, include_deps=True):
        """
        Restore role state from a dict produced by serialize().
        """
        self._role_name = data.get('_role_name', '')
        self._role_path = data.get('_role_path', '')
        self._role_vars = data.get('_role_vars', dict())
        self._role_params = data.get('_role_params', dict())
        self._default_vars = data.get('_default_vars', dict())
        self._had_task_run = data.get('_had_task_run', dict())
        self._completed = data.get('_completed', dict())
        if include_deps:
            deps = []
            for dep in data.get('_dependencies', []):
                r = Role()
                r.deserialize(dep)
                deps.append(r)
            setattr(self, '_dependencies', deps)
        parent_data = data.get('_parents', [])
        parents = []
        for parent in parent_data:
            r = Role()
            r.deserialize(parent, include_deps=False)
            parents.append(r)
        setattr(self, '_parents', parents)
        metadata_data = data.get('_metadata')
        if metadata_data:
            m = RoleMetadata()
            m.deserialize(metadata_data)
            self._metadata = m
        super(Role, self).deserialize(data)
    def set_loader(self, loader):
        # Propagate the loader to parents and dependencies as well.
        self._loader = loader
        for parent in self._parents:
            parent.set_loader(loader)
        for dep in self.get_direct_dependencies():
            dep.set_loader(loader)
| gpl-3.0 |
shangwuhencc/pcl | geometry/include/pcl/geometry/mesh_indices.py | 67 | 11907 | ##
# Software License Agreement (BSD License)
#
# Point Cloud Library (PCL) - www.pointclouds.org
# Copyright (c) 2009-2012, Willow Garage, Inc.
# Copyright (c) 2012-, Open Perception, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# # Neither the name of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
# Output path: the C++ header is generated next to this script.
filename = os.path.join (os.path.dirname (__file__), 'mesh_indices.h')
# One strongly-typed index class is generated per entry.
class_names = ['VertexIndex', 'HalfEdgeIndex', 'EdgeIndex', 'FaceIndex']
################################################################################
# NOTE: every f.write below emits literal C++ source; the strings must be
# preserved byte-for-byte or the generated header changes.
f = open (filename, 'w')
# Emit the BSD license banner of the generated header.
f.write ('/*\n')
f.write (' * Software License Agreement (BSD License)\n')
f.write (' *\n')
f.write (' * Point Cloud Library (PCL) - www.pointclouds.org\n')
f.write (' * Copyright (c) 2009-2012, Willow Garage, Inc.\n')
f.write (' * Copyright (c) 2012-, Open Perception, Inc.\n')
f.write (' *\n')
f.write (' * All rights reserved.\n')
f.write (' *\n')
f.write (' * Redistribution and use in source and binary forms, with or without\n')
f.write (' * modification, are permitted provided that the following conditions\n')
f.write (' * are met:\n')
f.write (' *\n')
f.write (' * * Redistributions of source code must retain the above copyright\n')
f.write (' * notice, this list of conditions and the following disclaimer.\n')
f.write (' * * Redistributions in binary form must reproduce the above\n')
f.write (' * copyright notice, this list of conditions and the following\n')
f.write (' * disclaimer in the documentation and/or other materials provided\n')
f.write (' * with the distribution.\n')
f.write (' * * Neither the name of the copyright holder(s) nor the names of its\n')
f.write (' * contributors may be used to endorse or promote products derived\n')
f.write (' * from this software without specific prior written permission.\n')
f.write (' *\n')
f.write (' * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n')
f.write (' * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n')
f.write (' * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n')
f.write (' * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n')
f.write (' * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n')
f.write (' * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n')
f.write (' * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n')
f.write (' * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n')
f.write (' * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n')
f.write (' * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n')
f.write (' * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n')
f.write (' * POSSIBILITY OF SUCH DAMAGE.\n')
f.write (' *\n')
f.write (' * $Id$\n')
f.write (' *\n')
f.write (' */\n\n')
f.write ("// NOTE: This file has been created with 'pcl_src/geometry/include/pcl/geometry/mesh_indices.py'\n\n")
# Include guard and common includes of the generated header.
f.write ('#ifndef PCL_GEOMETRY_MESH_INDICES_H\n')
f.write ('#define PCL_GEOMETRY_MESH_INDICES_H\n\n')
f.write ('#include <iostream>\n\n')
f.write ('#include <pcl/geometry/boost.h>\n\n')
# Emit one index class per name; each class wraps an int and gets its
# comparison/arithmetic operators filled in via boost::operators.
for cn in class_names:
  f.write ('////////////////////////////////////////////////////////////////////////////////\n')
  f.write ('// ' + cn + '\n')
  f.write ('////////////////////////////////////////////////////////////////////////////////\n\n')
  f.write ('namespace pcl\n')
  f.write ('{\n')
  f.write (' namespace geometry\n')
  f.write (' {\n')
  f.write (' /** \\brief Index used to access elements in the half-edge mesh. It is basically just a wrapper around an integer with a few added methods.\n')
  f.write (' * \\author Martin Saelzle\n')
  f.write (' * \ingroup geometry\n')
  f.write (' */\n')
  f.write (' class ' + cn + '\n')
  f.write (' : boost::totally_ordered <pcl::geometry::' + cn + ' // < > <= >= == !=\n')
  f.write (' , boost::unit_steppable <pcl::geometry::' + cn + ' // ++ -- (pre and post)\n')
  f.write (' , boost::additive <pcl::geometry::' + cn + ' // += + -= -\n')
  f.write (' > > >\n')
  f.write (' {\n')
  f.write (' public:\n\n')
  f.write (' typedef boost::totally_ordered <pcl::geometry::' + cn + ',\n')
  f.write (' boost::unit_steppable <pcl::geometry::' + cn + ',\n')
  f.write (' boost::additive <pcl::geometry::' + cn + '> > > Base;\n')
  f.write (' typedef pcl::geometry::' + cn + ' Self;\n\n')
  f.write (' /** \\brief Constructor. Initializes with an invalid index. */\n')
  f.write (' ' + cn + ' ()\n')
  f.write (' : index_ (-1)\n')
  f.write (' {\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Constructor.\n')
  f.write (' * \param[in] index The integer index.\n')
  f.write (' */\n')
  f.write (' explicit ' + cn + ' (const int index)\n')
  f.write (' : index_ (index)\n')
  f.write (' {\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Returns true if the index is valid. */\n')
  f.write (' inline bool\n')
  f.write (' isValid () const\n')
  f.write (' {\n')
  f.write (' return (index_ >= 0);\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Invalidate the index. */\n')
  f.write (' inline void\n')
  f.write (' invalidate ()\n')
  f.write (' {\n')
  f.write (' index_ = -1;\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Get the index. */\n')
  f.write (' inline int\n')
  f.write (' get () const\n')
  f.write (' {\n')
  f.write (' return (index_);\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Set the index. */\n')
  f.write (' inline void\n')
  f.write (' set (const int index)\n')
  f.write (' {\n')
  f.write (' index_ = index;\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Comparison operators (with boost::operators): < > <= >= */\n')
  f.write (' inline bool\n')
  f.write (' operator < (const Self& other) const\n')
  f.write (' {\n')
  f.write (' return (this->get () < other.get ());\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Comparison operators (with boost::operators): == != */\n')
  f.write (' inline bool\n')
  f.write (' operator == (const Self& other) const\n')
  f.write (' {\n')
  f.write (' return (this->get () == other.get ());\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Increment operators (with boost::operators): ++ (pre and post) */\n')
  f.write (' inline Self&\n')
  f.write (' operator ++ ()\n')
  f.write (' {\n')
  f.write (' ++index_;\n')
  f.write (' return (*this);\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Decrement operators (with boost::operators): \-\- (pre and post) */\n')
  f.write (' inline Self&\n')
  f.write (' operator -- ()\n')
  f.write (' {\n')
  f.write (' --index_;\n')
  f.write (' return (*this);\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Addition operators (with boost::operators): + += */\n')
  f.write (' inline Self&\n')
  f.write (' operator += (const Self& other)\n')
  f.write (' {\n')
  f.write (' index_ += other.get ();\n')
  f.write (' return (*this);\n')
  f.write (' }\n\n')
  f.write (' /** \\brief Subtraction operators (with boost::operators): - -= */\n')
  f.write (' inline Self&\n')
  f.write (' operator -= (const Self& other)\n')
  f.write (' {\n')
  f.write (' index_ -= other.get ();\n')
  f.write (' return (*this);\n')
  f.write (' }\n\n')
  f.write (' private:\n\n')
  f.write (' /** \\brief Stored index. */\n')
  f.write (' int index_;\n\n')
  f.write (' friend std::istream&\n')
  f.write (' operator >> (std::istream& is, pcl::geometry::' + cn + '& index);\n')
  f.write (' };\n\n')
  f.write (' /** \\brief ostream operator. */\n')
  f.write (' inline std::ostream&\n')
  f.write (' operator << (std::ostream& os, const pcl::geometry::' + cn + '& index)\n')
  f.write (' {\n')
  f.write (' return (os << index.get ());\n')
  f.write (' }\n\n')
  f.write (' /** \\brief istream operator. */\n')
  f.write (' inline std::istream&\n')
  f.write (' operator >> (std::istream& is, pcl::geometry::' + cn + '& index)\n')
  f.write (' {\n')
  f.write (' return (is >> index.index_);\n')
  f.write (' }\n\n')
  f.write (' } // End namespace geometry\n')
  f.write ('} // End namespace pcl\n\n')
# Conversion helpers between edge and half-edge indices are emitted once,
# after all the index classes.
f.write ('////////////////////////////////////////////////////////////////////////////////\n')
f.write ('// Conversions\n')
f.write ('////////////////////////////////////////////////////////////////////////////////\n\n')
f.write ('namespace pcl\n')
f.write ('{\n')
f.write (' namespace geometry\n')
f.write (' {\n')
f.write (' /** \\brief Convert the given half-edge index to an edge index. */\n')
f.write (' inline pcl::geometry::EdgeIndex\n')
f.write (' toEdgeIndex (const HalfEdgeIndex& index)\n')
f.write (' {\n')
f.write (' return (index.isValid () ? EdgeIndex (index.get () / 2) : EdgeIndex ());\n')
f.write (' }\n\n')
f.write (' /** \\brief Convert the given edge index to a half-edge index.\n')
f.write (' * \\param[in] get_first The first half-edge of the edge is returned if this variable is true; elsewise the second.\n')
f.write (' */\n')
f.write (' inline pcl::geometry::HalfEdgeIndex\n')
f.write (' toHalfEdgeIndex (const EdgeIndex& index, const bool get_first=true)\n')
f.write (' {\n')
f.write (' return (index.isValid () ? HalfEdgeIndex (index.get () * 2 + static_cast <int> (!get_first)) : HalfEdgeIndex ());\n')
f.write (' }\n')
f.write (' } // End namespace geometry\n')
f.write ('} // End namespace pcl\n\n')
# Close the include guard and the file.
f.write ('#endif // PCL_GEOMETRY_MESH_INDICES_H\n')
f.close()
| bsd-3-clause |
sajeeshcs/nested_quota_final | nova/tests/unit/api/openstack/common.py | 63 | 1685 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
def webob_factory(url):
    """Factory for removing duplicate webob code from tests.

    Returns a helper that builds a webob request for a path relative
    to *url*, optionally setting the HTTP method and a JSON body.
    """
    base_url = url

    def web_request(path, method=None, body=None):
        # Build the blank request against the fixed base URL.
        request = webob.Request.blank("%s%s" % (base_url, path))
        if method:
            request.content_type = "application/json"
            request.method = method
        if body:
            request.body = jsonutils.dumps(body)
        return request

    return web_request
def compare_links(actual, expected):
    """Compare xml atom links."""
    link_keys = ('rel', 'href', 'type')
    return compare_tree_to_dict(actual, expected, link_keys)
def compare_media_types(actual, expected):
    """Compare xml media types."""
    media_keys = ('base', 'type')
    return compare_tree_to_dict(actual, expected, media_keys)
def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts.

    Elements of *actual* and *expected* are paired up; the pair matches
    when every key in *keys* yields the same value via ``.get()``.
    """
    return all(elem.get(key) == data.get(key)
               for elem, data in zip(actual, expected)
               for key in keys)
| apache-2.0 |
Davidjohnwilson/sympy | sympy/logic/algorithms/dpll.py | 58 | 9316 | """Implementation of DPLL algorithm
Further improvements: eliminate calls to pl_true, implement branching rules,
efficient unit propagation.
References:
- http://en.wikipedia.org/wiki/DPLL_algorithm
- http://bioinformatics.louisville.edu/ouyang/MingOuyangThesis.pdf
"""
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy import default_sort_key
from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \
to_int_repr, _find_predicates
from sympy.logic.inference import pl_true, literal_symbol
def dpll_satisfiable(expr):
    """
    Check satisfiability of a propositional sentence.
    It returns a model rather than True when it succeeds

    >>> from sympy.abc import A, B
    >>> from sympy.logic.algorithms.dpll import dpll_satisfiable
    >>> dpll_satisfiable(A & ~B)
    {A: True, B: False}
    >>> dpll_satisfiable(A & ~A)
    False

    """
    clauses = conjuncts(to_cnf(expr))
    if False in clauses:
        # The CNF contains a trivially false clause.
        return False
    symbols = sorted(_find_predicates(expr), key=default_sort_key)
    # Map each symbol to the integer 1..n for the fast integer solver.
    symbols_int_repr = set(range(1, len(symbols) + 1))
    clauses_int_repr = to_int_repr(clauses, symbols)
    result = dpll_int_repr(clauses_int_repr, symbols_int_repr, {})
    if not result:
        return result
    # Translate the integer model back to a symbol -> bool mapping.
    return dict((symbols[key - 1], value) for key, value in result.items())
def dpll(clauses, symbols, model):
    """
    Compute satisfiability in a partial model.
    Clauses is an array of conjuncts.

    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import dpll
    >>> dpll([A, B, D], [A, B], {D: False})
    False

    NOTE: ``symbols`` and ``model`` are mutated in place by this function.
    """
    # compute DP kernel
    # Unit propagation: while some clause forces a literal, commit it to
    # the model and simplify the clause set accordingly.
    P, value = find_unit_clause(clauses, model)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            # Propagate the negation when the forced value is False.
            P = ~P
        clauses = unit_propagate(clauses, P)
        P, value = find_unit_clause(clauses, model)
    # Pure literal elimination: a symbol occurring with only one polarity
    # can be assigned that polarity without loss of satisfiability.
    P, value = find_pure_symbol(symbols, clauses)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            P = ~P
        clauses = unit_propagate(clauses, P)
        P, value = find_pure_symbol(symbols, clauses)
    # end DP kernel
    # Evaluate every clause under the current partial model; collect the
    # still-undetermined ones.
    unknown_clauses = []
    for c in clauses:
        val = pl_true(c, model)
        if val is False:
            # A falsified clause makes the whole formula unsatisfiable here.
            return False
        if val is not True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        # Every clause is satisfied: the model is complete enough.
        return model
    if not clauses:
        return model
    # Splitting rule: branch on the next unassigned symbol, trying True
    # first and False on backtrack (copies keep the branches independent).
    P = symbols.pop()
    model_copy = model.copy()
    model.update({P: True})
    model_copy.update({P: False})
    symbols_copy = symbols[:]
    return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or
            dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy))
def dpll_int_repr(clauses, symbols, model):
    """
    Compute satisfiability in a partial model.
    Arguments are expected to be in integer representation
    (clauses are sets of nonzero ints; a negative int is a negated atom).

    >>> from sympy.logic.algorithms.dpll import dpll_int_repr
    >>> dpll_int_repr([set([1]), set([2]), set([3])], set([1, 2]), {3: False})
    False

    NOTE: ``symbols`` and ``model`` are mutated in place by this function.
    """
    # compute DP kernel
    # Unit propagation on the integer representation.
    P, value = find_unit_clause_int_repr(clauses, model)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            # Negating an integer literal is just sign flipping.
            P = -P
        clauses = unit_propagate_int_repr(clauses, P)
        P, value = find_unit_clause_int_repr(clauses, model)
    # Pure literal elimination.
    P, value = find_pure_symbol_int_repr(symbols, clauses)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            P = -P
        clauses = unit_propagate_int_repr(clauses, P)
        P, value = find_pure_symbol_int_repr(symbols, clauses)
    # end DP kernel
    # Evaluate each clause under the partial model; keep the undecided ones.
    unknown_clauses = []
    for c in clauses:
        val = pl_true_int_repr(c, model)
        if val is False:
            return False
        if val is not True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    # Splitting rule: branch on an arbitrary remaining symbol.
    P = symbols.pop()
    model_copy = model.copy()
    model.update({P: True})
    model_copy.update({P: False})
    symbols_copy = symbols.copy()
    return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or
            dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy))
### helper methods for DPLL
def pl_true_int_repr(clause, model=None):
    """
    Lightweight version of pl_true.
    Argument clause represents the set of args of an Or clause. This is used
    inside dpll_int_repr, it is not meant to be used directly.

    Returns True if some literal is true under ``model``, False if every
    literal is false, and None if the clause is still undetermined.

    >>> from sympy.logic.algorithms.dpll import pl_true_int_repr
    >>> pl_true_int_repr(set([1, 2]), {1: False})
    >>> pl_true_int_repr(set([1, 2]), {1: False, 2: False})
    False

    """
    # A None sentinel replaces the former mutable ``{}`` default argument,
    # which is a latent shared-state bug if the dict is ever mutated.
    if model is None:
        model = {}
    result = False
    for lit in clause:
        if lit < 0:
            # Negative literal: look up the atom and negate its value.
            p = model.get(-lit)
            if p is not None:
                p = not p
        else:
            p = model.get(lit)
        if p is True:
            # One true literal satisfies the whole Or clause.
            return True
        elif p is None:
            # An unassigned literal leaves the clause undetermined unless
            # a true literal is found later.
            result = None
    return result
def unit_propagate(clauses, symbol):
    """
    Returns an equivalent set of clauses
    If a set of clauses contains the unit clause l, the other clauses are
    simplified by the application of the two following rules:

        1. every clause containing l is removed
        2. in every clause that contains ~l this literal is deleted

    Arguments are expected to be in CNF.

    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import unit_propagate
    >>> unit_propagate([A | B, D | ~B, B], B)
    [D, B]

    """
    negated = ~symbol
    simplified = []
    for clause in clauses:
        if clause.func != Or:
            # Not a disjunction: keep the clause untouched.
            simplified.append(clause)
            continue
        # The first argument equal to the symbol or to its negation decides
        # the fate of the clause (mirrors a left-to-right scan).
        hit = next((arg for arg in clause.args
                    if arg == symbol or arg == negated), None)
        if hit is None:
            simplified.append(clause)
        elif hit == negated:
            # Delete the falsified literal from the clause.
            simplified.append(Or(*[arg for arg in clause.args if arg != negated]))
        # hit == symbol: the clause is satisfied and dropped entirely.
    return simplified
def unit_propagate_int_repr(clauses, s):
    """
    Same as unit_propagate, but arguments are expected to be in integer
    representation

    >>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr
    >>> unit_propagate_int_repr([set([1, 2]), set([3, -2]), set([2])], 2)
    [set([3])]

    """
    complement = {-s}
    # Drop satisfied clauses; strip the negated literal from the rest.
    return [clause - complement for clause in clauses if s not in clause]
def find_pure_symbol(symbols, unknown_clauses):
    """
    Find a symbol and its value if it appears only as a positive literal
    (or only as a negative) in clauses.

    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import find_pure_symbol
    >>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A])
    (A, True)

    """
    for sym in symbols:
        seen_pos = seen_neg = False
        for clause in unknown_clauses:
            literals = disjuncts(clause)
            seen_pos = seen_pos or sym in literals
            seen_neg = seen_neg or Not(sym) in literals
        if seen_pos != seen_neg:
            # Exactly one polarity observed: the symbol is pure.
            return sym, seen_pos
    return None, None
def find_pure_symbol_int_repr(symbols, unknown_clauses):
    """
    Same as find_pure_symbol, but arguments are expected
    to be in integer representation

    >>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr
    >>> find_pure_symbol_int_repr(set([1,2,3]),
    ...     [set([1, -2]), set([-2, -3]), set([3, 1])])
    (1, True)

    """
    literals = set().union(*unknown_clauses)
    positives = literals.intersection(symbols)
    negatives = literals.intersection([-s for s in symbols])
    # A literal is pure when its opposite polarity never occurs.
    for lit in positives:
        if -lit not in negatives:
            return lit, True
    for lit in negatives:
        if -lit not in positives:
            return -lit, False
    return None, None
def find_unit_clause(clauses, model):
    """
    A unit clause has only 1 variable that is not bound in the model.

    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import find_unit_clause
    >>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True})
    (B, False)

    """
    for clause in clauses:
        # Collect the literals whose symbols are still unassigned.
        unbound = [lit for lit in disjuncts(clause)
                   if literal_symbol(lit) not in model]
        if len(unbound) == 1:
            lit = unbound[0]
            # A negated literal forces its symbol to False.
            return literal_symbol(lit), not (lit.func is Not)
    return None, None
def find_unit_clause_int_repr(clauses, model):
    """
    Same as find_unit_clause, but arguments are expected to be in
    integer representation.

    >>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr
    >>> find_unit_clause_int_repr([set([1, 2, 3]),
    ...     set([2, -3]), set([1, -2])], {1: True})
    (2, False)

    """
    # Every literal whose atom is already assigned, in either polarity.
    assigned = set(model) | {-sym for sym in model}
    for clause in clauses:
        free = clause - assigned
        if len(free) == 1:
            lit = free.pop()
            # A negative literal forces its atom to False.
            return (-lit, False) if lit < 0 else (lit, True)
    return None, None
| bsd-3-clause |
rgeleta/odoo | addons/hr_recruitment/wizard/hr_recruitment_create_partner_job.py | 337 | 3434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_recruitment_partner_create(osv.osv_memory):
    """Transient wizard that turns the selected job applications
    (``hr.applicant`` records in the active selection) into
    ``res.partner`` contacts."""
    _name = 'hr.recruitment.partner.create'
    _description = 'Create Partner from job application'
    _columns = {
        'close': fields.boolean('Close job request'),
    }

    def view_init(self, cr, uid, fields_list, context=None):
        """Abort opening the wizard if any selected application already
        has a contact attached."""
        case_obj = self.pool.get('hr.applicant')
        if context is None:
            context = {}
        # context['active_ids'] holds the applicant ids selected in the UI.
        for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
            if case.partner_id:
                raise osv.except_osv(_('Error!'),
                    _('A contact is already defined on this job request.'))
        pass

    def make_order(self, cr, uid, ids, context=None):
        """Create one res.partner per selected applicant, link it back to
        the applicant, and open the partner form view.

        Raises except_osv when a partner with the same name already exists.
        """
        mod_obj = self.pool.get('ir.model.data')
        partner_obj = self.pool.get('res.partner')
        case_obj = self.pool.get('hr.applicant')
        if context is None:
            context = {}
        # NOTE(review): 'data' (the wizard's own values, e.g. 'close') is
        # read but never used below.
        data = self.read(cr, uid, ids, context=context)[0]
        # Resolve the standard partner search view for the returned action.
        result = mod_obj._get_id(cr, uid, 'base', 'view_res_partner_filter')
        res = mod_obj.read(cr, uid, result, ['res_id'], context=context)
        for case in case_obj.browse(cr, uid, context['active_ids'], context=context):
            # Refuse to create a duplicate partner by (exact) name.
            partner_id = partner_obj.search(cr, uid, [('name', '=', case.partner_name or case.name)], context=context)
            if partner_id:
                raise osv.except_osv(_('Error!'),_('A contact is already existing with the same name.'))
            partner_id = partner_obj.create(cr, uid, {
                'name': case.partner_name or case.name,
                'user_id': case.user_id.id,
                'comment': case.description,
                'phone': case.partner_phone,
                'mobile': case.partner_mobile,
                'email': case.email_from
            }, context=context)
            case_obj.write(cr, uid, [case.id], {
                'partner_id': partner_id,
            }, context=context)
        # NOTE(review): partner_id here is the one created in the LAST loop
        # iteration; with several applicants selected only that partner's
        # form is opened.
        return {
            'domain': "[]",
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'res.partner',
            'res_id': int(partner_id),
            'view_id': False,
            'type': 'ir.actions.act_window',
            'search_view_id': res['res_id']
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liutc10/digit_recog | code/vlfeat-0.9.20/docsrc/webdoc.py | 34 | 44995 | #!/usr/bin/python
# file: webdoc.py
# author: Andrea Vedaldi
# description: A website formatter utility
# Copyright (C) 2007-13 Andrea Vedaldi and Brian Fulkerson.
# All rights reserved.
#
# This file is part of the VLFeat library and is made available under
# the terms of the BSD license (see the COPYING file).
import cProfile
import types
import xml.sax
import xml.sax.saxutils
import re
import os
import sys
import random
import copy
import htmlentitydefs
from xml.sax.handler import ContentHandler
from xml.sax import parse
from urlparse import urlparse
from urlparse import urlunparse
from optparse import OptionParser
from doxytag import Doxytag
# this is used for syntax highlighting
# Pygments is optional; syntax highlighting is disabled when it is absent.
try:
    import pygments
    import pygments.lexers
    import pygments.formatters
    has_pygments = True
except ImportError:
    has_pygments = False

# XHTML 1.0 Transitional doctype emitted in generated pages.
DOCTYPE_XHTML_TRANSITIONAL = \
    '<!DOCTYPE html PUBLIC ' \
    '"-//W3C//DTD XHTML 1.0 Transitional//EN" ' \
    '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'

# Create a dictionary that maps unicode characters to HTML entities
# ('&', '<', '>' are skipped so markup escaping stays explicit).
mapUnicodeToHtmlEntity = { }
for k, v in htmlentitydefs.name2codepoint.items():
    c = unichr(v)
    if c == u'&' or c == u'<' or c == u'>': continue
    mapUnicodeToHtmlEntity [c] = "&%s;" % k

# This indexes the document nodes by ID
nodeIndex = { }
# Counter used by getUniqueNodeID() to disambiguate duplicate IDs.
nodeUniqueCount = 0
# Doxygen tag index and output sub-directory, set up elsewhere at startup.
doxygenIndex = None
doxygenDir = ''
def getDoxygenURL(tag):
    """
    Return the publish URL of the Doxygen entry identified by *tag*,
    prefixed by the site root URL and the Doxygen sub-directory when set.
    """
    parts = []
    rootURL = nodeIndex['root'].getPublishURL()
    if rootURL:
        parts.append(rootURL)
    if doxygenDir:
        parts.append(doxygenDir)
    parts.append(doxygenIndex.index[tag])
    return '/'.join(parts)
def getUniqueNodeID(id=None):
    """
    getUniqueNodeID() generates an unique ID for a document node.
    getUniqueNodeID(id) generates an unique ID adding a suffix to id.
    """
    global nodeUniqueCount
    base = "id" if id is None else id
    candidate = base
    # Append an increasing numeric suffix until the ID is unused.
    while candidate in nodeIndex:
        nodeUniqueCount += 1
        candidate = "%s-%d" % (base, nodeUniqueCount)
    return candidate
def dumpIndex():
    """
    Dump the node index, for debugging purposes.
    """
    for node in nodeIndex.itervalues():
        print(node)
def ensureDir(dirName):
    """
    Create the directory dirName, including any missing parent
    directories, if it does not exist yet.  Raises OSError when a
    regular file with that name already exists.
    """
    if os.path.isdir(dirName):
        return
    if os.path.isfile(dirName):
        # BUG FIX: the original raised "% newdir", an undefined name,
        # turning this error path into a NameError.
        raise OSError("cannot create the directory '%s'"
                      " because there exists already "
                      "a file with that name" % dirName)
    head, tail = os.path.split(dirName)
    # Recursively create the missing parents first.
    if head and not os.path.isdir(head):
        ensureDir(head)
    if tail:
        os.mkdir(dirName)
def calcRelURL(toURL, fromURL):
    """
    Calculates a relative URL leading from fromURL to toURL.  When the
    scheme or network location differ, toURL is returned absolute.
    """
    fromParts = urlparse(fromURL)
    toParts = urlparse(toURL)
    if fromParts.scheme != toParts.scheme or fromParts.netloc != toParts.netloc:
        return urlunparse(toParts)
    fromPath = fromParts.path.split("/")
    toPath = toParts.path.split("/")
    # Re-append the separator to every directory component; the last
    # entry is the file name (possibly empty) and is left alone.
    fromPath = [p + u"/" for p in fromPath[:-1]] + fromPath[-1:]
    toPath = [p + u"/" for p in toPath[:-1]] + toPath[-1:]
    # Find the common directory prefix (never comparing file names).
    common = 0
    while (common < len(fromPath) - 1 and common < len(toPath) - 1
           and fromPath[common] == toPath[common]):
        common += 1
    # Every remaining source directory becomes one '../' step; the root
    # marker and empty components ('a//b') contribute nothing.
    ups = [u"../" if len(p) > 1 else u"" for p in fromPath[common:-1]]
    relPath = u"".join(ups) + u"".join(toPath[common:])
    return urlunparse(("", "", relPath, "", "", toParts.fragment))
def walkNodes(rootNode, nodeType=None, nodeBarrier=None):
    """
    Yield rootNode and its descendants in post-order (children before
    parents).  Nodes matching nodeBarrier prune their whole subtree;
    when nodeType is given, only matching nodes are yielded.
    """
    if nodeBarrier and rootNode.isA(nodeBarrier):
        return
    for child in rootNode.getChildren():
        for descendant in walkNodes(child, nodeType, nodeBarrier):
            yield descendant
    if not nodeType or rootNode.isA(nodeType):
        yield rootNode
def walkAncestors(leafNode, nodeType=None):
    """
    Yield leafNode and its chain of ancestors, closest first.  When
    nodeType is given, only matching nodes are yielded.
    """
    node = leafNode
    while True:
        if not nodeType or node.isA(nodeType):
            yield node
        node = node.getParent()
        if not node:
            break
# --------------------------------------------------------------------
class DocLocation:
    """
    A position inside a source document: a URL (file), a row number,
    and a column number.
    """

    def __init__(self, URL, row, column):
        self.URL = URL
        self.row = row
        self.column = column

    def __str__(self):
        # Rendered in the classic compiler style "file:row:col".
        return "%s:%d:%d" % (self.URL, self.row, self.column)
# --------------------------------------------------------------------
class DocError(BaseException):
    """
    An error made of a message plus a stack of source locations, the
    innermost location first.
    """

    def __init__(self, message):
        BaseException.__init__(self, message)
        self.locations = []

    def __str__(self):
        if not self.locations:
            return self.message
        # Outer locations become "included from ..." lines; the innermost
        # one prefixes the actual error message.
        chain = "".join("included from %s:\n" % loc
                        for loc in reversed(self.locations[1:]))
        return chain + "%s: error: %s" % (self.locations[0],
                                          BaseException.__str__(self))

    def appendLocation(self, location):
        self.locations.append(location)
        return self
# --------------------------------------------------------------------
class makeGuard(object):
# --------------------------------------------------------------------
    """
    Decorates the method of a DocNode object so that,
    on raising a DocError exception, the location of the node
    is appended to it.
    """
    def __init__(self, func):
        # The wrapped (undecorated) method.
        self.func = func
    def __call__(self, obj, *args, **keys):
        # NOTE(review): the wrapped function's return value is discarded;
        # presumably all guarded methods return None — confirm before
        # guarding anything that returns a value.
        try:
            self.func(obj, *args, **keys)
        except DocError, e:
            # Tag the error with the node's location only once (the
            # innermost guarded frame wins).
            if len(e.locations) == 0:
                e.appendLocation(obj.getLocation())
            raise e
        except:
            # Any other exception propagates untouched.
            raise
    def __get__(self, obj, type=None):
        # Descriptor protocol: produce a bound method so the guard works
        # as an instance-method decorator (Python 2 three-argument form).
        return types.MethodType(self, obj, type)
# --------------------------------------------------------------------
class DocBareNode:
    """
    A minimal document-tree node without parent, children or attributes.
    Used as the base of lightweight leaf nodes such as text chunks; most
    operations are no-ops here.
    """

    def __init__(self):
        pass

    def isA(self, classInfo):
        """Return True when this node is an instance of classInfo."""
        return isinstance(self, classInfo)

    def getChildren(self):
        """Return the list of children, which is always empty."""
        return []

    def setParent(self, parent):
        pass

    def getPublishDirName(self):
        pass

    def getPublishFileName(self):
        pass

    def getPublishURL(self):
        pass

    def visit(self, generator):
        pass

    def publish(self, generator, pageNode=None):
        pass

    def publishIndex(self, gen, inPage, activePageNodes, full=False):
        return False
# --------------------------------------------------------------------
class DocNode(DocBareNode):
# --------------------------------------------------------------------
"""
A node of the document with a parent, childern, attributes, and
additional meta-information such as the location
of the XML element that caused this node to be generated.
"""
def __init__(self, attrs, URL, locator):
self.parent = None
self.children = []
self.attrs = attrs
self.sourceURL = None
self.sourceRow = None
self.sourceColumn = None
if attrs.has_key('id'):
self.id = attrs['id']
else:
self.id = getUniqueNodeID()
self.sourceURL = URL
if locator:
self.sourceRow = locator.getLineNumber()
self.sourceColumn = locator.getColumnNumber()
nodeIndex[self.id] = self
def __str__(self):
return "%s:%s -> %s" % (self.getLocation(), self.getID(), self.getPublishURL())
def dump(self):
"""
Recusively dump the tree of nodes, for debugging purposes.
"""
depth = self.getDepth()
print " " * depth, self
for x in self.children: x.dump()
def getID(self):
"""
Return the node ID.
"""
return self.id
def getParent(self):
"""
Return the node parent.
"""
return self.parent
def getChildren(self):
"""
Return the list of node children.
"""
return self.children
def getAttributes(self):
"""
Return the dictionary of node attributes.
"""
return self.attrs
def getDepth(self):
"""
Return the depth of the node in the tree.
"""
if self.parent:
return self.parent.getDepth() + 1
else:
return 0
def setParent(self, parent):
"""
Set the parent of the node.
"""
self.parent = parent
def adopt(self, orfan):
"""
Adds ORFAN to the node children and make the node the parent
of ORFAN. ORFAN can also be a sequence of orfans.
"""
self.children.append(orfan)
orfan.setParent(self)
def findAncestors(self, nodeType = None):
"""
Return the node ancestors of type NODETYPE. If NODETYPE is
None, returns all ancestors.
"""
if nodeType is None:
nodeType = DocNode
if self.parent:
if self.parent.isA(nodeType):
found = [self.parent]
else:
found = []
found = found + self.parent.findAncestors(nodeType)
return found
return []
def findChildren(self, nodeType = None):
"""
Returns the node chldren of type NODTYPE. If NODETYPE is None,
returns all children.
"""
if nodeType is None:
nodeType = DocNode
return [x for x in self.children if x.isA(nodeType)]
def getLocation(self):
"""
Get the location (file, row number, and column number)
where this node was instanitated
"""
location = DocLocation(self.sourceURL,
self.sourceRow,
self.sourceColumn)
if self.parent:
parentLocation = self.parent.getLocation()
if location.URL is None: location.URL = parentLocation.URL
if location.row is None: location.URL = parentLocation.row
if location.column is None: location.URL = parentLocation.column
return location
def getPublishDirName(self):
"""
Returns the publish dir name of the parent.
"""
if self.parent:
return self.parent.getPublishDirName()
return None
def getPublishFileName(self):
"""
Returns NONE.
"""
return None
def getPublishURL(self):
"""
Returns NONE.
"""
return None
def visit(self, generator):
"""
Recursively calls VISIT() on its children.
"""
for c in self.getChildren():
c.visit(generator)
return None
def publish(self, generator, pageNode = None):
"""
Recursively calls PUBLISH() on its children.
"""
for c in self.getChildren():
c.publish(generator, pageNode)
return None
publish = makeGuard(publish)
def publishIndex(self, gen, inPage, activePageNodes, full=False):
"""
Recursively calls PUBLISHINDEX() on its children.
"""
hasIndexedChildren = False
for c in self.getChildren():
hasIndexedChildren = c.publishIndex(gen, inPage, activePageNodes, full) \
or hasIndexedChildren
return hasIndexedChildren
def publishTableOfContents(self, gen, pageNode):
"""
Create a TOC corresponding to the H1, H2, ... tags in a DocPage."
"""
gen.putString("<div class='toc'>\n")
gen.putString("<h3>Table of Contents</h3>")
previousLevel = 0
for q in pageNode.getChildren():
for x in walkNodes(q, DocHtmlElement, DocPage):
if x.tag not in ['h1', 'h2', 'h3', 'h4', 'h5']: continue
level = int(x.tag[1]) # e.g. h2 -> level = 2
title = "".join([y.text for y in walkNodes(x, DocHtmlText)])
while previousLevel < level:
gen.putString("<ul>")
previousLevel += 1
while previousLevel > level:
gen.putString("</ul>\n")
previousLevel -= 1
gen.putString('<li class="level%d">'
'<a href="#%s">%s</a>'
'</li>\n' % (level, x.id, title))
while previousLevel > 0:
gen.putString("</ul>\n")
previousLevel -= 1
gen.putString("</div><!-- Table of contents -->\n")
def expandAttr(self, value, pageNode):
"""
Expand an attribute by substituting any directive with its value.
"""
xvalue = ""
next = 0
for m in re.finditer("%[-\w._#:]+;", value):
if next < m.start():
xvalue += value[next : m.start()]
next = m.end()
directive = value[m.start()+1 : m.end()-1]
mo = re.match('pathto:(.*)', directive)
if mo:
toNodeID = mo.group(1)
toNodeURL = None
if nodeIndex.has_key(toNodeID):
toNodeURL = nodeIndex[toNodeID].getPublishURL()
if toNodeURL is None:
print "%s: warning: could not cross-reference '%s'" % (self.getLocation(), toNodeID)
toNodeURL = toNodeID
fromPageURL = pageNode.getPublishURL()
xvalue += calcRelURL(toNodeURL, fromPageURL)
continue
mo = re.match('env:(.*)', directive)
if mo:
envName = mo.group(1)
if envName in os.environ:
xvalue += os.environ[envName]
else:
print "%s: warning: the environment variable '%s' not defined" % (self.getLocation(), envName)
continue
mo = re.match('dox:(.*)', directive)
if mo:
if doxygenIndex is None:
if opts.verb > 1:
print "%s: warning: no Doxygen tag file loaded, skipping this directive." % self.getLocation()
continue
if not mo.group(1) in doxygenIndex.index:
print "%s: warning: the ID %s was not found in the Doxygen tag file." % (self.getLocation(), mo.group(2))
continue
toNodeURL = getDoxygenURL(mo.group(1))
fromPageURL = pageNode.getPublishURL()
xvalue += calcRelURL(toNodeURL, fromPageURL)
continue
raise DocError("unknown directive '%s' found while expanding an attribute" % directive)
if next < len(value): xvalue += value[next:]
#print "EXPAND: ", value, " -> ", xvalue
return xvalue
# --------------------------------------------------------------------
class DocInclude(DocNode):
# --------------------------------------------------------------------
    """Node for a <web:include src="..."> element."""
    def __init__(self, attrs, URL, locator):
        """Create the node; a 'src' attribute naming the file is mandatory."""
        DocNode.__init__(self, attrs, URL, locator)
        if not attrs.has_key("src"):
            raise DocError("include missing 'src' attribute")
        self.filePath = attrs["src"]  # path of the file to include

    def __str__(self):
        return DocNode.__str__(self) + ":<web:include src=%s>" \
               % xml.sax.saxutils.quoteattr(self.filePath)
# --------------------------------------------------------------------
class DocDir(DocNode):
# --------------------------------------------------------------------
    """A <web:dir> element: maps its children into a sub-directory of the output."""
    def __init__(self, attrs, URL, locator):
        """Create the node; a 'name' attribute is mandatory."""
        DocNode.__init__(self, attrs, URL, locator)
        if not attrs.has_key("name"):
            raise DocError("dir tag missing 'name' attribute")
        self.dirName = attrs["name"]  # directory component appended to the publish path

    def __str__(self):
        return DocNode.__str__(self) + ":<web:dir name=%s>" \
               % xml.sax.saxutils.quoteattr(self.dirName)

    def getPublishDirName(self):
        """Extend the parent's publish path with this directory."""
        return self.parent.getPublishDirName() + self.dirName + os.sep

    def visit(self, generator):
        """Enter the directory, visit the children, then pop back to the parent."""
        generator.changeDir(self.dirName)
        DocNode.visit(self, generator)
        generator.parentDir()
# --------------------------------------------------------------------
class DocGroup(DocNode):
# --------------------------------------------------------------------
    """A <web:group> element: a plain container with no behavior of its own."""
    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)

    def __str__(self):
        return DocNode.__str__(self) + ":<web:group>"
# --------------------------------------------------------------------
class DocCDATAText(DocBareNode):
# --------------------------------------------------------------------
    """Raw text inside a CDATA section; published verbatim (no XML escaping)."""
    def __init__(self, text):
        DocBareNode.__init__(self)
        self.text = text

    def __str__(self):
        return DocNode.__str__(self) + ":CDATA text:" + self.text

    def publish(self, gen, pageNode = None):
        # Verbatim output: CDATA content must not be entity-escaped.
        gen.putString(self.text)
# --------------------------------------------------------------------
class DocCDATA(DocNode):
# --------------------------------------------------------------------
    """A CDATA section: wraps its children in <![CDATA[ ... ]]> on output."""
    def __init__(self):
        # CDATA carries no attributes, source URL or locator.
        DocNode.__init__(self, {}, None, None)

    def __str__(self):
        return DocNode.__str__(self) + ":CDATA"

    def publish(self, gen, pageNode = None):
        gen.putString("<![CDATA[")
        DocNode.publish(self, gen, pageNode)
        gen.putString("]]>")
    # Wrapped by makeGuard (defined earlier in the file) -- presumably adds
    # source-location context to publishing errors; confirm at its definition.
    publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocHtmlText(DocBareNode):
# --------------------------------------------------------------------
def __init__(self, text):
DocBareNode.__init__(self)
self.text = text
def __str__(self):
return DocNode.__str__(self) + ":text:'" + \
self.text.encode('utf-8').encode('string_escape') + "'"
def publish(self, gen, pageNode = None):
# find occurences of %directive; in the text node and do the
# appropriate substitutions
next = 0
for m in re.finditer("%(\w+)(?::([-\w._#]+))?;", self.text):
if next < m.start():
gen.putXMLString(self.text[next : m.start()])
next = m.end()
directive = self.text[m.start()+1 : m.end()-1]
directive = m.group(1)
if m.group(2):
options = [x.strip().lower() for x in m.group(2).split(',')]
else:
options = []
if directive == "content":
pageNode.publish(gen, pageNode)
elif directive == "pagestyle":
for q in pageNode.getChildren():
for s in walkNodes(q, DocPageStyle, DocPage):
s.expand(gen, pageNode)
elif directive == "pagescript":
for q in pageNode.getChildren():
for s in walkNodes(q, DocPageScript, DocPage):
s.expand(gen, pageNode)
elif directive == "pagetitle":
gen.putString(pageNode.title)
elif directive == "path":
ancPages = [x for x in walkAncestors(pageNode, DocPage)]
plain=False
for option in options:
if option=="plain":
plain=True
else:
print "warning: ignoring unknown option '%s' while expanding 'path'" % option
if ancPages is not None:
for i,p in enumerate(reversed(ancPages)):
if plain:
if i > 0: gen.putString(" > ")
gen.putString(p.title)
else:
if i > 0: gen.putString("<span class='separator'>></span>")
gen.putString("<span class='page'><a href=")
gen.putXMLAttr(
pageNode.expandAttr("%%pathto:%s;" % p.getID(), pageNode))
gen.putString(">%s</a></span>" % p.title)
elif directive == "navigation":
gen.putString("<ul>\n")
# get the branch of DocPage nodes from the site root to this page
activePageNodes = [x for x in walkAncestors(pageNode, DocPage)]
# find the root site node and publish the contents
siteNode = walkAncestors(pageNode, DocSite).next()
siteNode.publishIndex(gen, pageNode, activePageNodes, True)
gen.putString("</ul>\n")
elif directive == "tableofcontents":
pageNode.publishTableOfContents(gen, pageNode)
elif directive == "env":
envName = m.group(2)
if envName in os.environ:
gen.putString(os.environ[envName])
else:
print "warning: environment variable '%s' not defined" % envName
else:
print "warning: ignoring unknown directive '%s'" % label
if next < len(self.text):
gen.putXMLString(self.text[next:])
# --------------------------------------------------------------------
class DocCodeText(DocBareNode):
# --------------------------------------------------------------------
    """Raw text inside a <web:precode> block; collected by DocCode.publish."""
    def __init__(self, text):
        DocBareNode.__init__(self)
        self.text = text

    def __str__(self):
        return DocNode.__str__(self) + ":text:'" + \
               self.text.encode('utf-8').encode('string_escape') + "'"
# --------------------------------------------------------------------
class DocCode(DocNode):
# --------------------------------------------------------------------
    """A <web:precode> element: preformatted code, optionally syntax-highlighted."""
    def __init__(self, attrs, URL = None, locator = None):
        DocNode.__init__(self, attrs, URL, locator)
        # 'type' selects the Pygments lexer; "plain" disables highlighting.
        self.type = "plain"
        if attrs.has_key("type"): self.type = attrs["type"]

    def __str__(self):
        # NOTE: the local name 'str' shadows the builtin within this method.
        str = "<web:precode"
        for k, v in self.attrs.items():
            str = str + " " + k + "='" + xml.sax.saxutils.escape(v) + "'"
        str = str + "> type = " + self.type
        return DocNode.__str__(self) + ":" + str

    def publish(self, gen, pageNode = None):
        """Concatenate the child code text and emit it, highlighted if possible."""
        code = ""
        for n in self.getChildren():
            if n.isA(DocCodeText):
                code = code + n.text
        if has_pygments and not self.type == "plain":
            try:
                lexer = pygments.lexers.get_lexer_by_name(self.type)
                gen.putString(pygments.highlight(code,
                                                 lexer,
                                                 pygments.formatters.HtmlFormatter()))
            except pygments.util.ClassNotFound:
                # Unknown language: warn and fall back to a plain <pre> block.
                print "warning: could not find a syntax highlighter for '%s'" % self.type
                gen.putString("<pre>" + code + "</pre>")
        else:
            gen.putString("<pre>" + code + "</pre>")
        DocNode.publish(self, gen, pageNode)
    # Wrapped by makeGuard (defined earlier); presumably adds error context.
    publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocHtmlElement(DocNode):
# --------------------------------------------------------------------
    """A generic HTML element copied (almost) verbatim to the output."""
    def __init__(self, tag, attrs, URL = None, locator = None):
        DocNode.__init__(self, attrs, URL, locator)
        self.tag = tag  # HTML tag name, e.g. 'div'

    def __str__(self):
        # NOTE: the local name 'str' shadows the builtin within this method.
        str = "<html:" + self.tag
        for k, v in self.attrs.items():
            str = str + " " + k + "='" + xml.sax.saxutils.escape(v) + "'"
        str = str + ">"
        return DocNode.__str__(self) + ":" + str

    def getPublishURL(self):
        """URL of the enclosing page plus '#id' anchor; None outside a page."""
        anc = self.findAncestors(DocPage)
        if len(anc) == 0: return None
        return anc[0].getPublishURL() + "#" + self.id

    def publish(self, gen, pageNode = None):
        """
        Emit the element, expanding %directives; in attribute values.
        h1-h5 elements get an automatic id (for tables of contents); a
        <code> element whose text matches a known node ID becomes a link.
        """
        gen.putString("<")
        gen.putString(self.tag)
        # make sure headings have an id (for ToCs)
        if self.tag in ['h1', 'h2', 'h3', 'h4', 'h5'] and \
           not "id" in self.attrs:
            self.attrs["id"] = self.id ;
        for name, value in self.attrs.items():
            gen.putString(" ")
            gen.putString(name)
            gen.putString("=")
            gen.putXMLAttr(self.expandAttr(value, pageNode))
        if self.tag == 'br':
            # workaround for browsers that do not like <br><br/>
            gen.putString("/>")
        elif self.tag == 'code':
            # expand tags such as <code>vl_function</code> as links
            # NOTE(review): "/>" closes the tag as <code/> before the content
            # and a second </code> is emitted below -- looks suspicious, but
            # the output is left untouched here.
            gen.putString("/>")
            text = "".join([y.text for y in walkNodes(self, DocHtmlText)])
            ok = nodeIndex.has_key(text)
            if ok: gen.putString("<a href=" + self.expandAttr("%%pathto:%s;" % text, pageNode) + ">")
            DocNode.publish(self, gen, pageNode)
            if ok: gen.putString("</a>")
            gen.putString("</")
            gen.putString(self.tag)
            gen.putString(">")
        else:
            gen.putString(">")
            DocNode.publish(self, gen, pageNode)
            gen.putString("</")
            gen.putString(self.tag)
            gen.putString(">")
    publish = makeGuard(publish)
# --------------------------------------------------------------------
class DocTemplate(DocNode):
# --------------------------------------------------------------------
    """A <web:template> element; pages look it up by ID when rendering."""
    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)
# --------------------------------------------------------------------
class DocPageStyle(DocNode):
# --------------------------------------------------------------------
    """
    A <web:pagestyle> element. It produces no output where it appears;
    the template's %pagestyle; directive calls expand() to emit it into
    the page head as a <link> (external) or <style> (inline) element.
    """
    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)

    def publish(self, gen, pageNode = None):
        # Suppressed: styles are only emitted via expand().
        return None

    def expand(self, gen, pageNode = None):
        """Emit a stylesheet <link> (if 'href' given) or an inline <style> block."""
        sa = self.getAttributes()
        if sa.has_key("href"):
            gen.putString("<link rel=\"stylesheet\" type=")
            if sa.has_key("type"):
                gen.putXMLAttr(self.expandAttr(sa["type"], pageNode))
            else:
                gen.putString("\"text/css\" ")
            gen.putString("href=")
            gen.putXMLAttr(self.expandAttr(sa["href"], pageNode))
            gen.putString("></link>\n")
        else:
            # NOTE(review): 'rel' is not a standard attribute of <style>;
            # browsers ignore it, so the output is left untouched here.
            gen.putString("<style rel=\"stylesheet\" type=")
            if sa.has_key("type"):
                gen.putXMLAttr(self.expandAttr(sa["type"], pageNode))
            else:
                gen.putString("\"text/css\" ")
            gen.putString(">")
            DocNode.publish(self, gen, pageNode)
            gen.putString("</style>\n")
    expand = makeGuard(expand)
# --------------------------------------------------------------------
class DocPageScript(DocNode):
# --------------------------------------------------------------------
    """
    A <web:pagescript> element. It produces no output where it appears;
    the template's %pagescript; directive calls expand() to emit it into
    the page head as a <script> element (external via 'src', or inline).
    """
    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)

    def publish(self, gen, pageNode = None):
        # Suppressed: scripts are only emitted via expand().
        return None

    def expand(self, gen, pageNode = None):
        """Emit the <script> element; type defaults to text/javascript."""
        sa = self.getAttributes()
        gen.putString("<script type=")
        if sa.has_key("type"):
            gen.putXMLAttr(self.expandAttr(sa["type"], pageNode))
            gen.putString(" ")
        else:
            gen.putString("\"text/javascript\" ")
        if sa.has_key("src"):
            gen.putString("src=")
            gen.putXMLAttr(self.expandAttr(sa["src"], pageNode))
        gen.putString(">")
        DocNode.publish(self, gen, pageNode)
        gen.putString("</script>\n")
    expand = makeGuard(expand)
# --------------------------------------------------------------------
class DocPage(DocNode):
# --------------------------------------------------------------------
    """A <web:page> element: one output HTML file, rendered through a template."""
    # counts created pages; used to synthesize default page names
    counter = 0

    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)
        DocPage.counter = 1 + DocPage.counter
        self.templateID = "template.default"  # ID of the template used to render
        self.name = "page%d" % DocPage.counter
        self.title = "untitled"
        self.hide = False  # hidden pages are skipped by publishIndex
        for k, v in self.attrs.items():
            if k == 'src':
                # NOTE(review): 'src' sets the title, same as 'title' --
                # presumably the title defaults to the source name; confirm.
                self.title = v
            elif k == 'name':
                self.name = v
            elif k == 'id':
                pass
            elif k == 'title':
                self.title = v
            elif k == 'hide':
                self.hide = (v.lower() == 'yes')
            else:
                raise DocError(
                    "web:page cannot have '%s' attribute" % k)

    def __str__(self):
        return DocNode.__str__(self) + ":<web:page name='%s' title='%s'>" \
               % (xml.sax.saxutils.escape(self.name),
                  xml.sax.saxutils.escape(self.title))

    def getPublishFileName(self):
        """Output file name for this page."""
        return self.name + ".html"

    def getPublishURL(self):
        """Absolute URL: site URL + directory path + file name."""
        siteNode = self.findAncestors(DocSite)[0]
        return siteNode.getPublishURL() + \
               self.getPublishDirName() + \
               self.getPublishFileName()

    def visit(self, generator):
        """Render this page through its template, then visit children."""
        generator.open(self.getPublishFileName())
        templateNode = nodeIndex[self.templateID]
        templateNode.publish(generator, self)
        generator.close()
        DocNode.visit(self, generator)

    def publish(self, generator, pageNode = None):
        # Only emit content when this page is the one being published;
        # nested pages encountered during another page's publish are skipped.
        if pageNode is self:
            # this is the page being published, so go on
            if opts.verb: print 'Publishing \'%s\'' % self.getPublishURL()
            DocNode.publish(self, generator, pageNode)
        # otherwise this page has been encountered recursively
        # during publishing
        return None

    def publishIndex(self, gen, inPage, activePageNodes, full=False):
        """
        Emit this page's <li> entry (and, when active or full, a nested
        <ul> with its sub-pages). Returns True when an entry was emitted.
        """
        if self.hide: return False
        active = (self in activePageNodes)
        if active:
            activeLeaf = (activePageNodes.index(self) == 0)#len(activePageNodes)-1)
        else:
            activeLeaf = False
        gen.putString("<li")
        if active: gen.putString(" class='active'")
        if activeLeaf: gen.putString(" class='activeLeaf'")
        gen.putString("><a href=")
        gen.putXMLAttr(
            self.expandAttr("%%pathto:%s;" % self.getID(), inPage))
        gen.putString(">")
        gen.putXMLString(self.title)
        gen.putString("</a>\n")
        # Generate recursively the index of the children
        # This may or may not produce results; if not we need to backtrack,
        # so we save the position of the generator.
        pos = gen.tell()
        gen.putString("<ul>\n")
        if active or full:
            notEmpty = DocNode.publishIndex(self, gen, inPage, activePageNodes, full)
        else:
            notEmpty = False
        if notEmpty:
            gen.putString("</ul>")
        else:
            gen.seek(pos)
        gen.putString("</li>\n")
        return True
# --------------------------------------------------------------------
class DocSite(DocNode):
# --------------------------------------------------------------------
    """The root <web:site> node; holds the site URL and output directory."""
    def __init__(self, attrs, URL, locator):
        DocNode.__init__(self, attrs, URL, locator)
        self.siteURL = "http://www.foo.org/"  # overridden via setPublishURL()
        self.outDir = "html"  # overridden via setOutDir()

    def __str__(self):
        return DocNode.__str__(self) + ":<web:site>"

    def getPublishURL(self):
        return self.siteURL

    def setPublishURL(self, url):
        self.siteURL = url

    def getPublishDirName(self):
        # The site root maps onto the output directory itself.
        return ""

    def getOutDir(self):
        return self.outDir

    def setOutDir(self, outDir):
        self.outDir = outDir

    def publish(self):
        """Publish the whole website into the output directory."""
        generator = Generator(self.outDir)
        self.visit(generator)
    publish = makeGuard(publish)
# --------------------------------------------------------------------
class Generator:
# --------------------------------------------------------------------
def __init__(self, rootDir):
ensureDir(rootDir)
self.fileStack = []
self.dirStack = [rootDir]
ensureDir(rootDir)
#print "CD ", rootDir
def open(self, filePath):
filePath = os.path.join(self.dirStack[-1], filePath)
fid = open(filePath, "w")
self.fileStack.append(fid)
fid.write(DOCTYPE_XHTML_TRANSITIONAL)
#print "OPEN ", filePath
def putString(self, str):
fid = self.fileStack[-1]
try:
encoded = str.encode('utf-8')
fid.write(encoded)
except (UnicodeEncodeError, IOError), e:
print str
raise DocError("writing text:" + e.__str__())
except:
raise
def putXMLString(self, str):
fid = self.fileStack[-1]
xstr = xml.sax.saxutils.escape(str, mapUnicodeToHtmlEntity)
try:
fid.write(xstr.encode('utf-8'))
except (UnicodeEncodeError, IOError), e:
raise DocError("writing XML-escaped string:" + e.__str__())
except:
raise
def putXMLAttr(self, str):
fid = self.fileStack[-1]
xstr = xml.sax.saxutils.quoteattr(str)
try:
fid.write(xstr.encode('utf-8'))
except (UnicodeEncodeError, IOError), e:
raise DocError("writing XML-escaped attribute:" + e.__str__())
except:
raise
def close(self):
self.fileStack.pop().close()
#print "CLOSE"
def changeDir(self, dirName):
currentDir = self.dirStack[-1]
newDir = os.path.join(currentDir, dirName)
ensureDir(newDir)
self.dirStack.append(newDir)
#print "CD ", newDir
def parentDir(self):
self.dirStack.pop()
#print "CD .."
def tell(self):
fid = self.fileStack[-1]
return fid.tell()
def seek(self, pos):
fid = self.fileStack[-1]
fid.seek(pos)
# --------------------------------------------------------------------
class DocHandler(ContentHandler):
# --------------------------------------------------------------------
def __init__(self):
ContentHandler.__init__(self)
self.rootNode = None
self.stack = []
self.locatorStack = []
self.filePathStack = []
self.inDTD = False
def resolveEntity(self, publicid, systemid):
"""
Resolve XML entities by mapping to a local copy of the (X)HTML
DTDs.
"""
return open(os.path.join(
os.path.dirname(__file__),
'dtd/xhtml1',
systemid[systemid.rfind('/')+1:]), "rb")
def lookupFile(self, filePath):
if os.path.exists(filePath):
return filePath
if filePath[0] == '/':
return None
for path in self.filePathStack:
dir = os.path.dirname(path)
qualFilePath = os.path.join(dir, filePath)
if os.path.exists(qualFilePath):
return qualFilePath
return None
def makeError(self, message):
e = DocError(message)
for i in xrange(len(self.filePathStack)-1,-1,-1):
URL = self.filePathStack[i]
locator = self.locatorStack[i]
e.appendLocation(DocLocation(URL,
locator.getLineNumber(),
locator.getColumnNumber()))
return e
def startElement(self, name, attrs):
"""
SAX interface: starting of XML element.
The function creates a new document node, i.e. a specialized
class of DocNode for the type of XML element encountered. It then
appends it as the head of the parsing stack for further processing."
"""
# convert attrs to a dictionary (implicitly copies as required by the doc)
attrs_ = {}
for k, v in attrs.items():
attrs_[k] = v
attrs = attrs_
URL = self.getCurrentFileName()
locator = self.getCurrentLocator()
# The <web:include> element is not parsed recusrively; instead
# it simply switches to parsing the specified file.
if name == "include":
if not attrs.has_key("src"):
raise self.makeError("<web:include> lacks the 'src' attribute")
filePath = attrs["src"]
qualFilePath = self.lookupFile(filePath)
if qualFilePath is None:
raise self.makeError("the file '%s' could not be found while expanding <web:include>" % filePath)
if opts.verb: print "Parsing '%s'" % qualFilePath
if attrs.has_key("type"):
includeType = attrs["type"]
else:
includeType = "webdoc"
if includeType == "webdoc":
self.load(qualFilePath)
elif includeType == "text":
self.characters(open(qualFilePath, 'r').read())
else:
raise makeError("'%s' is not a valid <web:include> type" % includeType)
return
if len(self.stack) == 0:
parent = None
else:
parent = self.stack[-1]
node = None
if name == "site":
node = DocSite(attrs, URL, locator)
elif name == "page":
node = DocPage(attrs, URL, locator)
elif name == "dir":
node = DocDir(attrs, URL, locator)
elif name == "template":
node = DocTemplate(attrs, URL, locator)
elif name == "pagestyle":
node = DocPageStyle(attrs, URL, locator)
elif name == "pagescript":
node = DocPageScript(attrs, URL, locator)
elif name == "group":
node = DocGroup(attrs, URL, locator)
elif name == "precode":
node = DocCode(attrs, URL, locator)
else:
node = DocHtmlElement(name, attrs, URL, locator)
if parent: parent.adopt(node)
self.stack.append(node)
def endElement(self, name):
"""
SAX interface: closing of XML element.
"""
if name == "include":
return
node = self.stack.pop()
if len(self.stack) == 0:
self.rootNode = node
def load(self, qualFilePath):
self.filePathStack.append(qualFilePath)
parser = xml.sax.make_parser()
parser.setContentHandler(self)
parser.setEntityResolver(self)
parser.setProperty(xml.sax.handler.property_lexical_handler, self)
try:
parser.parse(qualFilePath)
except xml.sax.SAXParseException, e:
raise self.makeError("XML parsing error: %s" % e.getMessage())
def setDocumentLocator(self, locator):
"""SAX interface: This is called when a new file is parsed to set the locator object."""
self.locatorStack.append(locator)
def getCurrentLocator(self):
if len(self.locatorStack) > 0:
return self.locatorStack[-1]
else:
return None
def characters(self, content):
"""
SAX interface: characters.
"""
parent = self.stack[-1]
if parent.isA(DocCDATA):
node = DocCDATAText(content)
elif parent.isA(DocCode):
node = DocCodeText(content)
else:
node = DocHtmlText(content)
parent.adopt(node)
def ignorableWhitespace(self, ws):
self.characters(ws)
def getCurrentFileName(self):
return self.filePathStack[-1]
def endDocument(self):
self.locatorStack.pop()
self.filePathStack.pop()
def startCDATA(self):
node = DocCDATA()
self.stack[-1].adopt(node)
self.stack.append(node)
def endCDATA(self):
node = self.stack.pop()
if len(self.stack) == 0:
self.rootNode = node
def comment(self, body):
if self.inDTD: return
node = DocCDATAText("<!--" + body + "-->")
self.stack[-1].adopt(node)
def startEntity(self, name): pass
def endEntity(self, name): pass
def startDTD(self, name, public_id, system_id):
self.inDTD = True
def endDTD(self):
self.inDTD = False
# --------------------------------------------------------------------
def start(filePath, opts):
# --------------------------------------------------------------------
    """
    Entry point: parse the site description in filePath, publish the
    website and, optionally, write the website index file. Exits the
    process (non-zero on error) instead of returning.
    """
    global doxygenIndex
    global doxygenDir
    if not has_pygments and opts.verb:
        print "Warning: Pygments module not found: syntax coloring disabled."
    handler = DocHandler()
    try:
        handler.load(filePath)
    except DocError, e:
        print e
        sys.exit(-1)
    # configure site
    handler.rootNode.setOutDir(opts.outdir)
    handler.rootNode.setPublishURL(opts.siteurl)
    # load doxygen tag file
    if opts.doxytag:
        if opts.verb: print "Loading the Doxygen tag file", opts.doxytag
        try:
            doxygenIndex = Doxytag(opts.doxytag)
            doxygenDir = opts.doxydir
        except Exception, e:
            print "Error parsing the Doxygen tag file", opts.doxytag
            print e
            sys.exit(-1)
    if opts.verb > 2:
        print "== All pages =="
        for x in walkNodes(handler.rootNode, DocPage):
            print x
    if opts.verb: print "Publishing website..."
    try:
        handler.rootNode.publish()
    except DocError, e:
        print e
        sys.exit(-1)
    if opts.indexfile:
        if opts.verb: print "Storing the website index to", opts.indexfile
        try:
            f = open(opts.indexfile, 'w+')
            siteurl = nodeIndex['root'].getPublishURL()
            # One "id|relative-url" line per explicitly-identified node...
            for (id,x) in sorted(nodeIndex.items()):
                if (x.isA(DocHtmlElement) or x.isA(DocPage)) and x.attrs.has_key('id'):
                    url = x.getPublishURL()
                    if not url: continue
                    print >>f, '%s|%s' % (x.attrs['id'],
                                          calcRelURL(url,siteurl))
            # ...plus one line per Doxygen tag, when a tag file was loaded.
            if doxygenIndex:
                for tag in sorted(doxygenIndex.index):
                    url = getDoxygenURL(tag)
                    print >>f, '%s|%s' % (tag,
                                          calcRelURL(url,siteurl))
        except Exception, e:
            print "Error writing the website index file"
            print e
            sys.exit(-1)
    sys.exit(0)
# --------------------------------------------------------------------
if __name__ == '__main__':
# --------------------------------------------------------------------
    # NOTE(review): --siteurl and --indexfile are accepted below but are
    # missing from this usage text.
    usage = """webdoc [OPTIONS...] <DOC.XML>
    --outdir    Set output directory
    --verbose   Be verbose
    --doxytag   Doxygen tag file
    --doxydir   Doxygen documentation location
    --profile   Collect and print profiling information
    """
    parser = OptionParser(usage=usage)
    parser.add_option(
        "-v", "--verbose",
        dest    = "verb",
        default = 0,
        action  = "count",
        help    = "print more debuging information")
    parser.add_option(
        "-o", "--outdir",
        dest    = "outdir",
        default = "html",
        action  = "store",
        help    = "write output to this directory")
    parser.add_option(
        "", "--doxytag",
        dest    = "doxytag",
        default = None,
        action  = "store",
        help    = "use this doxygen tag file")
    parser.add_option(
        "", "--doxydir",
        dest    = "doxydir",
        default = ".",
        action  = "store",
        help    = "find doxygen documentation here")
    parser.add_option(
        "", "--profile",
        dest    = "profile",
        default = False,
        action  = "store_true",
        help    = "run the profiler")
    parser.add_option(
        "", "--siteurl",
        dest    = "siteurl",
        default = "",
        action  = "store",
        help    = "set the base URL of the website")
    parser.add_option(
        "", "--indexfile",
        dest    = "indexfile",
        default = None,
        action  = "store",
        help    = "store the website index here")
    (opts, args) = parser.parse_args()
    # Optionally run under cProfile; either way start() exits the process.
    if opts.profile:
        cProfile.run('start(args[0], opts)')
    else:
        start(args[0], opts)
| gpl-3.0 |
threatstream/mnemosyne | normalizer/modules/dionaea_capture.py | 1 | 2498 | # Copyright (C) 2012 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
from normalizer.modules.basenormalizer import BaseNormalizer
class DionaeaCaptures(BaseNormalizer):
    """Normalizes dionaea capture events into mnemosyne 'session' relations."""
    channels = ('dionaea.capture', 'dionaea.capture.anon', 'dionaea.captures')

    def normalize(self, data, channel, submission_timestamp, ignore_rfc1918=True):
        """
        Convert one raw capture record into a list of relation dicts.

        data                 -- raw record, decoded by parse_record_data().
        channel              -- hpfeeds channel name (unused here).
        submission_timestamp -- timestamp stored on the session.
        ignore_rfc1918       -- when True, drop events from private source IPs.

        Returns a one-element list of {'session': ...} relations, or []
        when the source address is RFC1918 and ignored.
        """
        o_data = self.parse_record_data(data)

        if ignore_rfc1918 and self.is_RFC1918_addr(o_data['saddr']):
            return []

        session = {
            'timestamp': submission_timestamp,
            'source_ip': o_data['saddr'],
            'source_port': int(o_data['sport']),
            'destination_ip': o_data['daddr'],
            'destination_port': int(o_data['dport']),
            'honeypot': 'dionaea'
        }
        # BUG FIX: a redundant conditional re-assignment of destination_ip
        # ended with a trailing comma, silently turning the value into a
        # 1-tuple. 'daddr' is already stored above, so it was removed.

        protocol = super(DionaeaCaptures, self).port_to_service(int(o_data['dport']))
        if protocol is not None:
            session['protocol'] = protocol

        attachments = [
            {
                'description': 'Binary extraction',
                'hashes':
                    {'md5': o_data['md5'],
                     'sha512': o_data['sha512']}
            }, ]

        session['attachments'] = attachments
        relations = {'session': session}
        return [relations]
| gpl-3.0 |
quexxon/lamprey | setup.py | 1 | 1225 | from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import setup, find_packages
# Distribution metadata for the lamprey package (a Yesql-inspired wrapper
# around mysqlclient). Tests run via pytest-runner ("python setup.py test").
setup(
    name="lamprey",
    version="0.1.0a1",
    description='Lamprey is a Yesql-inspired wrapper around mysqlclient',
    license='MIT',
    keywords='lamprey mysql mysqlclient mariadb database wrapper yesql',
    packages=find_packages(),
    test_suite="tests",
    install_requires=['mysqlclient'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    url='https://github.com/quexxon/lamprey',
    author='Will Clardy',
    author_email='will@quexxon.net',
    # Trove classifiers: alpha status, MIT license, Python 2.7 and 3.4.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Database :: Front-Ends',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: SQL',
    ],
)
| mit |
rosswhitfield/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/DirectILLIntegrateVanadium.py | 3 | 7262 | # -*- coding: utf-8 -*-# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import DirectILL_common as common
import ILL_utilities as utils
from mantid.api import (AlgorithmFactory, DataProcessorAlgorithm, InstrumentValidator, ITableWorkspaceProperty,
MatrixWorkspaceProperty, Progress, PropertyMode, WorkspaceProperty, WorkspaceUnitValidator)
from mantid.kernel import (CompositeValidator, Direction, EnabledWhenProperty, FloatBoundedValidator, Property,
PropertyCriterion, StringListValidator)
from mantid.simpleapi import (ComputeCalibrationCoefVan, MaskDetectorsIf)
class DirectILLIntegrateVanadium(DataProcessorAlgorithm):
"""A workflow algorithm which integrates the vanadium data."""
def __init__(self):
"""Initialize an instance of the algorithm."""
DataProcessorAlgorithm.__init__(self)
def category(self):
"""Return the algorithm's category."""
return common.CATEGORIES
def seeAlso(self):
return [ "DirectILLReduction" ]
def name(self):
"""Return the algorithm's name."""
return 'DirectILLIntegrateVanadium'
def summary(self):
"""Return a summary of the algorithm."""
return 'Integrate vanadium workspace. Part of the TOF workflow at ILL.'
def version(self):
"""Return the algorithm's version."""
return 1
def PyExec(self):
"""Executes the data reduction workflow."""
progress = Progress(self, 0.0, 1.0, 4)
self._subalgLogging = self.getProperty(common.PROP_SUBALG_LOGGING).value == common.SUBALG_LOGGING_ON
cleanupMode = self.getProperty(common.PROP_CLEANUP_MODE).value
self._cleanup = utils.Cleanup(cleanupMode, self._subalgLogging)
progress.report('Loading inputs')
mainWS = self._inputWS()
progress.report('Integrating')
mainWS = self._integrate(mainWS)
progress.report('Masking zeros')
mainWS = self._maskZeros(mainWS)
self._finalize(mainWS)
progress.report('Done')
def PyInit(self):
inputWorkspaceValidator = CompositeValidator()
inputWorkspaceValidator.add(InstrumentValidator())
inputWorkspaceValidator.add(WorkspaceUnitValidator('TOF'))
positiveFloat = FloatBoundedValidator(lower=0)
self.declareProperty(MatrixWorkspaceProperty(
name=common.PROP_INPUT_WS,
defaultValue='',
validator=inputWorkspaceValidator,
optional=PropertyMode.Mandatory,
direction=Direction.Input),
doc='A workspace to be integrated.')
self.declareProperty(WorkspaceProperty(name=common.PROP_OUTPUT_WS,
defaultValue='',
direction=Direction.Output),
doc='The integrated workspace.')
self.declareProperty(name=common.PROP_CLEANUP_MODE,
defaultValue=utils.Cleanup.ON,
validator=StringListValidator([
utils.Cleanup.ON,
utils.Cleanup.OFF]),
direction=Direction.Input,
doc='What to do with intermediate workspaces.')
self.declareProperty(name=common.PROP_SUBALG_LOGGING,
defaultValue=common.SUBALG_LOGGING_OFF,
validator=StringListValidator([
common.SUBALG_LOGGING_OFF,
common.SUBALG_LOGGING_ON]),
direction=Direction.Input,
doc='Enable or disable subalgorithms to ' + 'print in the logs.')
self.declareProperty(ITableWorkspaceProperty(
name=common.PROP_EPP_WS,
defaultValue='',
direction=Direction.Input,
optional=PropertyMode.Mandatory),
doc='Table workspace containing results from the FindEPP algorithm.')
self.declareProperty(name=common.PROP_DWF_CORRECTION,
defaultValue=common.DWF_ON,
validator=StringListValidator([
common.DWF_ON,
common.DWF_OFF]),
direction=Direction.Input,
doc='Enable or disable the correction for the Debye-Waller factor for ' + common.PROP_OUTPUT_WS + '.')
self.declareProperty(name=common.PROP_TEMPERATURE,
defaultValue=Property.EMPTY_DBL,
validator=positiveFloat,
direction=Direction.Input,
doc='Vanadium temperature in Kelvin for Debye-Waller correction, '
+ 'overrides the default value from the sample logs.')
self.setPropertySettings(common.PROP_TEMPERATURE, EnabledWhenProperty(common.PROP_DWF_CORRECTION,
PropertyCriterion.IsDefault))
def _inputWS(self):
"""Return the raw input workspace."""
mainWS = self.getProperty(common.PROP_INPUT_WS).value
self._cleanup.protect(mainWS)
return mainWS
def _finalize(self, outWS):
"""Do final cleanup and set the output property."""
self.setProperty(common.PROP_OUTPUT_WS, outWS)
self._cleanup.cleanup(outWS)
self._cleanup.finalCleanup()
def _integrate(self, mainWS):
    """Run ComputeCalibrationCoefVan over mainWS and discard the input.

    The Debye-Waller correction is applied only when the corresponding
    property is switched on.
    """
    sampleTemperature = self.getProperty(common.PROP_TEMPERATURE).value
    applyDWF = self.getProperty(common.PROP_DWF_CORRECTION).value == common.DWF_ON
    eppTableWS = self.getProperty(common.PROP_EPP_WS).value
    outWS = self.getProperty(common.PROP_OUTPUT_WS).value
    outWS = ComputeCalibrationCoefVan(VanadiumWorkspace=mainWS,
                                      EPPTable=eppTableWS,
                                      OutputWorkspace=outWS,
                                      Temperature=sampleTemperature,
                                      EnableDWF=applyDWF,
                                      EnableLogging=self._subalgLogging)
    self._cleanup.cleanup(mainWS)
    return outWS
def _maskZeros(self, mainWS):
    """Mask every detector whose integrated value is exactly zero."""
    return MaskDetectorsIf(InputWorkspace=mainWS,
                           OutputWorkspace=mainWS,
                           Value=0.,
                           Operator='Equal',
                           Mode='SelectIf',
                           EnableLogging=self._subalgLogging)
# Register the algorithm with Mantid so it can be created by name.
AlgorithmFactory.subscribe(DirectILLIntegrateVanadium)
| gpl-3.0 |
vrenaville/OCB | addons/crm_claim/__openerp__.py | 52 | 2067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Module identity as shown in the OpenERP apps list.
    'name': 'Claims Management',
    'version': '1.0',
    'category': 'Customer Relationship Management',
    'description': """
Manage Customer Claims.
=======================
This application allows you to track your customers/suppliers claims and grievances.
It is fully integrated with the email gateway so that you can create
automatically new claims based on incoming emails.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com',
    # Installing this module pulls in the base CRM module.
    'depends': ['crm'],
    # XML/CSV files loaded at install/update time (views, menus, ACLs, data).
    'data': [
        'crm_claim_view.xml',
        'crm_claim_menu.xml',
        'security/ir.model.access.csv',
        'report/crm_claim_report_view.xml',
        'crm_claim_data.xml',
        'res_partner_view.xml',
    ],
    # Demonstration data, loaded only on databases created with demo data.
    'demo': ['crm_claim_demo.xml'],
    # YAML test scenarios run by the test framework.
    'test': [
        'test/process/claim.yml',
        'test/ui/claim_demo.yml'
    ],
    'installable': True,
    'auto_install': False,
    # Screenshots used on the apps store listing.
    'images': [
        'images/claim_categories.jpeg',
        'images/claim_stages.jpeg',
        'images/claims.jpeg'
    ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Summonee/pelican | pelican/tests/default_conf.py | 24 | 1364 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
# Site identity.
AUTHOR = 'Alexis Métaireau'
SITENAME = "Alexis' log"
SITEURL = 'http://blog.notmyidea.org'
TIMEZONE = 'UTC'
# Third-party integrations.
GITHUB_URL = 'http://github.com/ametaireau/'
DISQUS_SITENAME = "blog-notmyidea"
PDF_GENERATOR = False
REVERSE_CATEGORY_ORDER = True
DEFAULT_PAGINATION = 2
# RSS feed locations; the category feed path is a %s template per category.
FEED_RSS = 'feeds/all.rss.xml'
CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'
# Blogroll shown in the theme sidebar: (label, url) pairs.
LINKS = (('Biologeek', 'http://biologeek.org'),
         ('Filyb', "http://filyb.info/"),
         ('Libert-fr', "http://www.libert-fr.com"),
         ('N1k0', "http://prendreuncafe.com/blog/"),
         ('Tarek Ziadé', "http://ziade.org/blog"),
         ('Zubin Mithra', "http://zubin71.wordpress.com/"),)
# Social accounts: (network name, url) pairs.
SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),
          ('lastfm', 'http://lastfm.com/user/akounet'),
          ('github', 'http://github.com/ametaireau'),)
# global metadata to all the contents
DEFAULT_METADATA = {'yeah': 'it is'}
# path-specific metadata
EXTRA_PATH_METADATA = {
    'extra/robots.txt': {'path': 'robots.txt'},
    }
# static paths will be copied without parsing their contents
STATIC_PATHS = [
    'pictures',
    'extra/robots.txt',
    ]
# Metadata fields whose values are run through the markup formatter.
FORMATTED_FIELDS = ['summary', 'custom_formatted_field']
# foobar will not be used, because it's not in caps. All configuration keys
# have to be in caps
foobar = "barbaz"
| agpl-3.0 |
wmfs/chimp | src/load/Loader.py | 1 | 37771 | import psycopg2
import os
import imp
import chimpExternalLoader
from load.Stager import Stager
import cs
class Loader:
    """Drives the chimp load pipeline steps: staging CSV/JSON data,
    sending staged rows to the import schema, propagating them to the
    editable schema, running SQL scripts, and handling checkpoints.
    Each process* method reports progress and messages through the queue
    and returns a (success, exception, error, warning, ignored, notice)
    counter tuple."""
    def __init__(self, supportConnection, supportCursor, settings, queue):
        # Connection/cursor reserved for bookkeeping queries
        # (shared.* tables, counts); per-task data work uses the
        # connections passed into each process* method.
        self.supportConnection = supportConnection
        self.supportCursor = supportCursor
        self.queue = queue
        self.settings = settings
def makeEditableFile(self, loopConnection, dataConnection, dataCursor, settings, taskId, processLimit, specification, args):
    """Write a SQL script of editable.<table>_insert() calls for import rows.

    Each row of import.<table> is transformed by the specification's
    per-table editable transformer script and emitted as one insert
    statement into a temp file; nothing is executed here.  Rows are only
    written when editable.<table> is currently empty.

    Returns (success, exception, error, warning, ignored, notice).
    """
    table = args["table"]
    appLogger = settings.appLogger
    self.queue.startTask(taskId, True)
    appLogger.debug("| {0}:".format(table))
    # Any editable data here already?
    # ===============================
    sql = "select exists (select 1 from editable.{0} limit 1)".format(table)
    self.supportCursor.execute(sql)
    dataExists = self.supportCursor.fetchone()
    dataExists = dataExists[0]
    appLogger.debug("| dataExists: {0}".format(dataExists))
    # Get current timestamp
    # =====================
    sql = "select now()"
    self.supportCursor.execute(sql)
    thisImportStartTimestamp = self.supportCursor.fetchone()[0]
    appLogger.debug("| thisImportStartTimestamp : {0}".format(thisImportStartTimestamp))
    # Get last time schemas synchronised
    # ==================================
    sql = "select last_sent_to_editable from shared.specification_registry where name=%s"
    self.supportCursor.execute(sql, (specification.name,))
    lastImportTimestamp = self.supportCursor.fetchone()[0]
    appLogger.debug("| lastImportTimestamp : {0}".format(lastImportTimestamp))
    # Scanning: count modified and deleted rows since the last sync so the
    # queue can show total expected work.
    # ========
    appLogger.debug("| Scanning")
    # Modified
    scanSql = "select count(*) from import.{0}" .format(table)
    if lastImportTimestamp is not None:
        scanSql += " where modified >%s"
        self.supportCursor.execute(scanSql, (lastImportTimestamp,))
    else:
        self.supportCursor.execute(scanSql)
    modifiedCount = self.supportCursor.fetchone()[0]
    appLogger.debug("| Modified = {0}".format(modifiedCount))
    scanSql = "select count(*) from history.import_{0}_deletes" .format(table)
    if lastImportTimestamp is not None:
        scanSql += " where deleted >%s"
        self.supportCursor.execute(scanSql, (lastImportTimestamp,))
    else:
        self.supportCursor.execute(scanSql)
    deletedCount = self.supportCursor.fetchone()[0]
    appLogger.debug("| Deleted = {0}".format(deletedCount))
    totalCount = modifiedCount + deletedCount
    appLogger.debug("| {0}".format(totalCount))
    self.queue.setScanResults(taskId, totalCount)
    # Grab transformer function
    # =========================
    moduleFilename = cs.getChimpScriptFilenameToUse(settings.paths["repository"], ("specifications", specification.name, "resources", "py", "transformation","editable"), "%s_editable_transformer.py" %(table))
    module = imp.load_source("%s_editable_transformer.py" %(table), moduleFilename)
    transformFunction = module.transformSuppliedValues
    # Establish files
    # ===============
    filename = os.path.join(settings.env["tempPath"], "insert_into_editable_{0}.sql".format(table))
    appLogger.debug("|")
    appLogger.debug("| Filename: {0}".format(filename))
    insertFile = open(filename,"w")
    # Calculate DML placeholders
    # ==========================
    # NOTE(review): insertDml is built and logged but never executed or
    # written; only the literal per-row insert text below is used (the
    # mogrify line that consumed it is commented out).  Confirm whether it
    # can be removed.
    insertDml = "execute editable.{0}_insert(%s,%s".format(table)
    i=args["selectListLength"]
    while i>0:
        insertDml += ",%s"
        i-=1
    insertDml += ',"import");'
    appLogger.debug("| insertDml : {0}".format(insertDml))
    loopSql = "select {0} from import.{1}".format(args["selectList"], table)
    loopCursor = loopConnection.makeCursor("loopCursor", True, True)
    loopCursor.execute(loopSql)
    lineCount = 0
    successCount = 0
    exceptionCount=0
    errorCount = 0
    warningCount = 0
    noticeCount = 0
    ignoredCount = 0
    if not dataExists:
        for data in loopCursor:
            if lineCount % 1000 == 0:
                self.queue.setTaskProgress(taskId, successCount, exceptionCount, errorCount, warningCount, noticeCount, ignoredCount)
            lineCount = lineCount + 1
            transformedValues = transformFunction(dataCursor, data)
            # NOTE(review): the [8:-2] slice strips wrapper characters from
            # the string form of psycopg2's adapted tuple.  This is fragile
            # (depends on the exact repr of adapt(...).getquoted(), which
            # differs between psycopg2 versions and Python 2/3 bytes
            # handling) — verify against the deployed psycopg2.
            quoted = str(psycopg2.extensions.adapt(transformedValues).getquoted())
            quoted = quoted[8:-2]
            line ="select editable.{0}_insert({1}, 'import');\n".format(table, quoted)
            insertFile.write(line)
            successCount+=1
            #line = self.supportCursor.mogrify(insertDml,transformedValues)
    insertFile.close()
    loopCursor.close()
    appLogger.debug("| Finished.")
    self.supportConnection.connection.commit()
    return( (successCount, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount) )
#def processSendToEditable(dictionaryConnection, dictionaryCursor, inserterConnection, inserterCursor, namedConnection, namedCursor, settings, taskId, groupId, processLimit, args):
def processFinishEditable(self, queue, supportConnection, supportCursor, loopConnection, dataConnection, dataCursor, settings, taskId, processLimit, args, recordedTimestamp):
    """Record that a send-to-editable run completed.

    Stamps shared.specification_registry.last_sent_to_editable with
    recordedTimestamp for the specification named in args["specification"],
    commits on supportConnection and marks the queue task finished.
    loopConnection, dataConnection, dataCursor and processLimit are part of
    the common process* signature and are unused here.

    Returns (success, exception, error, warning, ignored, notice); the
    bookkeeping update itself counts as the single success.
    """
    # Fixed counters: one unit of work (the registry update), no problems.
    # (Removed an unused lineCount local; the literal 1 previously passed to
    # finishTask is now the successCount it always mirrored.)
    successCount = 1
    exceptionCount = 0
    errorCount = 0
    warningCount = 0
    noticeCount = 0
    ignoredCount = 0
    appLogger = settings.appLogger
    specification = args["specification"]
    sql = "update shared.specification_registry set last_sent_to_editable=%s where name=%s"
    appLogger.debug("| recordedTimestamp : {0}".format(recordedTimestamp))
    appLogger.debug("| specification : {0}".format(specification))
    supportCursor.execute(sql, (recordedTimestamp, specification))
    supportConnection.connection.commit()
    queue.finishTask(taskId, successCount, exceptionCount, errorCount, warningCount, noticeCount, ignoredCount)
    return (successCount, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount)
def processSendToEditable(self, loopConnection, dataConnection, dataCursor, settings, taskId, processLimit, specification, args):
    """Propagate import-schema changes for one table into the editable schema.

    Rows deleted from import since the last sync are deleted from
    editable; rows inserted/modified are transformed by the per-table
    editable transformer and applied through the generated
    editable.<table>_insert()/_update() helpers.  Messages returned by
    those helpers are relayed to the queue and drive the counters.

    Returns (success, exception, error, warning, ignored, notice).
    """
    commitThreshold = int(settings.env["dataCommitThreshold"])
    appLogger = settings.appLogger
    self.queue.startTask(taskId, True)
    # Get last time schemas synchronised
    sql = "select last_sent_to_editable from shared.specification_registry where name=%s"
    self.supportCursor.execute(sql, (specification.name,))
    lastImportTimestamp = self.supportCursor.fetchone()[0]
    appLogger.debug("| lastImportTimestamp : {0}".format(lastImportTimestamp))
    # Grab record
    # NOTE(review): if no specification record matches args["table"],
    # thisRecord stays unbound and the code below raises NameError —
    # confirm callers always pass a known table.
    table = args["table"]
    for r in specification.records:
        if r.table == table:
            thisRecord = r
    # Scanning
    # ========
    affectedRecordCount = 0
    appLogger.debug("| Scanning {0}:".format(table))
    # Count records that have been inserted/updated
    if lastImportTimestamp is None:
        sql = "select count(*) from import.%s" %(table)
        self.supportCursor.execute(sql)
    else:
        sql = "select count(*) from import.%s where modified >" %(table)
        sql = sql + "%s"
        self.supportCursor.execute(sql, (lastImportTimestamp,))
    recordsModified = self.supportCursor.fetchone()[0]
    appLogger.debug("| {0} (modified)".format(recordsModified))
    affectedRecordCount = affectedRecordCount + recordsModified
    # Count records that have been deleted
    if lastImportTimestamp is None:
        sql = "select count(*) from history.import_%s_deletes" %(table)
        self.supportCursor.execute(sql)
    else:
        sql = "select count(*) from history.import_%s_deletes where deleted >" %(table)
        sql = sql + "%s"
        self.supportCursor.execute(sql, (lastImportTimestamp,))
    recordsModified = self.supportCursor.fetchone()[0]
    appLogger.debug("| {0} (deleted)".format(recordsModified))
    affectedRecordCount = affectedRecordCount + recordsModified
    appLogger.debug("| affectedRecordCount : {0} (total)".format(affectedRecordCount))
    self.queue.setScanResults(taskId, affectedRecordCount)
    lineCount = 0
    successCount = 0
    exceptionCount=0
    errorCount = 0
    warningCount = 0
    noticeCount = 0
    ignoredCount = 0
    # Fire off the deletes
    # ====================
    appLogger.debug("|")
    appLogger.debug("| PROCESSING:")
    appLogger.debug("|")
    appLogger.debug("| DELETES")
    appLogger.debug("| {0}".format(thisRecord.table))
    sql = "select id from history.import_%s_deletes" %(thisRecord.table)
    if lastImportTimestamp is None:
        params=None
    else:
        sql = sql + " where deleted > %s"
        params=(lastImportTimestamp,)
    deleteDml = "delete from editable.%s" %(thisRecord.table)
    deleteDml = deleteDml + " where id = %s"
    loopCursor = loopConnection.makeCursor("loopCursor", True, True)
    loopCursor.execute(sql, params)
    for data in loopCursor:
        if lineCount % 1000 == 0:
            self.queue.setTaskProgress(taskId, successCount, exceptionCount, errorCount, warningCount, noticeCount, ignoredCount)
        lineCount = lineCount + 1
        # Commit periodically to keep transactions bounded.
        if lineCount % commitThreshold == 0:
            appLogger.debug("| << Transaction size threshold reached ({0}): COMMIT >>".format(lineCount))
            dataConnection.connection.commit()
        # Decision call to go here
        deleteAllowed = True
        if deleteAllowed:
            successCount = successCount + 1
            dataCursor.execute(deleteDml, (data[0],))
        else:
            warningCount = warningCount + 1
    loopCursor.connection.commit()
    # Fire off the inserts/updates
    # ============================
    # NOTE(review): this placeholder is rebuilt from scratch further down
    # (under "BUILD DML Statements") before it is used; this first
    # construction appears redundant.
    placeholder = "%s,%s,%s,%s"
    for thisField in thisRecord.fields:
        if thisField.column is not None:
            placeholder = placeholder + ",%s"
    for thisField in thisRecord.additionalFields:
        placeholder = placeholder + ",%s"
    appLogger.debug("| {0}".format(thisRecord.table))
    # OPTIMISE:
    # Is there any data for this record in editable?
    # If not, then don't bother with the costly merge view.
    sql = "select exists (select 1 from editable.{0} limit 1)".format(thisRecord.table)
    self.supportCursor.execute(sql)
    dataExists = self.supportCursor.fetchone()
    dataExists = dataExists[0]
    appLogger.debug("| dataExists: {0}".format(dataExists))
    # Build SQL statement to find
    # all affected records.  importSliceStart/importSliceEnd delimit the
    # incoming column values within each fetched row.
    columnList=[]
    columnList.append("id")
    if dataExists:
        columnList.append("editable_record_exists")
        importSliceStart = 2
    else:
        importSliceStart = 1
    importSliceEnd = importSliceStart -1
    for thisField in thisRecord.fields:
        if thisField.column is not None:
            columnList.append(thisField.column)
            importSliceEnd = importSliceEnd +1
    for thisField in thisRecord.additionalFields:
        columnList.append(thisField.column)
        importSliceEnd = importSliceEnd +1
    columnList.append("created")
    columnList.append("modified")
    if dataExists:
        # Existing editable values come back prefixed e_ from the merge view.
        for thisField in thisRecord.fields:
            if thisField.column is not None:
                columnList.append("e_%s" %(thisField.column))
        for thisField in thisRecord.additionalFields:
            columnList.append("e_%s" %(thisField.column))
        columnList.append("e_visibility")
        columnList.append("e_security")
    originalEnd = len(columnList)-1
    if dataExists:
        source="shared.{0}_to_merge_into_editable".format(thisRecord.table)
    else:
        source="import.{0}".format(thisRecord.table)
    sql = "select {0} from {1}".format(",".join(columnList),source)
    if lastImportTimestamp is None:
        params=None
    else:
        sql = sql + " where modified > %s::timestamp"
        params=(lastImportTimestamp,)
    # BUILD DML Statements
    placeholder = "%s,%s,%s,%s"
    for thisField in thisRecord.fields:
        if thisField.column is not None:
            placeholder = placeholder + ",%s"
    for thisField in thisRecord.additionalFields:
        placeholder = placeholder + ",%s"
    insertDml = "select * from editable.%s_insert(%s)" %(thisRecord.table, placeholder)
    updateDml = "select * from editable.%s_update(%s)" %(thisRecord.table, placeholder)
    # Grab transformer function
    moduleFilename = cs.getChimpScriptFilenameToUse(settings.paths["repository"], ("specifications", specification.name, "resources", "py", "transformation","editable"), "%s_editable_transformer.py" %(thisRecord.table))
    module = imp.load_source("%s_editable_transformer.py" %(thisRecord.table), moduleFilename)
    transformFunction = module.transformSuppliedValues
    # Loop through all inserted/updated records
    appLogger.debug("| loopSql : {0}".format(sql))
    appLogger.debug("| insertDml : {0}".format(insertDml))
    appLogger.debug("| updateDml : {0}".format(updateDml))
    loopCursor = loopConnection.makeCursor("loopCursor", True, True)
    loopCursor.execute(sql, params)
    for data in loopCursor:
        if lineCount % 1000 == 0:
            self.queue.setTaskProgress(taskId, successCount, exceptionCount, errorCount, warningCount, noticeCount, ignoredCount)
        lineCount = lineCount + 1
        if lineCount % commitThreshold == 0:
            appLogger.debug("| << Transaction size threshold reached ({0}): COMMIT >>".format(lineCount))
            dataConnection.connection.commit()
        # Transform values
        transformedValues = transformFunction(dataCursor, data)
        # Assemble values to apply: id, source tag, the incoming column
        # slice, then any transformer-supplied extras beyond the original
        # column list.
        applyValues=[data[0],"import"]
        applyValues.extend(data[importSliceStart:importSliceEnd+1])
        applyValues.extend(transformedValues[originalEnd+1:])
        if dataExists:
            if data["editable_record_exists"]:
                dataCursor.execute(updateDml, applyValues)
                messages = dataCursor.fetchall()
            else:
                dataCursor.execute(insertDml, applyValues)
                messages = dataCursor.fetchall()
        else:
            dataCursor.execute(insertDml, applyValues)
            messages = dataCursor.fetchall()
        # Relay helper messages to the queue and classify the row outcome;
        # a row counts as a success only if no warning/error/exception was
        # reported for it.
        success=True
        for thisMessage in messages:
            msgLevel = thisMessage[0]
            msgCode = thisMessage[1]
            msgTitle = thisMessage[2]
            msgAffectedColumns = thisMessage[3]
            msgAffectedRowCount = thisMessage[4]
            msgContent = thisMessage[5]
            self.queue.addTaskMessage(taskId, thisRecord.table, lineCount, msgLevel, msgCode, msgTitle, msgAffectedColumns, msgAffectedRowCount, "{0}: {1}".format(msgContent,transformedValues))
            if msgLevel=="warning":
                warningCount += 1
                success=False
            elif msgLevel=="error":
                errorCount += 1
                success=False
            elif msgLevel=="exception":
                exceptionCount += 1
                success=False
            elif msgLevel=="notice":
                noticeCount += 1
        if success:
            successCount = successCount + 1
    loopCursor.close()
    return( (successCount, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount) )
def processImportSyncDeletes(self, loopConnection, dataConnection, dataCursor, settings, taskId, processLimit, specification, args):
    """Delete stale import rows after a sync run.

    Rows of import.<table> whose last_affirmed_task_id falls outside the
    task-id window [minTaskId, maxTaskId] were not re-affirmed by the sync
    and are removed via the generated import.<table>_delete() helper.
    When either bound is missing the method only performs the task
    bookkeeping and commits.

    Returns (success, exception, error, warning, ignored, notice).
    """
    minTaskId = args["minTaskId"]
    maxTaskId = args["maxTaskId"]
    successCount = 0
    exceptionCount = 0
    errorCount = 0
    warningCount = 0
    noticeCount = 0
    ignoredCount = 0
    self.queue.startTask(taskId, True)
    if minTaskId is not None and maxTaskId is not None:
        # Scanning
        for thisRecord in specification.records:
            if thisRecord.table == args["table"]:
                # Count rows not affirmed within the window.
                # BUG FIX: the range query previously used
                # "not (between %s and %s)", which is not valid PostgreSQL;
                # the correct predicate is "<col> not between %s and %s".
                if minTaskId == maxTaskId:
                    sql = "select count(*) from import.%s" % (thisRecord.table)
                    sql = sql + " where last_affirmed_task_id != %s"
                    settings.appLogger.debug("| staleSql:{0}".format(sql))
                    dataCursor.execute(sql, (minTaskId,))
                else:
                    sql = "select count(*) from import.%s" % (thisRecord.table)
                    sql = sql + " where last_affirmed_task_id not between %s and %s"
                    settings.appLogger.debug("| staleSql:{0}".format(sql))
                    dataCursor.execute(sql, (minTaskId, maxTaskId))
                staleCount = int(dataCursor.fetchone()[0])
                self.queue.setScanResults(taskId, staleCount)
                settings.appLogger.debug("| staleCount: {0}".format(staleCount))
                # Build "import.<table>_delete(%s, <one %s per pk column>)".
                params = []
                for i in range(0, len(thisRecord.primaryKeyColumns) + 1):
                    params.append("%s")
                dml = "select * from import.%s_delete(%s)" % (thisRecord.table, cs.delimitedStringList(params, ","))
                # Fetch the primary keys of every stale row (same fix as above).
                if minTaskId == maxTaskId:
                    sql = "select %s from import.%s" % (cs.delimitedStringList(thisRecord.primaryKeyColumns, ","), thisRecord.table)
                    sql = sql + " where last_affirmed_task_id != %s"
                    settings.appLogger.debug("| dataSQL: {0}".format(sql % (minTaskId)))
                    dataCursor.execute(sql, (minTaskId,))
                else:
                    sql = "select %s from import.%s" % (cs.delimitedStringList(thisRecord.primaryKeyColumns, ","), thisRecord.table)
                    sql = sql + " where last_affirmed_task_id not between %s and %s"
                    settings.appLogger.debug("| dataSQL:{0}".format(sql % (minTaskId, maxTaskId)))
                    dataCursor.execute(sql, (minTaskId, maxTaskId))
                settings.appLogger.debug("| dml: {0}".format(dml))
                results = dataCursor.fetchall()
                deletedRowCount = 0
                for data in results:
                    # First delete parameter is the current task id, then the
                    # row's primary key values in declaration order.
                    deleteParams = []
                    deleteParams.append(taskId)
                    i = 0
                    for thisPkColumn in thisRecord.primaryKeyColumns:
                        deleteParams.append(data[i])
                        i = i + 1
                    if deletedRowCount < 10:  # log only the first few rows
                        settings.appLogger.debug("| {0}".format(deleteParams))
                    dataCursor.execute(dml, tuple(deleteParams))
                    deletedRowCount += 1
                    # Relay any helper messages to the queue.
                    messages = dataCursor.fetchall()
                    for thisMessage in messages:
                        msgLevel = thisMessage[0]
                        msgCode = thisMessage[1]
                        msgTitle = thisMessage[2]
                        msgAffectedColumns = thisMessage[3]
                        msgAffectedRowCount = thisMessage[4]
                        msgContent = thisMessage[5]
                        self.queue.addTaskMessage(taskId, thisRecord.table, 0, msgLevel, msgCode, msgTitle, msgAffectedColumns, msgAffectedRowCount, msgContent)
                settings.appLogger.debug("| deletedRowCount: {0}".format(deletedRowCount))
    self.supportConnection.connection.commit()
    return (successCount, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount)
def processSendToImport(self, loopConnection, dataConnection, dataCursor, settings, taskId, processLimit, specification, args):
    """Apply every staged row of stage.<table> to the import schema.

    The effective send mode (full/change/sync) is derived from the
    configured import mode and the staged file's declared intent; each row
    is transformed by the per-table import transformer and applied through
    the generated import.<table>_insert()/_update()/_merge() helpers.

    Returns (success, exception, error, warning, ignored, notice).
    """
    def getAction(sendMode, identification):
        # Map the send mode (plus, in change mode, the row's own
        # identification value) to the DML helper used for that row.
        if sendMode=="full":
            action="insert"
        elif sendMode=="change":
            action=identification
        elif sendMode=="sync":
            action="merge"
        return(action)
    def getSendMode(importMode, fileIntent, hasData, appLogger):
        # Settle on what it is we're doing
        #
        # importMode - auto
        #            - full
        #            - change
        #            - sync
        #
        # fileIntent - undefined
        #            - full
        #            - change
        #            - mixed
        #
        #
        if importMode=="auto":
            if fileIntent=="undefined":
                if hasData:
                    mode = "sync"
                else:
                    mode = "full"
            elif fileIntent=="full":
                mode = "full"
            elif fileIntent=="change":
                mode = "change"
            elif fileIntent=="mixed":
                print("Imports of mixed file intents not supported")
                raise
        elif importMode=="full":
            mode = "full"
        elif importMode=="change":
            mode = "change"
        elif importMode=="sync":
            mode = "sync"
        appLogger.debug("| {0} (importMode={1} fileIntent={2} hasData={3})".format(mode, importMode, fileIntent, hasData))
        return(mode)
    appLogger = settings.appLogger
    commitThreshold = int(settings.env["dataCommitThreshold"])
    table = args["table"]
    importMode = args["importMode"]
    fileIntent = args["fileIntent"]
    strategy = args["strategy"]
    hasData = args["hasData"]
    sendMode = getSendMode(importMode, fileIntent, hasData, appLogger)
    self.queue.startTask(taskId, True)
    # Scan: total staged rows drive the queue progress display.
    sql = "select count(*) from stage.{0}".format(table)
    self.supportCursor.execute(sql)
    scanCount = self.supportCursor.fetchone()[0]
    self.queue.setScanResults(taskId, scanCount)
    appLogger.debug("| Scan count = {0}".format(scanCount))
    lineCount=0
    successCount = 0
    exceptionCount=0
    errorCount=0
    warningCount=0
    noticeCount=0
    ignoredCount=0
    # Grab record
    # NOTE(review): if no specification record matches the table name,
    # "record" stays unbound and the code below raises NameError.
    for r in specification.records:
        if r.table == table:
            record = r
    appLogger.debug("|")
    appLogger.debug("| {0}".format(table))
    # BUILD DML STATEMENTS FOR THIS RECORD
    # ------------------------------------
    selectColumns=[]
    insertPlaceholder="select * from import.{0}_insert(".format(table)
    insertPlaceholder += "%s,%s"
    if not record.editable:
        insertPlaceholder += ",%s,%s"
    updatePlaceholder="select * from import.{0}_update(".format(table)
    updatePlaceholder += "%s"
    if not record.editable:
        updatePlaceholder += ",%s,%s"
    mergePlaceholder="select * from import.{0}_merge(".format(table)
    mergePlaceholder += "%s,%s"
    if not record.editable:
        mergePlaceholder += ",%s,%s"
    if record.hasPrimaryKey():
        deletePlaceholder="select * from import.{0}_delete(%s" .format(record.table)
        for column in record.primaryKeyColumns:
            deletePlaceholder+=",%s"
        deletePlaceholder += ")"
    else:
        deletePlaceholder = None
    # One extra placeholder per mapped column across all three statements.
    for thisField in record.fields:
        if thisField.column is not None:
            selectColumns.append(thisField.column)
            insertPlaceholder += ",%s"
            updatePlaceholder += ",%s"
            mergePlaceholder += ",%s"
    for thisField in record.additionalFields:
        insertPlaceholder+=",%s"
        updatePlaceholder+=",%s"
        mergePlaceholder+=",%s"
    insertPlaceholder+=")"
    updatePlaceholder+=")"
    mergePlaceholder+=")"
    # Grab transformer functions
    moduleFilename = cs.getChimpScriptFilenameToUse(settings.paths["repository"], ("specifications", specification.name, "resources", "py","transformation","import"), "{0}_import_transformer.py".format(table))
    module = imp.load_source("{0}_import_transformer.py".format(record.table), moduleFilename)
    transformer = module.transformSuppliedValues
    loopSql = "select id,task_id,{0},identification from stage.{1}".format(",".join(selectColumns),table)
    # id + task_id + mapped columns + identification.
    selectCount = 3 +len(selectColumns)
    # DEBUG:
    appLogger.debug("| Pre-computed statements:")
    appLogger.debug("| loopSql : {0}".format(loopSql))
    appLogger.debug("| insertPlaceholder : {0}".format(insertPlaceholder))
    appLogger.debug("| updatePlaceholder : {0}".format(updatePlaceholder))
    appLogger.debug("| mergePlaceholder : {0}".format(mergePlaceholder))
    appLogger.debug("| deletePlaceholder : {0}".format(deletePlaceholder))
    # Loop through all staged records
    loopCursor = loopConnection.makeCursor("loopCursor", True, True)
    loopCursor.execute(loopSql)
    for data in loopCursor:
        if lineCount % 1000 == 0:
            self.queue.setTaskProgress(taskId, successCount, exceptionCount, errorCount, warningCount, noticeCount, ignoredCount)
        lineCount = lineCount + 1
        # Commit periodically to keep transactions bounded.
        if lineCount % commitThreshold == 0:
            appLogger.debug("| << Transaction size threshold reached ({0}): COMMIT >>".format(lineCount))
            dataConnection.connection.commit()
        identification = data["identification"]
        # workingRow aliases data, so deleting the trailing identification
        # entry from data also trims workingRow before transformation.
        workingRow = data
        del data[selectCount-1]
        workingRow = transformer(dataCursor, workingRow)
        action = getAction(sendMode, identification)
        if action=="insert":
            dataCursor.execute(insertPlaceholder, tuple(workingRow))
        elif action=="update":
            del workingRow[0]
            dataCursor.execute(updatePlaceholder, tuple(workingRow))
        elif action=="delete":
            # The delete path is currently disabled; "None" is a no-op.
            None
            # deleteParams=[]
            # deleteParams.append(stagedRow[1])
            # for thisPkColumn in pkColumnLists[data[0]]:
            #     deleteParams.append(stagedRow[thisPkColumn])
            # sql = deletePlaceholders[data[0]]
            # dataCursor.execute(sql, tuple(deleteParams))
            #
        elif action=="merge":
            dataCursor.execute(mergePlaceholder, tuple(workingRow))
        #
        # Relay helper messages and classify the row outcome; a row is a
        # success only if no warning/error/exception was reported for it.
        warningFlag = False
        errorFlag = False
        exceptionFlag=False
        messages = dataCursor.fetchall()
        success=True
        for thisMessage in messages:
            msgLevel = thisMessage[0]
            msgCode = thisMessage[1]
            msgTitle = thisMessage[2]
            msgAffectedColumns = thisMessage[3]
            msgAffectedRowCount = thisMessage[4]
            msgContent = thisMessage[5]
            self.queue.addTaskMessage(taskId,
                                      record.table,
                                      lineCount,
                                      msgLevel,
                                      msgCode,
                                      msgTitle,
                                      msgAffectedColumns,
                                      msgAffectedRowCount,
                                      "{0}: {1}".format(msgContent,data))
            if msgLevel=="warning":
                warningFlag = True
                success=False
            elif msgLevel=="error":
                errorFlag =True
                success=False
            elif msgLevel=="exception":
                exceptionFlag=True
                success=False
            elif msgLevel=="notice":
                noticeCount += 1
        if success:
            successCount = successCount + 1
        else:
            # Only the most severe problem level is counted per row.
            if exceptionFlag:
                exceptionCount += 1
            elif errorFlag:
                errorCount += 1
            elif warningFlag:
                warningCount += 1
    loopCursor.close()
    return( (successCount, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount) )
def processStageCsv(self, dataConnection, dataCursor, taskId, processLimit, specification, args, appLogger):
    """Stage one CSV file through a Stager and report its counters.

    Returns (fileIdentification, success, exception, error, warning,
    ignored, notice).
    """
    csvFilename = args["filename"]
    fileIdentification = args["fileIdentification"]
    self.queue.startTask(taskId, True)
    csvStager = Stager(self.queue, self.supportConnection, self.supportCursor,
                       dataConnection, dataCursor, taskId, specification,
                       self.settings.paths, self.settings.env["dataCommitThreshold"],
                       self.settings.appLogger)
    csvStager.stageCSV(csvFilename, processLimit)
    return (fileIdentification,
            csvStager.successCount, csvStager.exceptionCount, csvStager.errorCount,
            csvStager.warningCount, csvStager.ignoredCount, csvStager.noticeCount)
def processStageJSON(self, dataConnection, dataCursor, taskId, processLimit, specification, args):
    """Stage one JSON-described record through a Stager and report its counters."""
    self.queue.startTask(taskId, True)
    jsonStager = Stager(self.queue, self.supportConnection, self.supportCursor,
                        dataConnection, dataCursor, taskId, specification,
                        self.settings.paths, self.settings.env["dataCommitThreshold"],
                        self.settings.appLogger)
    jsonStager.stageJSON(args["recordIdentification"], args["record"])
    return (jsonStager.successCount, jsonStager.exceptionCount, jsonStager.errorCount,
            jsonStager.warningCount, jsonStager.ignoredCount, jsonStager.noticeCount)
def processCallExternalLoader(self, taskId, processLimit, args):
    """Delegate staging to the configured external loader and return its result."""
    self.queue.startTask(taskId, True)
    # An external loader is tracked as a single unit of work on the queue.
    self.queue.setScanResults(taskId, 1)
    return chimpExternalLoader.stageUsingExternalLoader(self.queue.conn, self.queue.cursor,
                                                        self.settings, taskId,
                                                        processLimit, args)
def processCheckpoint(self, dataConnection, dataCursor, streamName, settings, taskId, processLimit, args, successCountSinceCheckpoint, exceptionCountSinceCheckpoint, errorCountSinceCheckpoint, warningCountSinceCheckpoint, ignoredCountSinceCheckpoint, noticeCountSinceCheckpoint, appLogger):
    """Commit or roll back the data connection at a stream checkpoint.

    The action is either forced by checkpointBehaviour ("commit" /
    "rollback") or, under "tolerate", decided by comparing the problem
    counts accumulated since the last checkpoint against the configured
    tolerance level.

    Returns True when the queue should keep running, False when the
    tolerance policy was breached and processing must stop.
    """
    keepQueueRunning = True
    checkpointType = args["checkpointType"]  # read for presence; not otherwise used here
    toleranceLevel = args["toleranceLevel"]
    checkpointBehaviour = args["checkpointBehaviour"]
    # Problem total that breaches each tolerance level; "exception" and any
    # unrecognised level never trigger a rollback.
    breachCounts = {
        "none": exceptionCountSinceCheckpoint + errorCountSinceCheckpoint + warningCountSinceCheckpoint,
        "warning": exceptionCountSinceCheckpoint + errorCountSinceCheckpoint,
        "error": exceptionCountSinceCheckpoint,
    }
    if checkpointBehaviour == "commit":
        action = "commit"
    elif checkpointBehaviour == "rollback":
        action = "rollback"
    elif checkpointBehaviour == "tolerate":
        if breachCounts.get(toleranceLevel, 0) > 0:
            action = "rollback"
            keepQueueRunning = False
        else:
            action = "commit"
    appLogger.debug(" -- {0} --".format(action))
    # Tidy-up queue
    if keepQueueRunning:
        self.supportCursor.execute("select shared.set_checkpoint_success(%s,%s)", (taskId, streamName))
    if action == "rollback":
        dataConnection.connection.rollback()
        self.supportCursor.execute("select shared.set_checkpoint_failure(%s)", (streamName,))
    if action == "commit":
        dataConnection.connection.commit()
    self.supportConnection.connection.commit()
    return keepQueueRunning
def processScript(self, dataConnection, dataCursor, settings, taskId, processLimit, args):
    """Execute a SQL script file statement-by-statement on dataCursor.

    Each stripped, non-blank line that does not start with "-- " counts as
    one statement.  On success the support connection is committed; on any
    failure the data connection is rolled back, the exception is reported
    to the queue, and the counters reflect one exception.

    Returns (executedStatementCount, exception, error, warning, ignored,
    notice).
    """
    appLogger = settings.appLogger
    exceptionCount = 0
    errorCount = 0
    warningCount = 0
    ignoredCount = 0
    noticeCount = 0
    i = 0
    thisStatement = None
    filename = None  # pre-bound so the except handler can always report it
    statements = []
    try:
        filename = args["filename"]
        self.queue.startTask(taskId, True)
        # "with" guarantees the handle is closed even if reading fails
        # (previously the file leaked on a mid-read exception).
        # NOTE(review): only "-- "-prefixed comment lines are skipped;
        # "--comment" without a space would still be executed — confirm
        # that is intended.
        with open(filename, "r") as prepareFile:
            for thisLine in prepareFile:
                statement = thisLine.strip()
                if statement != "" and statement[:3] != "-- ":
                    statements.append(statement)
                    i = i + 1
        self.queue.setScanResults(taskId, i)
        i = 0
        for thisStatement in statements:
            # (Removed an always-true "if exceptionCount==0" guard here;
            # exceptionCount can only change in the except handler.)
            appLogger.debug("| {0}".format(thisStatement))
            dataCursor.execute(thisStatement)
            i = i + 1
            self.queue.setTaskProgress(taskId, i, exceptionCount, errorCount, warningCount, noticeCount, ignoredCount)
        self.supportConnection.connection.commit()
        return (i, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount)
    except Exception as detail:
        exceptionCount = exceptionCount + 1
        print("")
        print("Exception processing script: %s" % (filename))
        print(str(detail))
        print(statements)
        dataConnection.connection.rollback()
        self.queue.addTaskMessage(taskId, None, i, "exception", "EXP", "Exception executing line:\n%s" % (thisStatement), None, 1, str(detail))
        return (i, exceptionCount, errorCount, warningCount, ignoredCount, noticeCount)
| gpl-3.0 |
Evervolv/android_external_chromium_org | native_client_sdk/src/build_tools/tests/test_server.py | 170 | 2165 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import os
import SimpleHTTPServer
class LocalHTTPServer(object):
  """Runs a local HTTP server in a child process and exposes its port."""

  def __init__(self, serve_dir):
    parent_conn, child_conn = multiprocessing.Pipe()
    self.process = multiprocessing.Process(target=_HTTPServerProcess,
                                           args=(child_conn, serve_dir))
    self.process.start()
    # The child sends back its randomly-chosen port; allow 10 seconds.
    if not parent_conn.poll(10):
      raise Exception('Unable to launch HTTP server.')
    self.port = parent_conn.recv()
    self.conn = parent_conn

  def Shutdown(self):
    """Ask the child HTTP server process to stop and wait for it to exit."""
    self.conn.send(False)
    self.process.join()

  def GetURL(self, rel_url):
    """Return the full URL for a file served by this local HTTP server.

    Args:
      rel_url: A URL fragment to convert to a full URL. For example,
          GetURL('foobar.baz') -> 'http://localhost:1234/foobar.baz'
    """
    return 'http://localhost:%d/%s' % (self.port, rel_url)
class QuietHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  # Handler that suppresses per-request logging so test output stays clean.
  def log_message(self, msg_format, *args):
    pass
def _HTTPServerProcess(conn, serve_dir):
  """Run a local httpserver with a randomly-chosen port.

  This function assumes it is run as a child process using multiprocessing.

  Args:
    conn: A connection to the parent process. The child process sends
        the local port, and waits for a message from the parent to
        stop serving.
    serve_dir: The directory to serve. All files are accessible through
        http://localhost:<port>/path/to/filename.
  """
  import BaseHTTPServer

  os.chdir(serve_dir)
  # Port 0 lets the OS pick any free port.
  httpd = BaseHTTPServer.HTTPServer(('', 0), QuietHTTPRequestHandler)
  conn.send(httpd.server_address[1])  # the chosen port number
  httpd.timeout = 0.5  # seconds
  running = True
  while running:
    # Serve at most one request (or time out after 0.5 s), then check
    # whether the parent asked us to stop (it sends False).
    httpd.handle_request()
    if conn.poll():
      running = conn.recv()
  conn.close()
| bsd-3-clause |
hemmerling/codingdojo | src/game_of_life/python_coderetreat_socramob/cr_socrakassel01/gol01_test.py | 1 | 1891 | #This file was originally generated by PyScripter's unitest wizard
import unittest
from gol01 import Gol01
def dummy():
    """Placeholder callable whose return value (None) is used for comparisons."""
    return None
# tdd = test first
# input -> output
# I want to test whether a cell has 0 neighbours
# I test whether the neighbours are alive
class Gol01Test(unittest.TestCase):
    """Unit tests for the Gol01 game-of-life board's neighbour bookkeeping."""

    # Given
    def setUp(self):
        # Fresh board for every test.
        self.gol = Gol01()
        pass

    def tearDown(self):
        pass

    # When
    # Then
    def testHat0Nachbarn(self):
        # A fresh board reports no living neighbour in any of the eight
        # surrounding positions.
        # NOTE(review): the assertion message mentions get_size() but the
        # test exercises nachbar() -- the message looks copy-pasted.
        assert (self.gol.nachbar(0,0) == False) \
               and (self.gol.nachbar(0,1) == False) \
               and (self.gol.nachbar(0,2) == False) \
               and (self.gol.nachbar(1,0) == False) \
               and (self.gol.nachbar(1,2) == False) \
               and (self.gol.nachbar(2,0) == False) \
               and (self.gol.nachbar(2,1) == False) \
               and (self.gol.nachbar(2,2) == False) \
               , 'Gol01.get_size() does not provide the right return value'
        pass

    def testHat1NachbarnAuf00(self):
        # After marking (0,0) as a neighbour, only that position reads True.
        self.gol.setNachbar(0,0)
        assert (self.gol.nachbar(0,0) == True) \
               and (self.gol.nachbar(0,1) == False) \
               and (self.gol.nachbar(0,2) == False) \
               and (self.gol.nachbar(1,0) == False) \
               and (self.gol.nachbar(1,2) == False) \
               and (self.gol.nachbar(2,0) == False) \
               and (self.gol.nachbar(2,1) == False) \
               and (self.gol.nachbar(2,2) == False) \
               , 'Gol01.get_size() does not provide the right return value'
        pass
# run all tests
if __name__ == "__main__":
    try:
        unittest.main()
    except SystemExit as inst:
        # unittest.main() always calls sys.exit(); only re-raise when the
        # exit argument signals a failure.
        if inst.args[0] is True:  # raised by sys.exit(True) when tests failed
            raise
| apache-2.0 |
DirtyUnicorns/android_external_chromium_org | mojo/python/tests/messaging_unittest.py | 25 | 7317 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
# pylint: disable=F0401
import mojo.embedder
from mojo.bindings import messaging
from mojo import system
class _ForwardingConnectionErrorHandler(messaging.ConnectionErrorHandler):
  """Error handler that forwards every reported error to a given callable."""

  def __init__(self, callback):
    # Keep the callable around; OnError simply delegates to it.
    self._error_callback = callback

  def OnError(self, result):
    self._error_callback(result)
class ConnectorTest(unittest.TestCase):
  """Tests for messaging.Connector reading/writing over a message pipe."""

  def setUp(self):
    mojo.embedder.Init()
    self.loop = system.RunLoop()
    self.received_messages = []
    self.received_errors = []
    def _OnMessage(message):
      self.received_messages.append(message)
      return True
    def _OnError(result):
      self.received_errors.append(result)
    handles = system.MessagePipe()
    # The connector owns handle1; handle0 stays with the test to drive it.
    self.connector = messaging.Connector(handles.handle1)
    self.connector.SetIncomingMessageReceiver(
        messaging.ForwardingMessageReceiver(_OnMessage))
    self.connector.SetErrorHandler(
        _ForwardingConnectionErrorHandler(_OnError))
    self.connector.Start()
    self.handle = handles.handle0

  def tearDown(self):
    self.connector = None
    self.handle = None
    self.loop = None

  def testConnectorRead(self):
    # A message written on the peer end is delivered to the receiver.
    self.handle.WriteMessage()
    self.loop.RunUntilIdle()
    self.assertTrue(self.received_messages)
    self.assertFalse(self.received_errors)

  def testConnectorWrite(self):
    # A message accepted by the connector shows up on the peer end.
    self.connector.Accept(messaging.Message())
    (result, _, _) = self.handle.ReadMessage()
    self.assertEquals(result, system.RESULT_OK)
    self.assertFalse(self.received_errors)

  def testConnectorCloseRemoteHandle(self):
    # Closing the peer surfaces a connection error, not a message.
    self.handle.Close()
    self.loop.RunUntilIdle()
    self.assertFalse(self.received_messages)
    self.assertTrue(self.received_errors)
    self.assertEquals(self.received_errors[0],
                      system.RESULT_FAILED_PRECONDITION)

  def testConnectorDeleteConnector(self):
    # Dropping the connector closes its end of the pipe, so reads on the
    # peer fail with FAILED_PRECONDITION.
    self.connector = None
    (result, _, _) = self.handle.ReadMessage()
    self.assertEquals(result, system.RESULT_FAILED_PRECONDITION)
class HeaderTest(unittest.TestCase):
  """Round-trip serialization tests for messaging.MessageHeader."""

  def testSimpleMessageHeader(self):
    # A plain header (no flags) carries no request id.
    header = messaging.MessageHeader(0xdeadbeaf, messaging.NO_FLAG)
    self.assertEqual(header.message_type, 0xdeadbeaf)
    self.assertFalse(header.has_request_id)
    self.assertFalse(header.expects_response)
    self.assertFalse(header.is_response)
    data = header.Serialize()
    other_header = messaging.MessageHeader.Deserialize(data)
    self.assertEqual(other_header.message_type, 0xdeadbeaf)
    self.assertFalse(other_header.has_request_id)
    self.assertFalse(other_header.expects_response)
    self.assertFalse(other_header.is_response)

  def testMessageHeaderWithRequestID(self):
    # Request message.
    header = messaging.MessageHeader(0xdeadbeaf,
                                     messaging.MESSAGE_EXPECTS_RESPONSE_FLAG)
    self.assertEqual(header.message_type, 0xdeadbeaf)
    self.assertTrue(header.has_request_id)
    self.assertTrue(header.expects_response)
    self.assertFalse(header.is_response)
    self.assertEqual(header.request_id, 0)
    data = header.Serialize()
    other_header = messaging.MessageHeader.Deserialize(data)
    self.assertEqual(other_header.message_type, 0xdeadbeaf)
    self.assertTrue(other_header.has_request_id)
    self.assertTrue(other_header.expects_response)
    self.assertFalse(other_header.is_response)
    self.assertEqual(other_header.request_id, 0)
    # The request id assigned after construction must survive a
    # serialize/deserialize round trip.
    header.request_id = 0xdeadbeafdeadbeaf
    data = header.Serialize()
    other_header = messaging.MessageHeader.Deserialize(data)
    self.assertEqual(other_header.request_id, 0xdeadbeafdeadbeaf)
    # Response message.
    header = messaging.MessageHeader(0xdeadbeaf,
                                     messaging.MESSAGE_IS_RESPONSE_FLAG,
                                     0xdeadbeafdeadbeaf)
    self.assertEqual(header.message_type, 0xdeadbeaf)
    self.assertTrue(header.has_request_id)
    self.assertFalse(header.expects_response)
    self.assertTrue(header.is_response)
    self.assertEqual(header.request_id, 0xdeadbeafdeadbeaf)
    data = header.Serialize()
    other_header = messaging.MessageHeader.Deserialize(data)
    self.assertEqual(other_header.message_type, 0xdeadbeaf)
    self.assertTrue(other_header.has_request_id)
    self.assertFalse(other_header.expects_response)
    self.assertTrue(other_header.is_response)
    self.assertEqual(other_header.request_id, 0xdeadbeafdeadbeaf)
class RouterTest(unittest.TestCase):
  """Tests for messaging.Router: plain send/receive and request/response."""

  def setUp(self):
    mojo.embedder.Init()
    self.loop = system.RunLoop()
    self.received_messages = []
    self.received_errors = []
    def _OnMessage(message):
      self.received_messages.append(message)
      return True
    def _OnError(result):
      self.received_errors.append(result)
    handles = system.MessagePipe()
    # The router owns handle1; handle0 stays with the test to drive it.
    self.router = messaging.Router(handles.handle1)
    self.router.SetIncomingMessageReceiver(
        messaging.ForwardingMessageReceiver(_OnMessage))
    self.router.SetErrorHandler(
        _ForwardingConnectionErrorHandler(_OnError))
    self.router.Start()
    self.handle = handles.handle0

  def tearDown(self):
    self.router = None
    self.handle = None
    self.loop = None

  def testSimpleMessage(self):
    # A message accepted by the router is forwarded to the peer unchanged.
    header_data = messaging.MessageHeader(0, messaging.NO_FLAG).Serialize()
    message = messaging.Message(header_data)
    self.router.Accept(message)
    self.loop.RunUntilIdle()
    self.assertFalse(self.received_errors)
    self.assertFalse(self.received_messages)
    (res, data, _) = self.handle.ReadMessage(bytearray(len(header_data)))
    self.assertEquals(system.RESULT_OK, res)
    self.assertEquals(data[0], header_data)

  def testSimpleReception(self):
    # A message written by the peer reaches the incoming receiver.
    header_data = messaging.MessageHeader(0, messaging.NO_FLAG).Serialize()
    self.handle.WriteMessage(header_data)
    self.loop.RunUntilIdle()
    self.assertFalse(self.received_errors)
    self.assertEquals(len(self.received_messages), 1)
    self.assertEquals(self.received_messages[0].data, header_data)

  def testRequestResponse(self):
    header_data = messaging.MessageHeader(
        0, messaging.MESSAGE_EXPECTS_RESPONSE_FLAG).Serialize()
    message = messaging.Message(header_data)
    back_messages = []
    def OnBackMessage(message):
      back_messages.append(message)
    self.router.AcceptWithResponder(message,
                                    messaging.ForwardingMessageReceiver(
                                        OnBackMessage))
    self.loop.RunUntilIdle()
    self.assertFalse(self.received_errors)
    self.assertFalse(self.received_messages)
    (res, data, _) = self.handle.ReadMessage(bytearray(len(header_data)))
    self.assertEquals(system.RESULT_OK, res)
    # The router must have assigned a non-zero request id on the way out.
    message_header = messaging.MessageHeader.Deserialize(data[0])
    self.assertNotEquals(message_header.request_id, 0)
    # Echo a response carrying the same request id; the router routes it
    # to the responder registered above.
    response_header_data = messaging.MessageHeader(
        0,
        messaging.MESSAGE_IS_RESPONSE_FLAG,
        message_header.request_id).Serialize()
    self.handle.WriteMessage(response_header_data)
    self.loop.RunUntilIdle()
    self.assertFalse(self.received_errors)
    self.assertEquals(len(back_messages), 1)
    self.assertEquals(back_messages[0].data, response_header_data)
| bsd-3-clause |
Djlavoy/scrapy | scrapy/spiderloader.py | 117 | 1622 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from zope.interface import implementer
from scrapy.interfaces import ISpiderLoader
from scrapy.utils.misc import walk_modules
from scrapy.utils.spider import iter_spider_classes
@implementer(ISpiderLoader)
class SpiderLoader(object):
    """
    SpiderLoader is a class which locates and loads spiders
    in a Scrapy project.
    """

    def __init__(self, settings):
        self.spider_modules = settings.getlist('SPIDER_MODULES')
        self._spiders = {}
        # Walk every configured module tree and register the spiders found.
        for module_name in self.spider_modules:
            for module in walk_modules(module_name):
                self._load_spiders(module)

    def _load_spiders(self, module):
        # Register each spider class in the module under its declared name.
        for spider_cls in iter_spider_classes(module):
            self._spiders[spider_cls.name] = spider_cls

    @classmethod
    def from_settings(cls, settings):
        return cls(settings)

    def load(self, spider_name):
        """
        Return the Spider class for the given spider name. If the spider
        name is not found, raise a KeyError.
        """
        if spider_name not in self._spiders:
            raise KeyError("Spider not found: {}".format(spider_name))
        return self._spiders[spider_name]

    def find_by_request(self, request):
        """
        Return the list of spider names that can handle the given request.
        """
        matching_names = []
        for name, spider_cls in self._spiders.items():
            if spider_cls.handles_request(request):
                matching_names.append(name)
        return matching_names

    def list(self):
        """
        Return a list with the names of all spiders available in the project.
        """
        return [name for name in self._spiders]
| bsd-3-clause |
kittiu/odoo | addons/point_of_sale/report/pos_details.py | 99 | 9400 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class pos_details(report_sxw.rml_parse):
    """RML parser for the point-of-sale "Details of Sales" report.

    Aggregates POS order lines over a date range and a user selection and
    exposes the accumulated totals to the report template via
    ``self.localcontext`` (see ``__init__``).
    """

    def _get_invoice(self, inv_id):
        # Return the invoice number for inv_id, 'Draft' when no number has
        # been assigned yet, or '' when there is no invoice at all.
        # NOTE(review): if inv_id does not exist, fetchone() returns None
        # and res[0] would raise TypeError -- presumably inv_id always
        # references an existing invoice here; verify against callers.
        res={}
        if inv_id:
            self.cr.execute("select number from account_invoice as ac where id = %s", (inv_id,))
            res = self.cr.fetchone()
            return res[0] or 'Draft'
        else:
            return ''

    def _get_all_users(self):
        # All user ids; used as the default when no users were selected.
        user_obj = self.pool.get('res.users')
        return user_obj.search(self.cr, self.uid, [])

    def _pos_sales_details(self, form):
        """Collect one row per POS order line in the selected range and
        accumulate the running total, quantity and discount counters."""
        pos_obj = self.pool.get('pos.order')
        user_obj = self.pool.get('res.users')
        data = []
        result = {}
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('user_id','in',user_ids),('state','in',['done','paid','invoiced']),('company_id','=',company_id)])
        for pos in pos_obj.browse(self.cr, self.uid, pos_ids, context=self.localcontext):
            for pol in pos.lines:
                result = {
                    'code': pol.product_id.default_code,
                    'name': pol.product_id.name,
                    'invoice_id': pos.invoice_id.id,
                    'price_unit': pol.price_unit,
                    'qty': pol.qty,
                    'discount': pol.discount,
                    # Line total net of the percentage discount.
                    'total': (pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)),
                    'date_order': pos.date_order,
                    'pos_name': pos.name,
                    'uom': pol.product_id.uom_id.name
                }
                data.append(result)
                self.total += result['total']
                self.qty += result['qty']
                self.discount += result['discount']
        if data:
            return data
        else:
            return {}

    def _get_qty_total_2(self):
        # Quantity accumulated by _pos_sales_details.
        return self.qty

    def _get_sales_total_2(self):
        # Sales total accumulated by _pos_sales_details.
        return self.total

    def _get_sum_invoice_2(self, form):
        # Total of invoiced POS lines in the selected range.
        pos_obj = self.pool.get('pos.order')
        user_obj = self.pool.get('res.users')
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('user_id','in',user_ids),('company_id','=',company_id),('invoice_id','<>',False)])
        for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
            for pol in pos.lines:
                self.total_invoiced += (pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0))
        return self.total_invoiced or False

    def _paid_total_2(self):
        return self.total or 0.0

    def _get_sum_dis_2(self):
        return self.discount or 0.0

    def _get_sum_discount(self, form):
        #code for the sum of discount value
        pos_obj = self.pool.get('pos.order')
        user_obj = self.pool.get('res.users')
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('user_id','in',user_ids),('company_id','=',company_id)])
        for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
            for pol in pos.lines:
                # Monetary value given away as discount on this line.
                self.total_discount += ((pol.price_unit * pol.qty) * (pol.discount / 100))
        return self.total_discount or False

    def _get_payments(self, form):
        # Sum payments per journal for the orders in the selected range.
        statement_line_obj = self.pool.get("account.bank.statement.line")
        pos_order_obj = self.pool.get("pos.order")
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = self.pool['res.users'].browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_order_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('state','in',['paid','invoiced','done']),('user_id','in',user_ids), ('company_id', '=', company_id)])
        data={}
        if pos_ids:
            st_line_ids = statement_line_obj.search(self.cr, self.uid, [('pos_statement_id', 'in', pos_ids)])
            if st_line_ids:
                st_id = statement_line_obj.browse(self.cr, self.uid, st_line_ids)
                a_l=[]
                for r in st_id:
                    a_l.append(r['id'])
                # Group payment amounts by journal name.
                self.cr.execute("select aj.name,sum(amount) from account_bank_statement_line as absl,account_bank_statement as abs,account_journal as aj " \
                                "where absl.statement_id = abs.id and abs.journal_id = aj.id and absl.id IN %s " \
                                "group by aj.name ",(tuple(a_l),))
                data = self.cr.dictfetchall()
                return data
        else:
            return {}

    def _total_of_the_day(self, objects):
        return self.total or 0.00

    def _sum_invoice(self, objects):
        # Sum of invoice totals for orders that actually have an invoice
        # number assigned.
        return reduce(lambda acc, obj:
                      acc + obj.invoice_id.amount_total,
                      [o for o in objects if o.invoice_id and o.invoice_id.number],
                      0.0)

    def _ellipsis(self, orig_str, maxlen=100, ellipsis='...'):
        # Truncate orig_str so the total length leaves room for the
        # ellipsis marker. NOTE(review): the marker itself is never
        # appended here -- only the truncated prefix is returned.
        maxlen = maxlen - len(ellipsis)
        if maxlen <= 0:
            maxlen = 1
        new_str = orig_str[:maxlen]
        return new_str

    def _strip_name(self, name, maxlen=50):
        return self._ellipsis(name, maxlen, ' ...')

    def _get_tax_amount(self, form):
        # Accumulate tax amounts per tax id across all order lines in range.
        taxes = {}
        account_tax_obj = self.pool.get('account.tax')
        user_ids = form['user_ids'] or self._get_all_users()
        pos_order_obj = self.pool.get('pos.order')
        company_id = self.pool['res.users'].browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_order_obj.search(self.cr, self.uid, [('date_order','>=',form['date_start'] + ' 00:00:00'),('date_order','<=',form['date_end'] + ' 23:59:59'),('state','in',['paid','invoiced','done']),('user_id','in',user_ids), ('company_id', '=', company_id)])
        for order in pos_order_obj.browse(self.cr, self.uid, pos_ids):
            for line in order.lines:
                line_taxes = account_tax_obj.compute_all(self.cr, self.uid, line.product_id.taxes_id, line.price_unit * (1-(line.discount or 0.0)/100.0), line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
                for tax in line_taxes['taxes']:
                    taxes.setdefault(tax['id'], {'name': tax['name'], 'amount':0.0})
                    taxes[tax['id']]['amount'] += tax['amount']
        return taxes.values()

    def _get_user_names(self, user_ids):
        # Comma-separated display names for the selected users.
        user_obj = self.pool.get('res.users')
        return ', '.join(map(lambda x: x.name, user_obj.browse(self.cr, self.uid, user_ids)))

    def __init__(self, cr, uid, name, context):
        super(pos_details, self).__init__(cr, uid, name, context=context)
        # Running accumulators filled by the helper methods above.
        self.total = 0.0
        self.qty = 0.0
        self.total_invoiced = 0.0
        self.discount = 0.0
        self.total_discount = 0.0
        # Names available inside the report template.
        self.localcontext.update({
            'time': time,
            'strip_name': self._strip_name,
            'getpayments': self._get_payments,
            'getsumdisc': self._get_sum_discount,
            'gettotaloftheday': self._total_of_the_day,
            'gettaxamount': self._get_tax_amount,
            'pos_sales_details':self._pos_sales_details,
            'getqtytotal2': self._get_qty_total_2,
            'getsalestotal2': self._get_sales_total_2,
            'getsuminvoice2':self._get_sum_invoice_2,
            'getpaidtotal2': self._paid_total_2,
            'getinvoice':self._get_invoice,
            'get_user_names': self._get_user_names,
        })
class report_pos_details(osv.AbstractModel):
    # Report model that renders the 'Details of Sales' template using the
    # pos_details parser defined in this module.
    _name = 'report.point_of_sale.report_detailsofsales'
    _inherit = 'report.abstract_report'
    _template = 'point_of_sale.report_detailsofsales'
    _wrapped_report_class = pos_details
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gion86/awlsim | awlsimhw_debug/main.py | 1 | 2532 | # -*- coding: utf-8 -*-
#
# AWL simulator - Debug hardware interface
#
# Copyright 2013-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.hardware import *
from awlsim.core.operators import AwlOperator
from awlsim.core.datatypes import AwlOffset
class HardwareInterface(AbstractHardwareInterface):
    """Debug hardware interface: exercises the CPU I/O paths without
    talking to any real hardware."""

    name = "debug"

    paramDescs = [
        HwParamDesc_bool("dummyParam",
                         description = "Unused dummy parameter"),
    ]

    def __init__(self, sim, parameters={}):
        # NOTE(review): mutable default argument; harmless as long as the
        # base class never mutates it -- confirm before refactoring.
        AbstractHardwareInterface.__init__(self,
                                           sim = sim,
                                           parameters = parameters)

    def doStartup(self):
        pass # Do nothing

    def doShutdown(self):
        pass # Do nothing

    def readInputs(self):
        # Get the first input dword and write it back.
        dword = self.sim.cpu.fetch(AwlOperator(AwlOperator.MEM_E,
                                               32,
                                               AwlOffset(self.inputAddressBase)))
        # Split the 32-bit value into big-endian bytes for storeInputRange.
        dwordBytes = bytearray( ( ((dword >> 24) & 0xFF),
                                  ((dword >> 16) & 0xFF),
                                  ((dword >> 8) & 0xFF),
                                  (dword & 0xFF) ) )
        self.sim.cpu.storeInputRange(self.inputAddressBase,
                                     dwordBytes)

    def writeOutputs(self):
        # Fetch a data range, but don't do anything with it.
        outData = self.sim.cpu.fetchOutputRange(self.outputAddressBase,
                                                512)
        assert(outData)

    def directReadInput(self, accessWidth, accessOffset):
        if accessOffset < self.inputAddressBase:
            return None
        # Just read the current value from the CPU and return it.
        return self.sim.cpu.fetch(AwlOperator(AwlOperator.MEM_E,
                                              accessWidth,
                                              AwlOffset(accessOffset)))

    def directWriteOutput(self, accessWidth, accessOffset, data):
        if accessOffset < self.outputAddressBase:
            return False
        # Just pretend we wrote it somewhere.
        return True
| gpl-2.0 |
rajashreer7/autotest-client-tests | linux-tools/perl_XML_RegExp/perl_XML_RegExp.py | 4 | 1286 | #!/bin/python
import os, subprocess
import logging
from autotest.client import test
from autotest.client.shared import error
class perl_XML_RegExp(test.test):
    """
    Autotest module for testing basic functionality
    of perl_XML_RegExp

    @author Ramya BS <ramyabs1@in.ibm.com> ##
    """
    # NOTE: this module uses Python 2 syntax ('except X, e') and must run
    # under a Python 2 interpreter.
    version = 1
    nfail = 0          # overall failure counter
    path = ''

    def initialize(self):
        """
        Sets the overall failure counter for the test.
        """
        self.nfail = 0
        logging.info('\n Test initialize successfully')

    def run_once(self, test_path=''):
        """
        Trigger test run
        """
        try:
            # The shell script reads its helpers from $LTPBIN.
            os.environ["LTPBIN"] = "%s/shared" %(test_path)
            ret_val = subprocess.Popen(['./perl-XML-RegExp.sh'], cwd="%s/perl_XML_RegExp" %(test_path))
            ret_val.communicate()
            if ret_val.returncode != 0:
                self.nfail += 1
        except error.CmdError, e:
            self.nfail += 1
            logging.error("Test Failed: %s", e)

    def postprocess(self):
        # Report overall pass/fail based on the accumulated counter.
        if self.nfail != 0:
            logging.info('\n nfails is non-zero')
            raise error.TestError('\nTest failed')
        else:
            logging.info('\n Test completed successfully ')
| gpl-2.0 |
40223117cda/w16cdaa | static/Brython3.1.1-20150328-091302/Lib/_testcapi.py | 742 | 4231 |
# Pure-Python stand-in for CPython's _testcapi extension module.
# The constants mirror common 32-bit C limits; every function is a no-op
# placeholder so that code importing _testcapi can load without the real
# C extension.
CHAR_MAX = 127
CHAR_MIN = -128
DBL_MAX = 1.7976931348623157e+308
DBL_MIN = 2.2250738585072014e-308
FLT_MAX = 3.4028234663852886e+38
FLT_MIN = 1.1754943508222875e-38
INT_MAX = 2147483647
INT_MIN = -2147483648
LLONG_MAX = 9223372036854775807
LLONG_MIN = -9223372036854775808
LONG_MAX = 2147483647
LONG_MIN = -2147483648
PY_SSIZE_T_MAX = 2147483647
PY_SSIZE_T_MIN = -2147483648
SHRT_MAX = 32767
SHRT_MIN = -32768
SIZEOF_PYGC_HEAD = 16
UCHAR_MAX = 255
UINT_MAX = 4294967295
ULLONG_MAX = 18446744073709551615
ULONG_MAX = 4294967295
USHRT_MAX = 65535
# Fake loader repr copied from a real CPython session.
__loader__ = "<_frozen_importlib.ExtensionFileLoader object at 0x00C98DD0>"

def _pending_threadfunc(*args,**kw):
    pass

class _test_structmembersType(object):
    pass

def _test_thread_state(*args,**kw):
    pass

def argparsing(*args,**kw):
    pass

def code_newempty(*args,**kw):
    pass

def codec_incrementaldecoder(*args,**kw):
    pass

def codec_incrementalencoder(*args,**kw):
    pass

def crash_no_current_thread(*args,**kw):
    pass

class error(Exception):
    pass

def exception_print(*args,**kw):
    pass

def getargs_B(*args,**kw):
    pass

def getargs_H(*args,**kw):
    pass

def getargs_I(*args,**kw):
    pass

def getargs_K(*args,**kw):
    pass

def getargs_L(*args,**kw):
    pass

def getargs_Z(*args,**kw):
    pass

def getargs_Z_hash(*args,**kw):
    pass

def getargs_b(*args,**kw):
    pass

def getargs_c(*args,**kw):
    pass

def getargs_h(*args,**kw):
    pass

def getargs_i(*args,**kw):
    pass

def getargs_k(*args,**kw):
    pass

def getargs_keyword_only(*args,**kw):
    pass

def getargs_keywords(*args,**kw):
    pass

def getargs_l(*args,**kw):
    pass

def getargs_n(*args,**kw):
    pass

def getargs_p(*args,**kw):
    pass

def getargs_s(*args,**kw):
    pass

def getargs_s_hash(*args,**kw):
    pass

def getargs_s_star(*args,**kw):
    pass

def getargs_tuple(*args,**kw):
    pass

def getargs_u(*args,**kw):
    pass

def getargs_u_hash(*args,**kw):
    pass

def getargs_w_star(*args,**kw):
    pass

def getargs_y(*args,**kw):
    pass

def getargs_y_hash(*args,**kw):
    pass

def getargs_y_star(*args,**kw):
    pass

def getargs_z(*args,**kw):
    pass

def getargs_z_hash(*args,**kw):
    pass

def getargs_z_star(*args,**kw):
    pass

class instancemethod(object):
    pass

def make_exception_with_doc(*args,**kw):
    pass

def make_memoryview_from_NULL_pointer(*args,**kw):
    pass

def parse_tuple_and_keywords(*args,**kw):
    pass

def pytime_object_to_time_t(*args,**kw):
    pass

def pytime_object_to_timespec(*args,**kw):
    pass

def pytime_object_to_timeval(*args,**kw):
    pass

def raise_exception(*args,**kw):
    pass

def raise_memoryerror(*args,**kw):
    pass

def run_in_subinterp(*args,**kw):
    pass

def set_exc_info(*args,**kw):
    pass

def test_L_code(*args,**kw):
    pass

def test_Z_code(*args,**kw):
    pass

def test_capsule(*args,**kw):
    pass

def test_config(*args,**kw):
    pass

def test_datetime_capi(*args,**kw):
    pass

def test_dict_iteration(*args,**kw):
    pass

def test_empty_argparse(*args,**kw):
    pass

def test_k_code(*args,**kw):
    pass

def test_lazy_hash_inheritance(*args,**kw):
    pass

def test_list_api(*args,**kw):
    pass

def test_long_and_overflow(*args,**kw):
    pass

def test_long_api(*args,**kw):
    pass

def test_long_as_double(*args,**kw):
    pass

def test_long_as_size_t(*args,**kw):
    pass

def test_long_long_and_overflow(*args,**kw):
    pass

def test_long_numbits(*args,**kw):
    pass

def test_longlong_api(*args,**kw):
    pass

def test_null_strings(*args,**kw):
    pass

def test_s_code(*args,**kw):
    pass

def test_string_from_format(*args,**kw):
    pass

def test_string_to_double(*args,**kw):
    pass

def test_u_code(*args,**kw):
    pass

def test_unicode_compare_with_ascii(*args,**kw):
    pass

def test_widechar(*args,**kw):
    pass

def test_with_docstring(*args,**kw):
    """This is a pretty normal docstring."""
    pass

def traceback_print(*args,**kw):
    pass

def unicode_aswidechar(*args,**kw):
    pass

def unicode_aswidecharstring(*args,**kw):
    pass

def unicode_encodedecimal(*args,**kw):
    pass

def unicode_transformdecimaltoascii(*args,**kw):
    pass
| gpl-3.0 |
CN-UPB/OpenBarista | components/decaf-storage/decaf_storage/utils.py | 1 | 2772 | ##
# Copyright 2016 DECaF Project Group, University of Paderborn
# This file is part of the decaf orchestration framework
# All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
##
import re
from sqlalchemy import Column, TIMESTAMP, func, text, String
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declared_attr
from decaf_storage.json_base import JsonEncodedDict
__author__ = ''
class TimestampMixin(object):
    # Adds creation/modification timestamp columns to a mapped table.
    # NOTE(review): modified_at only has an insert-time default here (no
    # onupdate=func.now()); presumably it is refreshed elsewhere -- confirm.
    created_at = Column(TIMESTAMP, default=func.now())
    modified_at = Column(TIMESTAMP, default=func.now())
class MetaMixin(object):
    # Free-form JSON metadata column (serialized via JsonEncodedDict).
    meta = Column(JsonEncodedDict)
class HeaderMixin(object):
    # Common identifying columns plus a table name derived from the class.

    @declared_attr
    def __tablename__(cls):
        # snake_case class name, pluralised with a trailing 's'.
        return calculate_tablename(cls.__name__) + 's'

    @declared_attr
    def __table_args__(cls):
        return {'extend_existing': True}

    # Primary key generated server-side by PostgreSQL's uuid_generate_v4().
    uuid = Column(postgresql.UUID(True), server_default=text("uuid_generate_v4()"), primary_key=True)
    name = Column(String(250), nullable=True)
    description = Column(String(450), nullable=True)
class StdObject(HeaderMixin, TimestampMixin, MetaMixin):
    # Convenience base combining header, timestamp and meta columns.

    def __repr__(self):
        # Multi-line human-readable dump used for logging/debugging.
        return '%s (%s)\n\tname: %s\n\tdescription: %s\n\tcreated at: %s\n\tmodified at: %s' % (
            self.__class__.__name__,
            self.uuid,
            self.name,
            self.description,
            self.created_at,
            self.modified_at)
def calculate_tablename(name):
    """Convert a CamelCase class name to snake_case.

    Example:
        calculate_tablename('HTTPResponseCodeXYZ')
        'http_response_code_xyz'
    """
    # First separate an uppercase run from a following capitalized word,
    # then split every lower/digit-to-upper boundary, and lowercase.
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
def generate_function_names(model):
    """Yield (class, class_name, call_type) triples for every class in *model*.

    For each class found in the namespace *model* (typically a module of
    SQLAlchemy models), one triple is produced per CRUD call type:
    'add', 'get', 'delete', 'update'.
    """
    # Keep only the classes defined on the model (skip functions, strings...).
    classes_dict = dict([(name, cls) for name, cls in model.__dict__.items() if isinstance(cls, type)])
    # .items() instead of the Python-2-only .iteritems() keeps this working
    # on both Python 2 and Python 3.
    for cls_name, cls in classes_dict.items():
        for call_type in ['add', 'get', 'delete', 'update']:
            yield (cls, cls_name, call_type)
def generate_api_function_names(model):
    """
    :return: all function names for the storage component
        derived from model classes
    """
    for cls, cls_name, call_type in generate_function_names(model):
        rpc_name = 'decaf_storage.%s_%s' % (call_type, calculate_tablename(cls_name))
        yield (rpc_name, cls, call_type)
def generate_functions(prefix, model):
    """Yield a name/contract mapping for every storage function of *model*."""
    for cls, cls_name, call_type in generate_function_names(model):
        tablename = calculate_tablename(cls_name)
        yield {
            'name': '%s_%s' % (call_type, tablename),
            'contract': '%s.%s_%s' % (prefix, call_type, tablename)
        }
| mpl-2.0 |
RicardoJohann/frappe | setup.py | 6 | 1633 | # imports - standard imports
import os, shutil
from distutils.command.clean import clean as Clean
from setuptools import setup, find_packages
import re, ast

# get version from __version__ variable in frappe/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')

# One requirement per line in requirements.txt.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')

# literal_eval turns the matched quoted string (e.g. "'1.2.3'") into the
# bare version string without executing the module.
with open('frappe/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
class CleanCommand(Clean):
    # Extends distutils' `clean` to also remove build/test artefacts that
    # the stock command leaves behind.
    def run(self):
        Clean.run(self)
        basedir = os.path.abspath(os.path.dirname(__file__))
        # Top-level artefact files and directories.
        for relpath in ['build', '.cache', '.coverage', 'dist', 'frappe.egg-info']:
            abspath = os.path.join(basedir, relpath)
            if os.path.exists(abspath):
                if os.path.isfile(abspath):
                    os.remove(abspath)
                else:
                    shutil.rmtree(abspath)
        # Recursively drop compiled bytecode and __pycache__ directories.
        for dirpath, dirnames, filenames in os.walk(basedir):
            for filename in filenames:
                _, extension = os.path.splitext(filename)
                if extension in ['.pyc']:
                    abspath = os.path.join(dirpath, filename)
                    os.remove(abspath)
            for dirname in dirnames:
                if dirname in ['__pycache__']:
                    abspath = os.path.join(dirpath, dirname)
                    shutil.rmtree(abspath)
# Package metadata; `clean` is overridden with the deep-clean above.
setup(
    name='frappe',
    version=version,
    description='Metadata driven, full-stack web framework',
    author='Frappe Technologies',
    author_email='info@frappe.io',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires,
    dependency_links=[
        'https://github.com/frappe/python-pdfkit.git#egg=pdfkit'
    ],
    cmdclass = \
    {
        'clean': CleanCommand
    }
)
| mit |
tomwallis/PsyUtils | psyutils/image/_filters.py | 1 | 27269 | # fourier filtering functions.
from __future__ import print_function, division
import numpy as np
import psyutils as pu
def make_filter_generic(filt_name,
                        pixel_units=True,
                        zero_mean=True,
                        **kwargs):
    """ A function intended for internal use, to remove redundancy in
    make_filter_X named functions. This will set up axes and apply
    the distribution function, with a few defaults if not specified.
    You should use the named wrapper functions (e.g. `make_filter_lowpass`),
    unless you're playing around.

    Args:
        filt_name (string):
            the name of the filter to be made. Can be one of:
            'lowpass', 'highpass', 'gaussian', 'log_gauss', 'logexp',
            'log_cosine', 'alpha', 'ori_gaussian'.
        pixel_units (boolean):
            If True, units are in pixels of the array. This means that the
            parameters of the filter are in pixel units -- so the cutoff
            frequency is in cycles per image.
        zero_mean (boolean):
            If True, the zero-frequency component of the filter is zero,
            meaning that multiplication by an image in the fourier domain
            will return a zero-mean image. If False, the zero-frequency
            component is maintained, meaning the filtered image will have the
            same mean as the original.
        **kwargs:
            A number of named (keyword) arguments for passing to the axis
            limits or distribution function. Could be things like `cutoff`
            or `peak`.

    Returns:
        filt (matrix):
            The filter; a 2D floating point numpy array.

    Raises:
        ValueError: if `filt_name` is not one of the recognised names.
    """
    # determine distribution function:
    if filt_name == "lowpass":
        f = pu.dist.lowpass(peak=kwargs['cutoff'],
                            include_border=kwargs['include_border'])
    elif filt_name == "highpass":
        f = pu.dist.highpass(peak=kwargs['cutoff'],
                             include_border=kwargs['include_border'])
    elif filt_name == "gaussian":
        f = pu.dist.gauss(peak=kwargs['peak'],
                          width=kwargs['width'])
    elif filt_name == "log_gauss":
        f = pu.dist.log_gauss(peak=kwargs['peak'],
                              width=kwargs['width'])
    elif filt_name == "logexp":
        f = pu.dist.log_exponential(peak=kwargs['peak'],
                                    width=kwargs['width'])
    elif filt_name == "log_cosine":
        f = pu.dist.log_cosine(peak=kwargs['peak'])
    elif filt_name == "alpha":
        f = pu.dist.one_over_f(alpha=kwargs['alpha'])
    elif filt_name == "ori_gaussian":
        # Needs both radial and angular axes; handled inline below.
        pass
    else:
        raise ValueError(filt_name + " is not a recognised filter type...")

    # make axes:
    if pixel_units is True:
        lims = pu.image.axes_limits_in_pixels(size=kwargs['size'])
        r, a = pu.image.axes_polar(size=kwargs['size'], axes_limits=lims)
    else:
        raise Exception("Tom hasn't written not-pixel-unit code yet")

    if filt_name == "ori_gaussian":
        # this is adapted from Peter Bex's matlab code. Should one day put it
        # into a function called on angular axes, but it's not simply f(a),
        # since you need to do the angular conversion for the opposite
        # phases.
        peak_radians = kwargs['peak'] * np.pi / 180.
        width_radians = kwargs['width'] * np.pi / 180.
        sin_theta = np.sin(a)
        cos_theta = np.cos(a)
        ds = sin_theta * np.cos(peak_radians) - cos_theta * np.sin(peak_radians)
        dc = cos_theta * np.cos(peak_radians) + sin_theta * np.sin(peak_radians)
        dtheta = abs(np.arctan2(ds, dc))  # Absolute angular distance
        filt = np.exp((-dtheta**2) / (2*width_radians**2))  # ang filter 1
        if kwargs['symmetric'] is True:
            peak_radians += np.pi  # add 180 deg offset for other lobe
            ds = sin_theta * np.cos(peak_radians) - cos_theta * np.sin(peak_radians)
            dc = cos_theta * np.cos(peak_radians) + sin_theta * np.sin(peak_radians)
            dtheta = abs(np.arctan2(ds, dc))  # Absolute angular distance
            filt += np.exp((-dtheta**2) / (2*width_radians**2))  # ang filter 2
    else:
        filt = f(r)

    # Builtin `float` instead of the `np.float` alias, which was deprecated
    # in NumPy 1.20 and removed in 1.24 (they were the same type).
    filt = filt.astype(float)

    if zero_mean is True:
        # Zero out the DC (centre) component.
        filt[filt.shape[0]//2, filt.shape[1]//2] = 0.
    return(filt)
def make_filter_lowpass(size, cutoff, include_border=True,
                        pixel_units=True,
                        zero_mean=True):
    """Build a low-pass frequency filter (a hard threshold on radial
    frequency). Thin wrapper around `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        cutoff: the cutoff frequency (cycles per image when pixel_units).
        include_border (boolean): whether the cutoff frequency itself is
            passed (True) or excluded (False).
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a lowpass filter of 64 pixels square with cutoff 8 cycles/image:
        filt = pu.image.make_filter_lowpass(64, cutoff=8)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='lowpass',
                                        size=size,
                                        cutoff=cutoff,
                                        include_border=include_border,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_highpass(size, cutoff, include_border=True,
                         pixel_units=True,
                         zero_mean=True):
    """Build a high-pass frequency filter (a hard threshold on radial
    frequency). Thin wrapper around `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        cutoff: the cutoff frequency (cycles per image when pixel_units).
        include_border (boolean): whether the cutoff frequency itself is
            passed (True) or excluded (False).
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a highpass filter of 64 pixels square with cutoff 8 cycles/image:
        filt = pu.image.make_filter_highpass(64, cutoff=8)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='highpass',
                                        size=size,
                                        cutoff=cutoff,
                                        include_border=include_border,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_gaussian(size, peak, width,
                         pixel_units=True,
                         zero_mean=True):
    """Build a gaussian frequency filter (a gaussian profile over radial
    frequency). Thin wrapper around `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        peak: the peak (mean) frequency; with pixel_units this is in
            pixels from the centre, i.e. cycles per image.
        width: the width of the filter (sd).
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a gaussian filter of 64 pixels square:
        filt = pu.image.make_filter_gaussian(64, peak=8, width=2)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='gaussian',
                                        size=size,
                                        peak=peak,
                                        width=width,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_log_gauss(size, peak, width,
                          pixel_units=True,
                          zero_mean=True):
    """Build a log-gaussian frequency filter (a log-gaussian profile over
    radial frequency). Thin wrapper around `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        peak: the peak (mean) frequency; with pixel_units this is in
            pixels from the centre, i.e. cycles per image.
        width: the width of the filter (log sd) -- TODO confirm units
            against `pu.dist.log_gauss`.
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a log gaussian filter of 64 pixels square:
        filt = pu.image.make_filter_log_gauss(64, peak=8, width=.2)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='log_gauss',
                                        size=size,
                                        peak=peak,
                                        width=width,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_log_exp(size, peak, width,
                        pixel_units=True,
                        zero_mean=True):
    """Build a log-exponential frequency filter over radial frequency.
    Thin wrapper around `make_filter_generic`.

    The distribution is a rearranged version of equation 1 in
    Bex (2010), (In)sensitivity to spatial distortion in natural scenes.
    Journal of Vision.

    Args:
        size: output size as w, h; a scalar yields a square array.
        peak: the peak frequency; with pixel_units this is in pixels from
            the centre, i.e. cycles per image.
        width: the half-bandwidth of the filter in octaves (width=0.5
            gives a 1-octave full width).
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a log exponential filter of 64 pixels square:
        filt = pu.image.make_filter_log_exp(64, peak=8, width=0.2)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='logexp',
                                        size=size,
                                        peak=peak,
                                        width=width,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_log_cosine(size, peak,
                           pixel_units=True,
                           zero_mean=True):
    """Build a log-cosine frequency filter over radial frequency.
    Thin wrapper around `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        peak: the peak frequency; with pixel_units this is in pixels from
            the centre, i.e. cycles per image.
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a log cosine filter of 64 pixels square:
        filt = pu.image.make_filter_log_cosine(64, peak=8)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='log_cosine',
                                        size=size,
                                        peak=peak,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_alpha_over_f(size, alpha,
                             pixel_units=True,
                             zero_mean=True):
    """Build a 1/f^alpha frequency filter: amplitude falls off with a log
    slope of -alpha (alpha=1 gives a 1/f filter). Thin wrapper around
    `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        alpha: the negative log-log slope of the power falloff.
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a 1/f filter of 64 pixels square:
        filt = pu.image.make_filter_alpha_over_f(64, alpha=1)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='alpha',
                                        size=size,
                                        alpha=alpha,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
def make_filter_orientation_gaussian(size, peak, width,
                                     symmetric=True,
                                     pixel_units=True,
                                     zero_mean=True):
    """Build a gaussian orientation filter (a gaussian profile over
    angular distance). Thin wrapper around `make_filter_generic`.

    Args:
        size: output size as w, h; a scalar yields a square array.
        peak: the peak orientation in degrees, described by the effect on
            the *filtered image*: 0 is vertical, 45 "oblique up and left",
            90 horizontal, 135 "up and right".
        width: the width of the filter (sd in degrees).
        symmetric: if True the filter is "bowtie" shaped, pooling
            orientations of opposite polarity; if False it is a single
            "wedge" and angles run 0-360 to cover all directions.
        pixel_units (boolean): if True, filter parameters are expressed in
            pixel units of the array (cycles per image).
        zero_mean (boolean): if True the zero-frequency component of the
            filter is zeroed, so fourier-domain filtering yields a
            zero-mean image; if False the filtered image keeps the
            original mean.

    Returns:
        The filter as a 2D floating point numpy array.

    Example:
        # a gaussian orientation filter of 64 pixels square:
        filt = pu.image.make_filter_orientation_gaussian(64, peak=0, width=20)
        im2 = pu.image.filter_image(np.random.uniform(size=(64, 64)), filt)
        show_im(im2)
    """
    return pu.image.make_filter_generic(filt_name='ori_gaussian',
                                        size=size,
                                        peak=peak,
                                        width=width,
                                        symmetric=symmetric,
                                        pixel_units=pixel_units,
                                        zero_mean=zero_mean)
# def make_filter(im_x, filt_type,
# f_peak=None, bw=None, alpha=None):
# """Function to make a range of basic filters.
# Applied in the fourier domain. Currently for square images only.
# Tom Wallis adapted it from makeFilter Matlab function by Peter Bex.
# Args:
# im_x (int): the size of the filter image.
# must be an integer. This specifies the side length, so
# im_x=256 gives you a 256 by 256 image.
# filt_type (string): which filter to use.
# This specifies the behaviour of the rest of the function
# (see below).
# f_peak (float): the filter peak (depends on filter; see below).
# bw (float): the filter bandwidth (see below).
# alpha (float): the exponent for a 1 / f^-alpha filer.
# Returns:
# image (float): the square filter, with the zero-frequency component
# at the centre of the image (i.e. not fftshifted).
# Filter Types:
# TODO docstring.
# Example:
# Create a log exponential filter with the default settings::
# filt = pu.image.make_filter(im_x=256, filt_type="log_exp")
# pu.image.show_im(filt)
# Create an orientation filter with filter peak at 45 degrees and 10
# degrees of bandwidth::
# filt = pu.image.make_filter(im_x=256, filt_type="orientation",
# f_peak = 45, bw = 10)
# pu.image.show_im(filt)
# See Also:
# image.make_filtered_noise()
# image.filter_image()
# """
# # check im_x:
# im_x = float(round(im_x))
# radius = round(im_x / 2.0)
# x = np.linspace(- radius, radius, num=im_x)
# # meshgrid by default in cartesian coords:
# xx, yy = np.meshgrid(x, x)
# rad_dist = (xx**2 + yy**2) ** 0.5
# rad_dist[radius-1, radius-1] = 0.5 # avoid log / divide by zero problems.
# if filt_type is "log_exp":
# # set up default parameters:
# if f_peak is None:
# f_peak = im_x / 4.0
# else:
# f_peak = float(f_peak)
# if bw is None:
# bw = 0.2 # bandwidth in log pixels.
# else:
# bw = float(bw)
# filt = np.exp(-((np.log(2)*(abs(np.log(rad_dist/f_peak)))**3) /
# ((bw*np.log(2))**3)))
# elif filt_type is "1_f":
# if alpha is None:
# alpha = 1.0
# else:
# alpha = float(alpha)
# filt = rad_dist ** -alpha
# elif filt_type is "log_cosine":
# # set up default parameters:
# if f_peak is None:
# f_peak = im_x / 4.0
# else:
# f_peak = float(f_peak)
# rad_dist = np.log2(rad_dist)
# filt = 0.5 * (1+np.cos(np.pi*(rad_dist-np.log2(f_peak))))
# filt[rad_dist > (np.log2(f_peak)+1)] = 0
# filt[rad_dist <= (np.log2(f_peak)-1)] = 0
# elif filt_type is "log_gauss":
# # set up default parameters:
# if f_peak is None:
# f_peak = im_x / 4.0
# else:
# f_peak = float(f_peak)
# if bw is None:
# bw = 0.2 # bandwidth in log pixels.
# else:
# bw = float(bw)
# filt = np.exp(-((np.log2(rad_dist)-np.log2(f_peak))**2) / (2*(bw))**2)
# elif filt_type is "gauss":
# # set up default parameters:
# if f_peak is None:
# f_peak = im_x / 4.0
# else:
# f_peak = float(f_peak)
# if bw is None:
# bw = 20. # bandwidth in pixels.
# else:
# bw = float(bw)
# filt = np.exp(-((rad_dist-f_peak)**2) / (2*bw**2))
# elif filt_type is "high_pass":
# if f_peak is None:
# f_peak = im_x / 4.0
# else:
# f_peak = float(f_peak)
# filt = np.zeros(rad_dist.shape)
# filt[rad_dist >= abs(f_peak)] = 1
# elif filt_type is "low_pass":
# if f_peak is None:
# f_peak = im_x / 4.0
# else:
# f_peak = float(f_peak)
# filt = np.zeros(rad_dist.shape)
# filt[rad_dist <= abs(f_peak)] = 1
# elif filt_type is "orientation":
# # set up default parameters:
# if f_peak is None:
# f_peak = 0
# else:
# f_peak = float(f_peak)
# if bw is None:
# bw = 15 # bandwidth in degrees.
# else:
# bw = float(bw)
# # convert params to radians:
# f_peak = f_peak * np.pi / 180
# bw = bw * np.pi / 180
# ang_dist = np.arctan2(-yy, xx)
# sin_theta = np.sin(ang_dist)
# cos_theta = np.cos(ang_dist)
# ds = sin_theta * np.cos(f_peak) - cos_theta * np.sin(f_peak)
# dc = cos_theta * np.cos(f_peak) + sin_theta * np.sin(f_peak)
# dtheta = abs(np.arctan2(ds, dc)) # Absolute angular distance
# filt = np.exp((-dtheta**2) / (2*bw**2)) # ang filter component
# f_peak = f_peak + np.pi # 180 deg offset in +ve TFs
# ds = sin_theta * np.cos(f_peak) - cos_theta * np.sin(f_peak)
# dc = cos_theta * np.cos(f_peak) + sin_theta * np.sin(f_peak)
# dtheta = abs(np.arctan2(ds, dc)) # Absolute angular distance
# filt = filt + np.exp((-dtheta**2) / (2*bw**2)) # ang filter
# else:
# raise ValueError(filt_type + " is not a recognised filter type...")
# filt[radius, radius] = 0.0
# return(filt)
| mit |
maku77/contest | codejam/2014_Round1C/B-ReorderingTrainCars.py | 1 | 4080 | #!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
# Contest: Google Code Jam - 2014 Round 1C
# Problem: B. Reordering Train Cars
# URL: https://code.google.com/codejam/contest/3004486/dashboard#s=p1
# Author: Masatoshi Ohta
# Strategy:
# permutatable_count()
# aaa, a, abba のように先頭と末尾が同じ文字の car が n 個以上出てくる場合は、
# それぞれの順番は入れ替えが可能なので、n! だけパターン数が倍増する(順列)。
# (abba が出た時点で NG だが、ここでは気にしない(最後にまとめて弾く))
#
# create_trains()
# それぞれの車両 (car) をつなげて、結合された列車 (train) を作っていく。
# 任意の car を 1 つ取りだし、後ろにつなげられるだけ繋げていけばよい。
# ただし、aaa のような、先頭と末尾が同じ car は中間に入れたいので優先的に繋げる。
# 同様に、前にも繋げられるだけ繋げていく。
# これを全車両に適用すれば、列車 (train) のリスト trains ができあがる。
#
# is_valid_trains()
# この trains の組み合わせに、不整合がないかをチェック。
# (a) 1 つの列車内で分断された character が存在しないか
# (例: abba, abbacc, abbcbb はすべて NG)
# (b) 1 つの文字が複数の列車にまたがって配置されていないか
#
# この列車の順番は入れ替えが可能なので、結合された列車数が n だとすると、
# 最終的な組み合わせの数は、n! 倍に増加する。これが答え。
import sys
import math
def read_int():
    """Read one line from stdin and parse it as an integer."""
    return int(sys.stdin.readline())
def read_strs():
    """Read one line from stdin and split it on whitespace."""
    return sys.stdin.readline().split()
MOD = 1000000007  # 10**9 + 7; all answer counts are reported modulo this
def solve(cars):
    """Return the number of valid car orderings for one test case, mod MOD.

    Consumes `cars` (create_trains pops from it).
    """
    # cars whose first and last letter match may be permuted freely
    multiplier = permutatable_count(cars)
    # greedily join cars into maximal trains
    trains = create_trains(cars)
    if not is_valid_trains(trains):
        # no consistent arrangement exists
        return 0
    # the joined trains themselves can appear in any order
    return multiplier * math.factorial(len(trains)) % MOD
def permutatable_count(cars):
    """Count (mod MOD) the free permutations among cars whose first and
    last letters are equal -- such cars are mutually interchangeable."""
    repeats = {}
    for car in cars:
        if car[0] != car[-1]:
            continue
        repeats[car[0]] = repeats.get(car[0], 0) + 1
    total = 1
    for group_size in repeats.values():
        total *= math.factorial(group_size)
    return total % MOD
def create_trains(cars):
    """Join the cars into maximal trains and return them as strings.

    Consumes `cars`. Each train is grown from an arbitrary seed car,
    first extending the tail, then the head.
    """
    trains = []
    while cars:
        chain = [cars.pop()]  # arbitrary seed
        for attach in (connect_backward, connect_forward):
            while attach(chain, cars):
                pass
        trains.append(''.join(chain))
    return trains
def connect_backward(con, cars):
    """Try to append one car from `cars` onto the tail of `con`.

    Cars whose first and last letters match (e.g. 'aa') are preferred so
    they end up in the middle of a run. Returns True if a car was moved.
    """
    tail = con[-1][-1]
    best = -1
    for idx, car in enumerate(cars):
        if car[0] == tail:
            best = idx
            if car[0] == car[-1]:
                break  # same-letter car: attach it first
    if best < 0:
        return False
    con.append(cars.pop(best))
    return True
def connect_forward(con, cars):
    """Try to prepend one car from `cars` onto the head of `con`.

    Cars whose first and last letters match are preferred, mirroring
    connect_backward. Returns True if a car was moved.
    """
    head = con[0][0]
    best = -1
    for idx, car in enumerate(cars):
        if car[-1] == head:
            best = idx
            if car[0] == car[-1]:
                break  # same-letter car: attach it first
    if best < 0:
        return False
    con.insert(0, cars.pop(best))
    return True
def is_valid_trains(trains):
    """Check the joined trains for consistency.

    Returns False when a letter is split within one train (e.g. 'aba')
    or appears in more than one train; True otherwise.
    """
    seen = set()
    for train in trains:
        if train[0] in seen:
            return False
        seen.add(train[0])
        for prev, cur in zip(train, train[1:]):
            if prev == cur:
                continue  # still inside the same run
            if cur in seen:
                return False
            seen.add(cur)
    return True
if __name__ == '__main__':
    T = read_int()  # number of test cases
    for i in range(T):
        N = read_int()  # car count; only consumed -- len(cars) carries the same info
        cars = read_strs()
        print('Case #{}: {}'.format(i+1, solve(cars)))
| mit |
ghclara/Fatec_Scripts | tekton-master/backend/appengine/config/template_middleware.py | 35 | 2705 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from jinja2.exceptions import TemplateNotFound
from tekton import router
from tekton.gae.middleware.response import ResponseBase
from tekton.gae.middleware import Middleware
from config import template
class TemplateResponse(ResponseBase):
    def __init__(self, context=None, template_path=None):
        """Response that renders a template and sends it via HTTP.

        context: the context dict for template rendering (passed to
            ResponseBase).
        template_path: the path of the template to render. If None,
            TemplateWriteMiddleware finds the template by convention,
            according to the route path (see render_by_convention).
        """
        super(TemplateResponse, self).__init__(context)
        self.template_path = template_path
class TemplateMiddleware(Middleware):
    def set_up(self):
        # Expose the template renderer to downstream handlers/middlewares
        # through the shared dependencies dict.
        self.dependencies["_render"] = template.render
# Error text raised by render_by_convention when neither candidate template
# exists; %-formatted with the two template paths that were tried.
_TMPL_NOT_FOUND_MSG = '''Template not found
Looked by convention in /routes/templates directory for:
1) %s
2) %s
Create one of the two template files or explicit indicate which one to use on TemplateResponse'''
def render_by_convention(fcn, context):
    """Render the template matching `fcn`'s route path.

    Tries '<path>.html' first, then '<path>/home.html'. The root path '/'
    maps straight to '/home.html'. Raises TemplateNotFound listing both
    attempted paths when neither template exists.
    """
    template_path = router.to_path(fcn)
    if template_path == '/':
        # root route: render directly; a missing template propagates as-is
        return template.render('/home.html', context)
    attempted = []
    for suffix in ('.html', '/home.html'):
        candidate = template_path + suffix
        attempted.append(candidate)
        try:
            return template.render(candidate, context)
        except TemplateNotFound:
            pass
    raise TemplateNotFound(_TMPL_NOT_FOUND_MSG % tuple(attempted))
class TemplateWriteMiddleware(Middleware):
    # Writes the rendered template of a TemplateResponse to the HTTP
    # response. Non-TemplateResponse results fall through untouched.
    def set_up(self):
        fcn_response = self.dependencies['_fcn_response']
        fcn = self.dependencies['_fcn']
        if isinstance(fcn_response, TemplateResponse):
            context = fcn_response.context or {}
            # auth data is always injected into the template context
            for key in ('_logged_user', '_login_path', '_logout_path'):
                context[key] = self.dependencies[key]
            # CSRF code is optional -- only present when CSRF middleware ran
            if '_csrf_code' in self.dependencies:
                context['_csrf_code'] = self.dependencies['_csrf_code']
            template_path = fcn_response.template_path
            if template_path is None:
                # no explicit template: derive it from the route path
                tmpl_rendered = render_by_convention(fcn, context)
            else:
                tmpl_rendered = template.render(template_path, context)
            self.handler.response.write(tmpl_rendered)
            return True  # after response, there is no need to look for more middlewares
milodky/kernel_for_nexus7 | tools/perf/util/setup.py | 560 | 1379 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect extension build output into the directories supplied by the
    # perf build system via the PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP
    # environment variables (module-level build_lib / build_tmp below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same externally-provided build directory that
    # build_ext above writes into.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags: base flags plus whatever the caller exports in CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build directories are handed in by the perf Makefile via the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
# The 'perf' C extension wrapping the perf utility sources.
perf = Extension('perf',
		  sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
			     'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
			     'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
phretor/django-academic | academic/apps/people/admin.py | 1 | 1371 | from django.contrib import admin
from django import forms
from django.db import models
from django.conf import settings
from academic.people.models import *
class PersonAdmin(admin.ModelAdmin):
    # Admin for Person: photo/name link to the change form; most status
    # flags and contact fields are editable straight from the changelist.
    filter_horizontal = [
        'affiliation',]
    list_display_links = (
        'photo',
        'first_name',
        'last_name')
    list_display = (
        'photo',
        'first_name',
        'last_name',
        'rank',
        'public',
        'current',
        'alumni',
        'visitor',
        'e_mail',
        'web_page',)
    # editable in-place on the changelist page
    list_editable = (
        'rank',
        'public',
        'current',
        'alumni',
        'visitor',
        'e_mail',
        'web_page')
    # sidebar filters for the status flags
    list_filter = (
        'public',
        'current',
        'visitor',
        'alumni')
    search_fields = (
        'first_name',
        'last_name',
        'e_mail',)
admin.site.register(Person, PersonAdmin)
class PersonInlineForm(forms.ModelForm):
    # Reduced Person form used when people are edited inline (e.g. on the
    # Rank admin page below): only the essential fields.
    class Meta:
        model = Person
        fields = (
            'public',
            'first_name',
            'last_name',
            'e_mail')
class PersonInline(admin.TabularInline):
    # Tabular inline of Person rows backed by the reduced form above.
    model = Person
    form = PersonInlineForm
class RankAdmin(admin.ModelAdmin):
    # Admin for Rank, with the people holding that rank editable inline.
    inlines = [
        PersonInline, ]
    list_display = (
        'name',
        'plural_name', )
admin.site.register(Rank, RankAdmin)
| bsd-3-clause |
ReganBell/QReview | networkx/utils/tests/test_misc.py | 40 | 3503 | # -*- encoding: utf-8 -*-
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.utils import *
def test_is_string_like():
    assert_true(is_string_like("aaaa"))
    for non_string in (None, 123):
        assert_false(is_string_like(non_string))
def test_iterable():
    for not_iterable in (None, 10):
        assert_false(iterable(not_iterable))
    for ok in ([1, 2, 3], (1, 2, 3), {1: "A", 2: "X"}, "ABC"):
        assert_true(iterable(ok))
def test_graph_iterable():
    graph = nx.complete_graph(10)
    # a graph, its node iterator and its edge iterator are all iterable
    assert_true(iterable(graph))
    assert_true(iterable(graph.nodes_iter()))
    assert_true(iterable(graph.edges_iter()))
def test_is_list_of_ints():
    all_ints = [1, 2, 3, 42]
    mixed = [1, 2, 3, "kermit"]
    assert_true(is_list_of_ints(all_ints))
    assert_false(is_list_of_ints(mixed))
def test_random_number_distribution():
    # smoke test only: the generators must run without raising
    uniform_sequence(20)
    powerlaw_sequence(20, exponent=2.5)
    pareto_sequence(20, exponent=1.5)
    discrete_sequence(20, distribution=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3])
def test_make_str_with_bytes():
    import sys
    is_py2 = sys.version_info[0] == 2
    original = "qualité"
    converted = make_str(original)
    if is_py2:
        assert_true(isinstance(converted, unicode))
        # Since file encoding is utf-8, the é will be two bytes.
        assert_true(len(converted) == 8)
    else:
        assert_true(isinstance(converted, str))
        assert_true(len(converted) == 7)
def test_make_str_with_unicode():
    import sys
    is_py2 = sys.version_info[0] == 2
    if is_py2:
        original = unicode("qualité", encoding='utf-8')
        converted = make_str(original)
        assert_true(isinstance(converted, unicode))
        assert_true(len(converted) == 7)
    else:
        converted = make_str("qualité")
        assert_true(isinstance(converted, str))
        assert_true(len(converted) == 7)
class TestNumpyArray(object):
    # Tests for the dict_to_numpy_array* helpers; the whole class is
    # skipped when NumPy is not installed.
    @classmethod
    def setupClass(cls):
        # Import lazily and publish the names as module globals so the
        # test methods below can reference them directly.
        global numpy
        global assert_allclose
        try:
            import numpy
            from numpy.testing import assert_allclose
        except ImportError:
            raise SkipTest('NumPy not available.')
    def test_dict_to_numpy_array1(self):
        # 1D case: the mapping decides which key lands at which index;
        # without a mapping only the element sum is deterministic.
        d = {'a':1,'b':2}
        a = dict_to_numpy_array1(d, mapping={'a':0, 'b':1})
        assert_allclose(a, numpy.array([1,2]))
        a = dict_to_numpy_array1(d, mapping={'b':0, 'a':1})
        assert_allclose(a, numpy.array([2,1]))
        a = dict_to_numpy_array1(d)
        assert_allclose(a.sum(), 3)
    def test_dict_to_numpy_array2(self):
        # 2D case: a dict-of-dicts becomes a matrix indexed by the mapping.
        d = {'a': {'a':1,'b':2},
             'b': {'a':10,'b':20}}
        mapping = {'a':1, 'b': 0}
        a = dict_to_numpy_array2(d, mapping=mapping)
        assert_allclose(a, numpy.array([[20,10],[2,1]]))
        a = dict_to_numpy_array2(d)
        assert_allclose(a.sum(), 33)
    def test_dict_to_numpy_array_a(self):
        # generic dispatcher with a 2D input behaves like the 2D variant
        d = {'a': {'a':1,'b':2},
             'b': {'a':10,'b':20}}
        mapping = {'a':0, 'b': 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        assert_allclose(a, numpy.array([[1,2],[10,20]]))
        mapping = {'a':1, 'b': 0}
        a = dict_to_numpy_array(d, mapping=mapping)
        assert_allclose(a, numpy.array([[20,10],[2,1]]))
        a = dict_to_numpy_array2(d)
        assert_allclose(a.sum(), 33)
    def test_dict_to_numpy_array_b(self):
        # generic dispatcher with a flat dict behaves like the 1D variant
        d = {'a':1,'b':2}
        mapping = {'a': 0, 'b': 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        assert_allclose(a, numpy.array([1,2]))
        a = dict_to_numpy_array1(d)
        assert_allclose(a.sum(), 3)
| bsd-3-clause |
protatremy/buildbot | master/buildbot/steps/source/repo.py | 10 | 19367 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import re
import textwrap
from twisted.internet import defer
from twisted.internet import reactor
from zope.interface import implementer
from buildbot import util
from buildbot.interfaces import IRenderable
from buildbot.process import buildstep
from buildbot.steps.source.base import Source
@implementer(IRenderable)
class RepoDownloadsFromProperties(util.ComparableMixin, object):
    # Accepted formats, tried in order:
    #   "repo download proj number/patch" (paste from the gerrit web UI),
    #   "proj number/patch", and "proj/number/patch".
    parse_download_re = (re.compile(r"repo download ([^ ]+) ([0-9]+/[0-9]+)"),
                         re.compile(r"([^ ]+) ([0-9]+/[0-9]+)"),
                         re.compile(r"([^ ]+)/([0-9]+/[0-9]+)"),
                         )
    compare_attrs = ('names',)
    def __init__(self, names):
        # names: the build property names to read download specs from
        self.names = names
    def getRenderingFor(self, props):
        # Collect the parsed downloads from every configured property
        # that is actually set on the build.
        downloads = []
        for propName in self.names:
            s = props.getProperty(propName)
            if s is not None:
                downloads.extend(self.parseDownloadProperty(s))
        return downloads
    def parseDownloadProperty(self, s):
        """Parse a property value into "proj number/patch" strings.

        Lets try to be nice in the format we want: the value may contain
        several instances of "repo download proj number/patch" (direct
        copy-paste from the gerrit web site) or several instances of
        "proj number/patch" (simpler version). This feature allows an
        integrator to build with several pending interdependent changes.

        Returns the list of repo downloads sent to the worker.
        """
        if s is None:
            return []
        ret = []
        # Each regex is applied repeatedly; every match is removed from
        # the string before re-searching, so overlapping formats do not
        # double-count the same spec. Pattern order above matters: the
        # most specific format is consumed first.
        for cur_re in self.parse_download_re:
            res = cur_re.search(s)
            while res:
                ret.append("%s %s" % (res.group(1), res.group(2)))
                s = s[:res.start(0)] + s[res.end(0):]
                res = cur_re.search(s)
        return ret
@implementer(IRenderable)
class RepoDownloadsFromChangeSource(util.ComparableMixin, object):
    """Renders the build's gerrit changes as repo download specs.

    Only changes produced by a gerrit "patchset-created" event are
    considered; each yields "project change/patchset".
    """
    compare_attrs = ('codebase',)

    def __init__(self, codebase=None):
        # codebase: restrict to one source stamp; None means all changes
        self.codebase = codebase

    def getRenderingFor(self, props):
        build = props.getBuild()
        if self.codebase is None:
            changes = build.allChanges()
        else:
            changes = build.getSourceStamp(self.codebase).changes
        downloads = []
        for change in changes:
            properties = change.properties
            if "event.type" not in properties:
                continue
            if properties["event.type"] != "patchset-created":
                continue
            downloads.append("%s %s/%s" % (properties["event.change.project"],
                                           properties["event.change.number"],
                                           properties["event.patchSet.number"]))
        return downloads
class Repo(Source):

    """ Class for Repo with all the smarts """
    name = 'repo'
    # attributes rendered against build properties before use
    renderables = ["manifestURL", "manifestBranch", "manifestFile", "tarball", "jobs",
                   "syncAllBranches", "updateTarballAge", "manifestOverrideUrl",
                   "repoDownloads", "depth"]
    ref_not_found_re = re.compile(r"fatal: Couldn't find remote ref")
    # BUG FIX: the original list was missing commas after r"error: " and
    # r"fatal: ", so implicit string-literal concatenation collapsed the
    # last three entries into the single alternative
    # "error: fatal: possibly due to conflict resolution.", which never
    # matches real git output. Each message is now its own alternative.
    cherry_pick_error_re = re.compile(r"|".join([r"Automatic cherry-pick failed",
                                                 r"error: ",
                                                 r"fatal: ",
                                                 r"possibly due to conflict resolution."]))
    re_change = re.compile(r".* refs/changes/\d\d/(\d+)/(\d+) -> FETCH_HEAD$")
    re_head = re.compile(r"^HEAD is now at ([0-9a-f]+)...")
    # number of retries, if we detect mirror desynchronization
    mirror_sync_retry = 10
    # wait 1min between retries (thus default total retry time is 10min)
    mirror_sync_sleep = 60
    def __init__(self,
                 manifestURL=None,
                 manifestBranch="master",
                 manifestFile="default.xml",
                 tarball=None,
                 jobs=None,
                 syncAllBranches=False,
                 updateTarballAge=7 * 24.0 * 3600.0,
                 manifestOverrideUrl=None,
                 repoDownloads=None,
                 depth=0,
                 **kwargs):
        """
        @type manifestURL: string
        @param manifestURL: The URL which points at the repo manifests repository.
        @type manifestBranch: string
        @param manifestBranch: The manifest branch to check out by default.
        @type manifestFile: string
        @param manifestFile: The manifest to use for sync.
        @type tarball: string
        @param tarball: optional path of a tarball for the checkout
                        (presumably used to speed up syncs -- confirm against
                        the sync logic, not visible here).
        @type jobs: integer
        @param jobs: optional job count for the sync (presumably repo's -j
                     flag -- confirm against the sync logic, not visible here).
        @type syncAllBranches: bool.
        @param syncAllBranches: true, then we must slowly synchronize all branches.
        @type updateTarballAge: float
        @param updateTarballAge: renderable to determine the update tarball policy,
                                 given properties
                                 Returns: max age of tarball in seconds, or None, if we
                                 want to skip tarball update
        @type manifestOverrideUrl: string
        @param manifestOverrideUrl: optional http URL for overriding the manifest
                                    usually coming from Property setup by a ForceScheduler
        @type repoDownloads: list of strings
        @param repoDownloads: optional repo download to perform after the repo sync
        @type depth: integer
        @param depth: optional depth parameter to repo init.
                      If specified, create a shallow clone with given depth.
        """
        self.manifestURL = manifestURL
        self.manifestBranch = manifestBranch
        self.manifestFile = manifestFile
        self.tarball = tarball
        self.jobs = jobs
        self.syncAllBranches = syncAllBranches
        self.updateTarballAge = updateTarballAge
        self.manifestOverrideUrl = manifestOverrideUrl
        # avoid a shared mutable default for the downloads list
        if repoDownloads is None:
            repoDownloads = []
        self.repoDownloads = repoDownloads
        self.depth = depth
        Source.__init__(self, **kwargs)
        # fail fast: a manifest URL is mandatory for this step
        assert self.manifestURL is not None
def computeSourceRevision(self, changes):
if not changes:
return None
return changes[-1].revision
def filterManifestPatches(self):
"""
Patches to manifest projects are a bit special.
repo does not support a way to download them automatically,
so we need to implement the boilerplate manually.
This code separates the manifest patches from the other patches,
and generates commands to import those manifest patches.
"""
manifest_unrelated_downloads = []
manifest_related_downloads = []
for download in self.repoDownloads:
project, ch_ps = download.split(" ")[-2:]
if (self.manifestURL.endswith("/" + project) or
self.manifestURL.endswith("/" + project + ".git")):
ch, ps = map(int, ch_ps.split("/"))
branch = "refs/changes/%02d/%d/%d" % (ch % 100, ch, ps)
manifest_related_downloads.append(
["git", "fetch", self.manifestURL, branch])
manifest_related_downloads.append(
["git", "cherry-pick", "FETCH_HEAD"])
else:
manifest_unrelated_downloads.append(download)
self.repoDownloads = manifest_unrelated_downloads
self.manifestDownloads = manifest_related_downloads
    def _repoCmd(self, command, abandonOnFailure=True, **kwargs):
        """Run "repo <command>" in the build directory (thin wrapper)."""
        return self._Cmd(["repo"] + command, abandonOnFailure=abandonOnFailure, **kwargs)
    def _Cmd(self, command, abandonOnFailure=True, workdir=None, **kwargs):
        """Run an arbitrary command on the worker, logging to stdio_log.

        Returns a Deferred firing with the command's return code; when
        abandonOnFailure is set, a failed command raises BuildStepFailed
        instead of returning.
        """
        if workdir is None:
            workdir = self.workdir
        cmd = buildstep.RemoteShellCommand(workdir, command,
                                           env=self.env,
                                           logEnviron=self.logEnviron,
                                           timeout=self.timeout, **kwargs)
        # Remembered so helpers (e.g. _findErrorMessages) can inspect the
        # captured stdout/stderr of the most recent command.
        self.lastCommand = cmd
        # does not make sense to logEnviron for each command (just for first)
        self.logEnviron = False
        cmd.useLog(self.stdio_log, False)
        self.stdio_log.addHeader(
            "Starting command: %s\n" % (" ".join(command), ))
        # Show only the first two words (e.g. "repo sync") in the step status.
        self.step_status.setText(["%s" % (" ".join(command[:2]))])
        d = self.runCommand(cmd)
        @d.addCallback
        def evaluateCommand(_):
            if abandonOnFailure and cmd.didFail():
                self.descriptionDone = "repo failed at: %s" % (
                    " ".join(command[:2]))
                self.stdio_log.addStderr(
                    "Source step failed while running command %s\n" % cmd)
                raise buildstep.BuildStepFailed()
            return cmd.rc
        return d
    def repoDir(self):
        # Absolute path of the .repo metadata directory inside the workdir.
        return self.build.path_module.join(self.workdir, ".repo")
    def sourcedirIsUpdateable(self):
        # The checkout is reusable iff the .repo directory already exists.
        return self.pathExists(self.repoDir())
    def startVC(self, branch, revision, patch):
        """Entry point invoked by the base Source step."""
        d = self.doStartVC()
        d.addErrback(self.failed)
    @defer.inlineCallbacks
    def doStartVC(self):
        """Main checkout sequence: sync (with one clobber-retry), refresh
        the tarball, then apply the configured repo downloads."""
        self.stdio_log = self.addLogForRemoteCommands("stdio")
        # Split manifest-project patches (applied with raw git commands)
        # from regular "repo download"s.
        self.filterManifestPatches()
        if self.repoDownloads:
            self.stdio_log.addHeader(
                "will download:\n" + "repo download " + "\nrepo download ".join(self.repoDownloads) + "\n")
        self.willRetryInCaseOfFailure = True
        d = self.doRepoSync()
        @d.addErrback
        def maybeRetry(why):
            # in case the tree was corrupted somehow because of previous build
            # we clobber one time, and retry everything
            if why.check(buildstep.BuildStepFailed) and self.willRetryInCaseOfFailure:
                self.stdio_log.addStderr("got issue at first try:\n" + str(why) +
                                         "\nRetry after clobber...")
                return self.doRepoSync(forceClobber=True)
            return why  # propagate to self.failed
        yield d
        yield self.maybeUpdateTarball()
        # starting from here, clobbering will not help
        yield self.doRepoDownloads()
        self.setStatus(self.lastCommand, 0)
        yield self.finished(0)
    @defer.inlineCallbacks
    def doClobberStart(self):
        """Wipe the workdir and re-seed it from the tarball, if any."""
        yield self.runRmdir(self.workdir)
        yield self.runMkdir(self.workdir)
        yield self.maybeExtractTarball()
    @defer.inlineCallbacks
    def doRepoSync(self, forceClobber=False):
        """Run "repo init" + "repo sync", clobbering first when the
        checkout is unusable or forceClobber is set."""
        updatable = yield self.sourcedirIsUpdateable()
        if not updatable or forceClobber:
            # no need to re-clobber in case of failure
            self.willRetryInCaseOfFailure = False
            yield self.doClobberStart()
        yield self.doCleanup()
        yield self._repoCmd(['init',
                             '-u', self.manifestURL,
                             '-b', self.manifestBranch,
                             '-m', self.manifestFile,
                             '--depth', str(self.depth)])
        if self.manifestOverrideUrl:
            self.stdio_log.addHeader(
                "overriding manifest with %s\n" % (self.manifestOverrideUrl))
            # The override may be a path inside the workdir or a remote URL.
            local_file = yield self.pathExists(self.build.path_module.join(self.workdir,
                                                                           self.manifestOverrideUrl))
            if local_file:
                yield self._Cmd(["cp", "-f", self.manifestOverrideUrl, "manifest_override.xml"])
            else:
                yield self._Cmd(["wget", self.manifestOverrideUrl, "-O", "manifest_override.xml"])
            yield self._Cmd(["ln", "-sf", "../manifest_override.xml", "manifest.xml"],
                            workdir=self.build.path_module.join(self.workdir, ".repo"))
        # Apply patches that target the manifest project itself.
        for command in self.manifestDownloads:
            yield self._Cmd(command, workdir=self.build.path_module.join(self.workdir, ".repo", "manifests"))
        command = ['sync']
        if self.jobs:
            command.append('-j' + str(self.jobs))
        if not self.syncAllBranches:
            # -c: only sync the branch named in the manifest (faster).
            command.append('-c')
        self.step_status.setText(["repo sync"])
        self.stdio_log.addHeader("synching manifest %s from branch %s from %s\n"
                                 % (self.manifestFile, self.manifestBranch, self.manifestURL))
        yield self._repoCmd(command)
        # Snapshot the exact synced revisions for reproducibility.
        command = ['manifest', '-r', '-o', 'manifest-original.xml']
        yield self._repoCmd(command)
# check whether msg matches one of the
# compiled regexps in self.re_error_messages
def _findErrorMessages(self, error_re):
for logname in ['stderr', 'stdout']:
if not hasattr(self.lastCommand, logname):
continue
msg = getattr(self.lastCommand, logname)
if not (re.search(error_re, msg) is None):
return True
return False
    def _sleep(self, delay):
        """Return a Deferred that fires (with 1) after `delay` seconds."""
        d = defer.Deferred()
        reactor.callLater(delay, d.callback, 1)
        return d
    @defer.inlineCallbacks
    def doRepoDownloads(self):
        """Run every configured "repo download", retrying while mirrors
        catch up, and record what was applied in the "repo_downloaded"
        build property."""
        self.repo_downloaded = ""
        for download in self.repoDownloads:
            command = ['download'] + download.split(' ')
            self.stdio_log.addHeader("downloading changeset %s\n"
                                     % (download))
            retry = self.mirror_sync_retry + 1
            while retry > 0:
                yield self._repoCmd(command, abandonOnFailure=False,
                                    collectStdout=True, collectStderr=True)
                if not self._findErrorMessages(self.ref_not_found_re):
                    break
                # The change ref is not on the mirror yet: wait and retry.
                retry -= 1
                self.stdio_log.addStderr(
                    "failed downloading changeset %s\n" % (download))
                self.stdio_log.addHeader("wait one minute for mirror sync\n")
                yield self._sleep(self.mirror_sync_sleep)
            if retry == 0:
                self.descriptionDone = "repo: change %s does not exist" % download
                raise buildstep.BuildStepFailed()
            if self.lastCommand.didFail() or self._findErrorMessages(self.cherry_pick_error_re):
                # cherry pick error! We create a diff with status current workdir
                # in stdout, which reveals the merge errors and exit
                command = ['forall', '-c', 'git', 'diff', 'HEAD']
                yield self._repoCmd(command, abandonOnFailure=False)
                self.descriptionDone = "download failed: %s" % download
                raise buildstep.BuildStepFailed()
            if hasattr(self.lastCommand, 'stderr'):
                lines = self.lastCommand.stderr.split("\n")
                # Scan stderr once for both the change ref line and the
                # resulting HEAD revision line.
                match1 = match2 = False
                for line in lines:
                    if not match1:
                        match1 = self.re_change.match(line)
                    if not match2:
                        match2 = self.re_head.match(line)
                if match1 and match2:
                    self.repo_downloaded += "%s/%s %s " % (match1.group(1),
                                                           match1.group(2),
                                                           match2.group(1))
        self.setProperty("repo_downloaded", self.repo_downloaded, "Source")
def computeTarballOptions(self):
# Keep in mind that the compression part of tarball generation
# can be non negligible
tar = ['tar']
if self.tarball.endswith("gz"):
tar.append('-z')
if self.tarball.endswith("bz2") or self.tarball.endswith("bz"):
tar.append('-j')
if self.tarball.endswith("lzma"):
tar.append('--lzma')
if self.tarball.endswith("lzop"):
tar.append('--lzop')
return tar
    @defer.inlineCallbacks
    def maybeExtractTarball(self):
        """Seed a fresh workdir from the tarball, if one is configured;
        a corrupt tarball is deleted so the sync falls back to a full clone."""
        if self.tarball:
            tar = self.computeTarballOptions() + ['-xvf', self.tarball]
            res = yield self._Cmd(tar, abandonOnFailure=False)
            if res:  # error with tarball.. erase repo dir and tarball
                yield self._Cmd(["rm", "-f", self.tarball], abandonOnFailure=False)
                yield self.runRmdir(self.repoDir(), abandonOnFailure=False)
    @defer.inlineCallbacks
    def maybeUpdateTarball(self):
        """Recreate the tarball from .repo when it is missing or older
        than updateTarballAge seconds (None disables updating)."""
        if not self.tarball or self.updateTarballAge is None:
            return
        # tarball path is absolute, so we cannot use worker's stat command
        # stat -c%Y gives mtime in second since epoch
        res = yield self._Cmd(["stat", "-c%Y", self.tarball], collectStdout=True, abandonOnFailure=False)
        if not res:
            tarball_mtime = int(self.lastCommand.stdout)
            # mtime of "." approximates "now" on the worker's clock.
            yield self._Cmd(["stat", "-c%Y", "."], collectStdout=True)
            now_mtime = int(self.lastCommand.stdout)
            age = now_mtime - tarball_mtime
        # res != 0 means the tarball does not exist yet; `age` is only
        # defined (and only needed) when res == 0.
        if res or age > self.updateTarballAge:
            tar = self.computeTarballOptions() + \
                ['-cvf', self.tarball, ".repo"]
            res = yield self._Cmd(tar, abandonOnFailure=False)
            if res:  # error with tarball.. erase tarball, but don't fail
                yield self._Cmd(["rm", "-f", self.tarball], abandonOnFailure=False)
    # a simple shell script to gather all cleanup tweaks...
    # doing them one by one just complicate the stuff
    # and mess up the stdio log
    def _getCleanupCommand(self):
        """Return a bash script that resets the manifest checkout and every
        project working tree to a clean state (also used by tests for
        expectations)."""
        return textwrap.dedent("""\
            set -v
            if [ -d .repo/manifests ]
            then
                # repo just refuse to run if manifest is messed up
                # so ensure we are in a known state
                cd .repo/manifests
                rm -f .git/index.lock
                git fetch origin
                git reset --hard remotes/origin/%(manifestBranch)s
                git config branch.default.merge %(manifestBranch)s
                cd ..
                ln -sf manifests/%(manifestFile)s manifest.xml
                cd ..
            fi
            repo forall -c rm -f .git/index.lock
            repo forall -c git clean -f -d -x 2>/dev/null
            repo forall -c git reset --hard HEAD 2>/dev/null
            rm -f %(workdir)s/.repo/project.list
            """) % dict(manifestBranch=self.manifestBranch,
                        manifestFile=self.manifestFile,
                        workdir=self.workdir)
def doCleanup(self):
command = self._getCleanupCommand()
return self._Cmd(["bash", "-c", command], abandonOnFailure=False)
| gpl-2.0 |
mlavin/django | tests/model_package/tests.py | 133 | 2627 | from django.db import connection, models
from django.db.backends.utils import truncate_name
from django.test import TestCase
from .models.article import Article, Site
from .models.publication import Publication
class Advertisement(models.Model):
    # Model defined in the tests module itself (not in the models
    # subpackage); the M2M target is given as an "app_label.Model" string so
    # it resolves across the package boundary.
    customer = models.CharField(max_length=100)
    publications = models.ManyToManyField("model_package.Publication", blank=True)
class ModelPackageTests(TestCase):
    def test_m2m_tables_in_subpackage_models(self):
        """
        Regression for #12168: models split into subpackages still get M2M
        tables.
        """
        p = Publication.objects.create(title="FooBar")
        site = Site.objects.create(name="example.com")
        a = Article.objects.create(headline="a foo headline")
        a.publications.add(p)
        a.sites.add(site)
        # Re-fetch from the database to prove the row (and its M2M rows)
        # were actually persisted.
        a = Article.objects.get(id=a.pk)
        self.assertEqual(a.id, a.pk)
        self.assertEqual(a.sites.count(), 1)
    def test_models_in_the_test_package(self):
        """
        Regression for #12245 - Models can exist in the test package, too.
        """
        p = Publication.objects.create(title="FooBar")
        ad = Advertisement.objects.create(customer="Lawrence Journal-World")
        ad.publications.add(p)
        # Re-fetch to confirm the M2M relation round-trips.
        ad = Advertisement.objects.get(id=ad.pk)
        self.assertEqual(ad.publications.count(), 1)
    def test_automatic_m2m_column_names(self):
        """
        Regression for #12386 - field names on the autogenerated intermediate
        class that are specified as dotted strings don't retain any path
        component for the field or column name.
        """
        self.assertEqual(
            Article.publications.through._meta.fields[1].name, 'article'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[1].get_attname_column(),
            ('article_id', 'article_id')
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].name, 'publication'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].get_attname_column(),
            ('publication_id', 'publication_id')
        )
        # The M2M table name may be truncated to the backend's identifier
        # length limit.
        self.assertEqual(
            Article._meta.get_field('publications').m2m_db_table(),
            truncate_name('model_package_article_publications', connection.ops.max_name_length()),
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_column_name(), 'article_id'
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_reverse_name(),
            'publication_id'
        )
| bsd-3-clause |
kmunve/pysenorge | pysenorge/themes/run_wind_1500m.py | 1 | 1212 | __docformat__ = 'reStructuredText'
'''
Driver script: runs the ``wind_1500m_daily.py`` theme once for every day in
the configured date range by shelling out to a fresh Python process, logging
start/finish times to ``~/wind_1500m_daily.log``.

:Author: kmu
:Created: 08. June 2010
'''
import os
import logging
from datetime import timedelta, datetime
# Python 2 only: execfile() runs the helper that puts pysenorge on sys.path.
execfile(os.path.join(os.path.dirname(__file__), "set_pysenorge_path.py"))
from pysenorge.tools.date_converters import iso2datetime
scriptname = "wind_1500m_daily.py"
LOG_FILENAME = os.path.join(os.path.expanduser("~"),
                            scriptname.split('.')[0]+'.log')
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
logging.info('Script started: %s' % datetime.now().isoformat())
# Inclusive start, exclusive end of the processed date range.
start_date = iso2datetime("2011-05-27" + "T00:00:00")
end_date = iso2datetime("2011-06-08" + "T00:00:00")
dt = timedelta(days=1)
# The target script path never changes, so resolve it once before the loop.
script = os.path.join(os.path.dirname(__file__), scriptname)
while start_date < end_date:
    # (A dead no-op statement `start_date` was removed here.)
    # Format as ISO date YYYY-MM-DD; month and day are zero-padded.
    strdate = "%s-%s-%s" % (str(start_date.year).zfill(2),
                            str(start_date.month).zfill(2),
                            str(start_date.day).zfill(2))
    os.system("python %s %s" % (script, strdate))
    start_date = start_date + dt
logging.info('Script finished: %s' % datetime.now().isoformat()) | gpl-3.0 |
davisein/jitsudone | django/contrib/gis/tests/test_spatialrefsys.py | 88 | 6775 | from django.db import connection
from django.contrib.gis.gdal import GDAL_VERSION
from django.contrib.gis.tests.utils import no_mysql, oracle, postgis, spatialite
from django.utils import unittest
test_srs = ({'srid' : 4326,
'auth_name' : ('EPSG', True),
'auth_srid' : 4326,
'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
'spheroid' : 'WGS 84', 'name' : 'WGS 84',
'geographic' : True, 'projected' : False, 'spatialite' : True,
'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 1, 9),
},
{'srid' : 32140,
'auth_name' : ('EPSG', False),
'auth_srid' : 32140,
'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ',
'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central',
'geographic' : False, 'projected' : True, 'spatialite' : False,
'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 5, 10),
},
)
# Import the SpatialRefSys model matching the active spatial backend.
# No branch handles MySQL: it has no spatial_ref_sys table, and the tests
# below are all decorated with @no_mysql.
if oracle:
    from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
elif postgis:
    from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
elif spatialite:
    from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
class SpatialRefSysTest(unittest.TestCase):
    @no_mysql
    def test01_retrieve(self):
        "Testing retrieval of SpatialRefSys model objects."
        for sd in test_srs:
            srs = SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertEqual(sd['srid'], srs.srid)
            # Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # also, Oracle Spatial seems to add extraneous info to fields, hence the
            # the testing with the 'startswith' flag.
            auth_name, oracle_flag = sd['auth_name']
            if postgis or (oracle and oracle_flag):
                self.assertEqual(True, srs.auth_name.startswith(auth_name))
            self.assertEqual(sd['auth_srid'], srs.auth_srid)
            # No proj.4 and different srtext on oracle backends :(
            if postgis:
                # PostGIS >= 1.4 emits a slightly different WKT (srtext14).
                if connection.ops.spatial_version >= (1, 4, 0):
                    srtext = sd['srtext14']
                else:
                    srtext = sd['srtext']
                self.assertEqual(srtext, srs.wkt)
                self.assertEqual(sd['proj4'], srs.proj4text)
    @no_mysql
    def test02_osr(self):
        "Testing getting OSR objects from SpatialRefSys model objects."
        for sd in test_srs:
            sr = SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
            self.assertEqual(sd['geographic'], sr.geographic)
            self.assertEqual(sd['projected'], sr.projected)
            if not (spatialite and not sd['spatialite']):
                # Can't get 'NAD83 / Texas South Central' from PROJ.4 string
                # on SpatiaLite
                self.assertEqual(True, sr.name.startswith(sd['name']))
            # Testing the SpatialReference object directly.
            if postgis or spatialite:
                srs = sr.srs
                if GDAL_VERSION <= (1, 8):
                    self.assertEqual(sd['proj4'], srs.proj4)
                # No `srtext` field in the `spatial_ref_sys` table in SpatiaLite
                if not spatialite:
                    if connection.ops.spatial_version >= (1, 4, 0):
                        srtext = sd['srtext14']
                    else:
                        srtext = sd['srtext']
                    self.assertEqual(srtext, srs.wkt)
    @no_mysql
    def test03_ellipsoid(self):
        "Testing the ellipsoid property."
        for sd in test_srs:
            # Getting the ellipsoid and precision parameters.
            ellps1 = sd['ellipsoid']
            prec = sd['eprec']
            # Getting our spatial reference and its ellipsoid
            srs = SpatialRefSys.objects.get(srid=sd['srid'])
            ellps2 = srs.ellipsoid
            # Compare semi-major, semi-minor and inverse flattening, each to
            # its own decimal-place tolerance from 'eprec'.
            for i in range(3):
                # NOTE(review): param1/param2 are unused leftovers.
                param1 = ellps1[i]
                param2 = ellps2[i]
                self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
def suite():
    """Collect the SpatialRefSys tests into a single test suite."""
    return unittest.TestSuite([unittest.makeSuite(SpatialRefSysTest)])
def run(verbosity=2):
    """Execute the spatial_ref_sys suite with a text test runner."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| bsd-3-clause |
chromium/chromium | tools/android/dependency_analysis/upload_html_viewer.py | 8 | 3251 | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Deploy the Dependency Graph Viewer to Firebase hosting."""
import shutil
import subprocess
import tempfile
from pathlib import Path
FIREBASE_PROJECT = 'chromium-dependency-graph'
JS_DIR = Path(__file__).parent / 'js'
def _Prompt(message):
    """Ask a yes/no question on stdin; only an answer starting with 'y'
    (case-insensitive) counts as yes."""
    reply = input(message + ' [y/N] ').lower()
    return reply and reply.startswith('y')
def _FirebaseLogin():
    """Login into the Firebase CLI"""
    subprocess.check_call(['firebase', 'login'])
def _CheckFirebaseCLI():
    """Fail with a proper error message, if Firebase CLI is not installed."""
    # `firebase --version` exits non-zero (or raises via PATH lookup failure
    # handled by subprocess.call) when the CLI is unavailable.
    if subprocess.call(['firebase', '--version'],
                       stdout=subprocess.DEVNULL) != 0:
        link = 'https://firebase.google.com/docs/cli#install_the_firebase_cli'
        raise Exception(
            'Firebase CLI not installed or not on your PATH. Follow '
            'the instructions at ' + link + ' to install')
def _CheckNPM():
    """Fail with a proper error message, if npm is not installed."""
    if subprocess.call(['npm', '--version'], stdout=subprocess.DEVNULL) != 0:
        link = 'https://nodejs.org'
        raise Exception(
            'npm not installed or not on your PATH. Either install Node.js '
            'through your package manager or download it from ' + link + '.')
def _BuildDist():
    """Build distribution files.

    Runs `npm run build` in the js/ package and returns the resulting
    js/dist directory as a Path.
    """
    subprocess.check_call(['npm', 'run', '--prefix', JS_DIR, 'build'])
    return JS_DIR / 'dist'
def _FirebaseInitProjectDir(project_dir):
    """Create a firebase.json file that is needed for deployment.

    Returns the `public/` directory Firebase hosting will serve from
    (not created here; the caller populates it).
    """
    project_static_dir = project_dir / 'public'
    with open(project_dir / 'firebase.json', 'w') as f:
        f.write("""
        {
            "hosting": {
                "public": "public",
                "ignore": [
                    "firebase.json",
                    "**/README*",
                    "**/.*"
                ]
            }
        }
        """)
    return project_static_dir
def _FirebaseDeploy(project_dir):
    """Deploy the project to firebase hosting."""
    # Run from project_dir so the CLI picks up the firebase.json there.
    subprocess.check_call(['firebase', 'deploy', '-P', FIREBASE_PROJECT],
                          cwd=project_dir)
def _CopyDistFiles(dist_dir, project_static_dir):
    """Copy over static files from the dist directory."""
    shutil.copytree(dist_dir, project_static_dir)
def main():
    """Build the dependency-graph visualizer and, after an explicit user
    confirmation, deploy it to Firebase hosting."""
    message = (
        f"""This script builds the Clank Dependency Graph Visualizer and \
deploys it to Firebase hosting at {FIREBASE_PROJECT}.firebaseapp.com.
Please ensure you have read the instructions at //{JS_DIR}/README.md first \
before running this.
Are you sure you want to continue?""")
    if not _Prompt(message):
        print('Nothing was deployed.')
        return
    # Validate tooling before doing any work.
    _CheckFirebaseCLI()
    _CheckNPM()
    _FirebaseLogin()
    dist_dir = _BuildDist()
    with tempfile.TemporaryDirectory(prefix='firebase-') as project_dir_str:
        project_dir = Path(project_dir_str)
        project_static_dir = _FirebaseInitProjectDir(project_dir)
        # Use the dedicated helper instead of an inline shutil.copytree
        # (the helper was defined above but previously never called).
        _CopyDistFiles(dist_dir, project_static_dir)
        _FirebaseDeploy(project_dir)
if __name__ == '__main__':
main()
| bsd-3-clause |
MonicaHsu/truvaluation | venv/lib/python2.7/distutils/config.py | 64 | 4131 | """distutils.pypirc
Provides the PyPIRCCommand class, the base class for the command classes
that uses .pypirc in the distutils.command package.
"""
import os
from ConfigParser import ConfigParser
from distutils.cmd import Command
DEFAULT_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:%s
password:%s
"""
class PyPIRCCommand(Command):
    """Base command that knows how to handle the .pypirc file
    """
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi'
    DEFAULT_REALM = 'pypi'
    repository = None
    realm = None
    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % \
            DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server')]
    boolean_options = ['show-response']
    def _get_rc_file(self):
        """Returns rc file path (~/.pypirc)."""
        return os.path.join(os.path.expanduser('~'), '.pypirc')
    def _store_pypirc(self, username, password):
        """Creates a default .pypirc file."""
        rc = self._get_rc_file()
        # 0600 (Python 2 octal literal): owner read/write only, since the
        # file stores a plaintext password.
        f = os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0600), 'w')
        try:
            f.write(DEFAULT_PYPIRC % (username, password))
        finally:
            f.close()
    def _read_pypirc(self):
        """Reads the .pypirc file.

        Returns a dict with keys 'username', 'password', 'repository',
        'server' and 'realm' for the server matching self.repository,
        or an empty dict when no usable configuration is found.
        """
        rc = self._get_rc_file()
        if os.path.exists(rc):
            self.announce('Using PyPI login from %s' % rc)
            repository = self.repository or self.DEFAULT_REPOSITORY
            config = ConfigParser()
            config.read(rc)
            sections = config.sections()
            if 'distutils' in sections:
                # let's get the list of servers
                index_servers = config.get('distutils', 'index-servers')
                _servers = [server.strip() for server in
                            index_servers.split('\n')
                            if server.strip() != '']
                if _servers == []:
                    # nothing set, let's try to get the default pypi
                    if 'pypi' in sections:
                        _servers = ['pypi']
                    else:
                        # the file is not properly defined, returning
                        # an empty dict
                        return {}
                for server in _servers:
                    current = {'server': server}
                    current['username'] = config.get(server, 'username')
                    # optional params
                    for key, default in (('repository',
                                          self.DEFAULT_REPOSITORY),
                                         ('realm', self.DEFAULT_REALM),
                                         ('password', None)):
                        if config.has_option(server, key):
                            current[key] = config.get(server, key)
                        else:
                            current[key] = default
                    # Match either by server name or by repository URL.
                    if (current['server'] == repository or
                        current['repository'] == repository):
                        return current
            elif 'server-login' in sections:
                # old format
                server = 'server-login'
                if config.has_option(server, 'repository'):
                    repository = config.get(server, 'repository')
                else:
                    repository = self.DEFAULT_REPOSITORY
                return {'username': config.get(server, 'username'),
                        'password': config.get(server, 'password'),
                        'repository': repository,
                        'server': server,
                        'realm': self.DEFAULT_REALM}
        return {}
    def initialize_options(self):
        """Initialize options."""
        self.repository = None
        self.realm = None
        self.show_response = 0
    def finalize_options(self):
        """Finalizes options, filling in defaults."""
        if self.repository is None:
            self.repository = self.DEFAULT_REPOSITORY
        if self.realm is None:
            self.realm = self.DEFAULT_REALM
| mit |
clef/python-social-auth | social/backends/weixin.py | 49 | 3533 | # -*- coding: utf-8 -*-
# author:duoduo3369@gmail.com https://github.com/duoduo369
"""
Weixin OAuth2 backend
"""
from requests import HTTPError
from social.backends.oauth import BaseOAuth2
from social.exceptions import AuthCanceled, AuthUnknownError
class WeixinOAuth2(BaseOAuth2):
    """Weixin OAuth authentication backend"""
    name = 'weixin'
    # The stable user identifier in Weixin responses.
    ID_KEY = 'openid'
    AUTHORIZATION_URL = 'https://open.weixin.qq.com/connect/qrconnect'
    ACCESS_TOKEN_URL = 'https://api.weixin.qq.com/sns/oauth2/access_token'
    ACCESS_TOKEN_METHOD = 'POST'
    REDIRECT_STATE = False
    # (weixin_field, extra_data_key) pairs copied into the social auth record.
    EXTRA_DATA = [
        ('nickname', 'username'),
        ('headimgurl', 'profile_image_url'),
    ]
    def get_user_details(self, response):
        """Return user details from Weixin. API URL is:
        https://api.weixin.qq.com/sns/userinfo
        """
        if self.setting('DOMAIN_AS_USERNAME'):
            username = response.get('domain', '')
        else:
            username = response.get('nickname', '')
        return {
            'username': username,
            'profile_image_url': response.get('headimgurl', '')
        }
    def user_data(self, access_token, *args, **kwargs):
        """Fetch the user profile from the Weixin userinfo endpoint."""
        data = self.get_json('https://api.weixin.qq.com/sns/userinfo', params={
            'access_token': access_token,
            'openid': kwargs['response']['openid']
        })
        nickname = data.get('nickname')
        if nickname:
            # weixin api has some encode bug, here need handle
            data['nickname'] = nickname.encode('raw_unicode_escape').decode('utf-8')
        return data
    def auth_params(self, state=None):
        """Build the query parameters for the authorization redirect;
        Weixin uses 'appid' instead of the usual 'client_id'."""
        appid, secret = self.get_key_and_secret()
        params = {
            'appid': appid,
            'redirect_uri': self.get_redirect_uri(state)
        }
        if self.STATE_PARAMETER and state:
            params['state'] = state
        if self.RESPONSE_TYPE:
            params['response_type'] = self.RESPONSE_TYPE
        return params
    def auth_complete_params(self, state=None):
        """Parameters for exchanging the auth code for an access token."""
        appid, secret = self.get_key_and_secret()
        return {
            'grant_type': 'authorization_code',  # request auth code
            'code': self.data.get('code', ''),  # server response code
            'appid': appid,
            'secret': secret,
            'redirect_uri': self.get_redirect_uri(state)
        }
    def refresh_token_params(self, token, *args, **kwargs):
        """Parameters for refreshing an expired access token."""
        appid, secret = self.get_key_and_secret()
        return {
            'refresh_token': token,
            'grant_type': 'refresh_token',
            'appid': appid,
            'secret': secret
        }
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        self.process_error(self.data)
        try:
            response = self.request_access_token(
                self.ACCESS_TOKEN_URL,
                data=self.auth_complete_params(self.validate_state()),
                headers=self.auth_headers(),
                method=self.ACCESS_TOKEN_METHOD
            )
        except HTTPError as err:
            # HTTP 400 from the token endpoint means the user cancelled /
            # the code is invalid; anything else is re-raised as-is.
            if err.response.status_code == 400:
                raise AuthCanceled(self)
            else:
                raise
        except KeyError:
            raise AuthUnknownError(self)
        # Weixin reports application-level failures via 'errcode' in the
        # JSON body rather than an HTTP error status.
        if 'errcode' in response:
            raise AuthCanceled(self)
        self.process_error(response)
        return self.do_auth(response['access_token'], response=response,
                            *args, **kwargs)
| bsd-3-clause |
danmit/django-calaccess-raw-data | calaccess_raw/admin/__init__.py | 29 | 4140 | from calaccess_raw.admin.base import BaseAdmin
from calaccess_raw.admin.campaign import (
CvrSoCdAdmin,
Cvr2SoCdAdmin,
CvrCampaignDisclosureCdAdmin,
Cvr2CampaignDisclosureCdAdmin,
RcptCdAdmin,
Cvr3VerificationInfoCdAdmin,
LoanCdAdmin,
S401CdAdmin,
ExpnCdAdmin,
F495P2CdAdmin,
DebtCdAdmin,
S496CdAdmin,
SpltCdAdmin,
S497CdAdmin,
F501502CdAdmin,
S498CdAdmin,
)
from calaccess_raw.admin.lobbying import (
CvrRegistrationCdAdmin,
Cvr2RegistrationCdAdmin,
CvrLobbyDisclosureCdAdmin,
Cvr2LobbyDisclosureCdAdmin,
LobbyAmendmentsCdAdmin,
F690P2CdAdmin,
LattCdAdmin,
LexpCdAdmin,
LccmCdAdmin,
LothCdAdmin,
LempCdAdmin,
LpayCdAdmin,
)
from calaccess_raw.admin.common import (
FilernameCdAdmin,
FilerFilingsCdAdmin,
FilingsCdAdmin,
SmryCdAdmin,
CvrE530CdAdmin,
TextMemoCdAdmin,
)
from calaccess_raw.admin.other import (
AcronymsCdAdmin,
AddressCdAdmin,
BallotMeasuresCdAdmin,
EfsFilingLogCdAdmin,
FilersCdAdmin,
FilerAcronymsCdAdmin,
FilerAddressCdAdmin,
FilerEthicsClassCdAdmin,
FilerInterestsCdAdmin,
FilerLinksCdAdmin,
FilerStatusTypesCdAdmin,
FilerToFilerTypeCdAdmin,
FilerTypesCdAdmin,
FilerXrefCdAdmin,
FilingPeriodCdAdmin,
GroupTypesCdAdmin,
HeaderCdAdmin,
HdrCdAdmin,
ImageLinksCdAdmin,
LegislativeSessionsCdAdmin,
LobbyingChgLogCdAdmin,
LobbyistContributions1CdAdmin,
LobbyistContributions2CdAdmin,
LobbyistContributions3CdAdmin,
LobbyistEmployer1CdAdmin,
LobbyistEmployer2CdAdmin,
LobbyistEmployer3CdAdmin,
LobbyistEmployerFirms1CdAdmin,
LobbyistEmployerFirms2CdAdmin,
LobbyistEmpLobbyist1CdAdmin,
LobbyistEmpLobbyist2CdAdmin,
LobbyistFirm1CdAdmin,
LobbyistFirm2CdAdmin,
LobbyistFirm3CdAdmin,
LobbyistFirmEmployer1CdAdmin,
LobbyistFirmEmployer2CdAdmin,
LobbyistFirmLobbyist1CdAdmin,
LobbyistFirmLobbyist2CdAdmin,
LookupCodeAdmin,
NamesCdAdmin,
ReceivedFilingsCdAdmin,
ReportsCdAdmin,
)
__all__ = [
'BaseAdmin',
'CvrSoCdAdmin',
'Cvr2SoCdAdmin',
'CvrCampaignDisclosureCdAdmin',
'Cvr2CampaignDisclosureCdAdmin',
'RcptCdAdmin',
'Cvr3VerificationInfoCdAdmin',
'LoanCdAdmin',
'S401CdAdmin',
'ExpnCdAdmin',
'F495P2CdAdmin',
'DebtCdAdmin',
'S496CdAdmin',
'SpltCdAdmin',
'S497CdAdmin',
'F501502CdAdmin',
'S498CdAdmin',
'CvrRegistrationCdAdmin',
'Cvr2RegistrationCdAdmin',
'CvrLobbyDisclosureCdAdmin',
'Cvr2LobbyDisclosureCdAdmin',
'LobbyAmendmentsCdAdmin',
'F690P2CdAdmin',
'LattCdAdmin',
'LexpCdAdmin',
'LccmCdAdmin',
'LothCdAdmin',
'LempCdAdmin',
'LpayCdAdmin',
'FilerFilingsCdAdmin',
'FilingsCdAdmin',
'SmryCdAdmin',
'CvrE530CdAdmin',
'TextMemoCdAdmin',
'AcronymsCdAdmin',
'AddressCdAdmin',
'BallotMeasuresCdAdmin',
'EfsFilingLogCdAdmin',
'FilernameCdAdmin',
'FilersCdAdmin',
'FilerAcronymsCdAdmin',
'FilerAddressCdAdmin',
'FilerEthicsClassCdAdmin',
'FilerInterestsCdAdmin',
'FilerLinksCdAdmin',
'FilerStatusTypesCdAdmin',
'FilerToFilerTypeCdAdmin',
'FilerTypesCdAdmin',
'FilerXrefCdAdmin',
'FilingPeriodCdAdmin',
'GroupTypesCdAdmin',
'HeaderCdAdmin',
'HdrCdAdmin',
'ImageLinksCdAdmin',
'LegislativeSessionsCdAdmin',
'LobbyingChgLogCdAdmin',
'LobbyistContributions1CdAdmin',
'LobbyistContributions2CdAdmin',
'LobbyistContributions3CdAdmin',
'LobbyistEmployer1CdAdmin',
'LobbyistEmployer2CdAdmin',
'LobbyistEmployer3CdAdmin',
'LobbyistEmployerFirms1CdAdmin',
'LobbyistEmployerFirms2CdAdmin',
'LobbyistEmpLobbyist1CdAdmin',
'LobbyistEmpLobbyist2CdAdmin',
'LobbyistFirm1CdAdmin',
'LobbyistFirm2CdAdmin',
'LobbyistFirm3CdAdmin',
'LobbyistFirmEmployer1CdAdmin',
'LobbyistFirmEmployer2CdAdmin',
'LobbyistFirmLobbyist1CdAdmin',
'LobbyistFirmLobbyist2CdAdmin',
'LookupCodeAdmin',
'NamesCdAdmin',
'ReceivedFilingsCdAdmin',
'ReportsCdAdmin',
]
| mit |
Bleyddyn/malpi | malpi/dkwm/gym_envs/renderer.py | 1 | 3073 | from enum import Enum
import pyglet
from pyglet.gl import GLubyte, glFlush
import numpy as np
from ctypes import POINTER
# Setting this env variable was needed to let pyglet find libc:
# export DYLD_FALLBACK_LIBRARY_PATH=/usr/lib
class DKWMRenderer(object):
    """A renderer for the DKWM gym.

    Holds and displays the most recent observation image and draws text
    labels on top of it.  Based on:
    https://github.com/maximecb/gym-miniworld/blob/master/gym_miniworld/miniworld.py
    """

    # Closed set of label placements.  See: http://www.blog.pythonlibrary.org/2018/03/20/python-3-an-intro-to-enumerations/
    labelEnum = Enum( "Label", "TopLeft TopRight BottomLeft BottomRight" )

    def __init__( self, window_width, window_height ):
        """Create a renderer for a window of the given pixel size.

        The pyglet window itself is created lazily on the first call to
        render() in 'human' mode.
        """
        self.window_width = window_width
        self.window_height = window_height
        self.window = None
        self.labels = {}
        # Fix: initialize so render() before any set_obs() call cannot
        # raise AttributeError.
        self.last_obs = None

    def close(self):
        """Close the pyglet window, if one was ever created."""
        if self.window is not None:
            self.window.close()

    def set_obs( self, next_obs ):
        """Store the next observation image to be rendered.

        next_obs is expected to be an HxWx3 RGB uint8 array -- TODO confirm
        against callers.
        """
        self.last_obs = next_obs

    def clear_label( self, label_id ):
        """Remove a label by id; silently ignores unknown ids."""
        self.labels.pop( label_id, None )

    def set_label( self, label_text, label_id, location=labelEnum.TopRight ):
        """Create or replace the text label identified by label_id.

        NOTE(review): the ``location`` argument is currently ignored; every
        label is drawn at the fixed (10, 30) position.  TODO Handle location.
        """
        self.labels[label_id] = pyglet.text.Label(
            font_name="Courier",
            font_size=12,
            multiline=True,
            width=400,
            x = 10,
            y = 30
            )
        self.labels[label_id].text = label_text

    def render(self, mode='human'):
        """Render the last observation.

        In 'rgb_array' mode just return the stored image (may be None when
        no observation has been set yet).  Otherwise blit the image,
        centered, into a lazily created pyglet window and draw all labels.
        """
        img = self.last_obs

        if mode == 'rgb_array':
            return img

        if img is None:
            # Nothing to draw yet; avoid crashing on img.shape below.
            return None

        if self.window is None:
            config = pyglet.gl.Config(double_buffer=True)
            self.window = pyglet.window.Window(
                width=self.window_width,
                height=self.window_height,
                resizable=False,
                config=config
            )

        self.window.clear()
        self.window.switch_to()

        img_width = img.shape[1]
        img_height = img.shape[0]

        # pyglet's origin is bottom-left, so flip the image vertically and
        # make it contiguous before handing the raw pointer over.
        img = np.ascontiguousarray(np.flip(img, axis=0))
        img_data = pyglet.image.ImageData(
            img_width,
            img_height,
            'RGB',
            img.ctypes.data_as(POINTER(GLubyte)),
            pitch=img_width * 3,
        )

        # Center the observation image inside the window.
        img_left = (self.window_width - img_width) // 2
        img_top = (self.window_height - img_height) // 2
        img_data.blit(
            img_left,
            img_top,
            0,
            width=img_width,
            height=img_height
        )

        for a_label in self.labels.values():
            # Draw the text label in the window
            a_label.draw()

        # Force execution of queued commands
        glFlush()

        # If we are not running the Pyglet event loop,
        # we have to manually flip the buffers and dispatch events
        if mode == 'human':
            self.window.flip()
            self.window.dispatch_events()

        return None

    def reset(self):
        """Drop all labels and clear the window (if one exists)."""
        self.labels = {}
        if self.window is not None:
            self.window.clear()
| mit |
KrozekGimVic/Grafi | test/googletest/googletest/test/gtest_list_tests_unittest.py | 1898 | 6515 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
  """Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
  command = [EXE_PATH] + args
  return gtest_test_utils.Subprocess(command, capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
  """Tests using the --gtest_list_tests flag to list all tests."""

  def RunAndVerify(self, flag_value, expected_output_re, other_flag):
    """Runs the test program and checks the set of tests it prints.

    Args:
      flag_value: value of the --gtest_list_tests flag;
          None if the flag should not be present.
      expected_output_re: regular expression that matches the expected
          output after running command; None means the output must NOT
          look like a full test listing.
      other_flag: a different flag to be passed to command
          along with gtest_list_tests; None if the flag should not
          be present.
    """
    # Map the requested flag value onto the actual command-line flag and a
    # human-readable description for failure messages.
    if flag_value is None:
      flag, flag_expression = '', 'not set'
    elif flag_value == '0':
      flag, flag_expression = '--%s=0' % LIST_TESTS_FLAG, '0'
    else:
      flag, flag_expression = '--%s' % LIST_TESTS_FLAG, '1'

    args = [flag]
    if other_flag is not None:
      args.append(other_flag)

    output = Run(args)

    if expected_output_re:
      msg = ('when %s is %s, the output of "%s" is "%s",\n'
             'which does not match regex "%s"' %
             (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
              expected_output_re.pattern))
      self.assert_(expected_output_re.match(output), msg)
    else:
      msg = ('when %s is %s, the output of "%s" is "%s"' %
             (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))
      self.assert_(not EXPECTED_OUTPUT_NO_FILTER_RE.match(output), msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(flag_value=None,
                      expected_output_re=None,
                      other_flag=None)

  def testFlag(self):
    """Tests using the --gtest_list_tests flag."""

    self.RunAndVerify(flag_value='0',
                      expected_output_re=None,
                      other_flag=None)
    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag=None)

  def testOverrideNonFilterFlags(self):
    """Tests that --gtest_list_tests overrides the non-filter flags."""

    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
                      other_flag='--gtest_break_on_failure')

  def testWithFilterFlags(self):
    """Tests that --gtest_list_tests takes into account the
    --gtest_filter flag."""

    self.RunAndVerify(flag_value='1',
                      expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
                      other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
pupboss/xndian | deploy/site-packages/pip/_vendor/html5lib/inputstream.py | 186 | 30636 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        # Absolute offset = full chunks before the current one + offset
        # within the current chunk.
        chunkIndex, offset = self.position
        consumed = sum(len(chunk) for chunk in self.buffer[:chunkIndex])
        return consumed + offset

    def seek(self, pos):
        # Only positions inside already-buffered data can be sought.
        assert pos <= self._bufferedBytes()
        remaining = pos
        index = 0
        while len(self.buffer[index]) < remaining:
            remaining -= len(self.buffer[index])
            index += 1
        self.position = [index, remaining]

    def read(self, bytes):
        if not self.buffer:
            return self._readStream(bytes)
        atBufferEnd = (self.position[0] == len(self.buffer) and
                       self.position[1] == len(self.buffer[-1]))
        if atBufferEnd:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes held across all buffered chunks.
        return sum(len(chunk) for chunk in self.buffer)

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream and remember it.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        # Satisfy the read from buffered chunks first, falling back to the
        # stream for whatever is left.
        remainingBytes = bytes
        parts = []
        bufferIndex, bufferOffset = self.position
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            chunk = self.buffer[bufferIndex]
            available = len(chunk) - bufferOffset
            if remainingBytes <= available:
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                bytesToRead = available
                self.position = [bufferIndex, len(chunk)]
                bufferIndex += 1
            parts.append(chunk[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            parts.append(self._readStream(remainingBytes))
        return b"".join(parts)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Factory returning the appropriate input stream for *source*.

    Text (unicode) input gets an HTMLUnicodeInputStream; byte input gets
    an HTMLBinaryInputStream, which performs encoding detection.
    """
    if hasattr(source, "read"):
        sourceIsUnicode = isinstance(source.read(0), text_type)
    else:
        sourceIsUnicode = isinstance(source, text_type)

    if not sourceIsUnicode:
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
    if encoding is not None:
        raise TypeError("Cannot explicitly set an encoding with a unicode string")
    return HTMLUnicodeInputStream(source)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """

    # Number of characters pulled from the underlying stream per readChunk().
    _defaultChunkSize = 10240

    def __init__(self, source):
        """Initialises the HTMLInputStream.

        source can be either a file-object, local filename or a (unicode)
        string.  The stream is assumed to be already-decoded text; byte
        input is handled by HTMLBinaryInputStream.
        """

        # Craziness: distinguish wide (UCS4) from narrow (UCS2) Python
        # builds -- on a narrow build a non-BMP literal has length 2.
        if len("\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
            self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2
            # Matches lone surrogates (a lead not followed by a trail, or a
            # trail not preceded by a lead).
            self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")

        # List of where new lines occur
        self.newLines = [0]

        self.charEncoding = ("utf-8", "certain")
        self.dataStream = self.openStream(source)

        self.reset()

    def reset(self):
        """Reset chunk, position and error state to the start of the stream."""
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)

        return stream

    def _position(self, offset):
        # Translate an offset within the current chunk into an absolute
        # (line, column) pair, using the line/column totals carried over
        # from previous chunks.
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # Still on the line the previous chunk ended on.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)

    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line + 1, col)

    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF

        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1

        return char

    def readChunk(self, chunkSize=None):
        """Read and normalise the next chunk of text.

        Returns False when the underlying stream is exhausted, True
        otherwise.  Normalisation: invalid code points are reported and
        replaced with U+FFFD, and CR LF / CR are converted to LF.
        """
        if chunkSize is None:
            chunkSize = self._defaultChunkSize

        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)

        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0

        data = self.dataStream.read(chunkSize)

        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False

        if len(data) > 1:
            lastv = ord(data[-1])
            # Hold back a trailing CR or lead surrogate: the next chunk may
            # complete the CR LF pair / surrogate pair.
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]

        self.reportCharacterErrors(data)

        # Replace invalid characters
        # Note U+0000 is dealt with in the tokenizer
        data = self.replaceCharactersRegexp.sub("\ufffd", data)

        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")

        self.chunk = data
        self.chunkSize = len(data)

        return True

    def characterErrorsUCS4(self, data):
        # Wide build: each invalid code point is exactly one regex match.
        for i in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")

    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        # Narrow build: surrogate pairs show up as two one-code-unit
        # matches, so the second half of a valid pair must be skipped.
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone lead surrogate at the very end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")

    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.

        With opposite=True the sense is inverted: characters *in* the set
        are consumed instead.
        """

        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                # The escape below only works for ASCII characters.
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)

        rv = []

        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = "".join(rv)
        return r

    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """

    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        source can be either a file-object, local filename or a byte string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        chardet - allow falling back to the charade/chardet library for
        detection when no BOM or meta declaration is found
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        HTMLUnicodeInputStream.__init__(self, self.rawStream)

        self.charEncoding = (codecName(encoding), "certain")

        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"

        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)

        # Call superclass
        self.reset()

    def reset(self):
        # Re-wrap the raw byte stream with a decoder for the current
        # encoding (undecodable bytes become U+FFFD via 'replace'), then
        # reset the text-level state.
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)

        # Encoding detection requires seeking back to the start, so wrap
        # non-seekable streams in our own buffer.
        try:
            stream.seek(stream.tell())
        except:
            stream = BufferedStream(stream)

        return stream

    def detectEncoding(self, parseMeta=True, chardet=True):
        """Return an (encoding, confidence) pair.

        Tries, in order: BOM sniffing, <meta> prescan, the charade/chardet
        library (if importable), and finally the default encoding.
        """
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding

        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}

        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]

        return encoding, confidence

    def changeEncoding(self, newEncoding):
        """Switch to a newly declared encoding mid-parse.

        Raises ReparseException when the stream must be re-decoded from the
        beginning under the new encoding.
        """
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            # NOTE(review): charEncoding was reassigned just above, so this
            # message prints the *new* encoding twice rather than old -> new.
            raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)

        return encoding

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        # A meta-declared UTF-16 is treated as UTF-8 per the HTML spec.
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"

        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised

    The bytes are lower-cased on construction, so all matching against this
    object is effectively case-insensitive.
    """

    def __new__(self, value):
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())

    def __init__(self, value):
        # Position starts one before the first byte; the first next() call
        # moves it to index 0.
        self._position = -1

    def __iter__(self):
        return self

    def __next__(self):
        # Advance the position and return the byte there as a length-1
        # bytes object; StopIteration past the end.
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]

    def next(self):
        # Py2 compat
        return self.__next__()

    def previous(self):
        # Step the position back by one and return the byte at the *old*
        # position is not what happens: the decremented index is used, so
        # this returns the byte now under the cursor after stepping back.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]

    def setPosition(self, position):
        # Reading the position after the end has been reached always raises.
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Before the first next(): no valid position yet.
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position:self.position + 1]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters

        Leaves the position on the first byte NOT in *chars* and returns
        that byte, or returns None when the end of data is reached.
        """
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        # Mirror image of skip(): advance until a byte IN *chars* is found;
        # leave the position on it and return it, or None at end of data.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match

        Raises StopIteration if the sequence is not found.
        """
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None

    def getEncoding(self):
        """Scan the byte stream and return the detected encoding (or None).

        Dispatches on the markup construct found at each position; stops
        when a handler returns False (encoding committed) or the data runs
        out (StopIteration from the EncodingBytes cursor).
        """
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break

        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")

    def handleMeta(self):
        # Returns False to stop parsing once an encoding has been committed
        # to self.encoding, True to keep scanning.
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # A content attribute was seen before the pragma;
                        # commit it now.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Remember it until a content-type pragma
                                # shows up.
                                pendingEncoding = codec

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        # Consume the "/" byte before examining the tag name.
        next(self.data)
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True

        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        # Skip to the end of the current markup declaration / PI.
        return self.data.jumpTo(b">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None

        The step comments below follow the numbered steps of the HTML
        "get an attribute" prescan algorithm.
        """
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset value from a content attribute, e.g.
    ``text/html; charset=utf-8``.

    *data* must provide the EncodingBytes cursor interface (jumpTo, skip,
    position) in addition to being bytes -- the assert below only checks
    the latter.
    """

    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        """Return the charset value as bytes, or None if not found."""
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # Ran off the end of the data at any point: no charset present.
            return None
def codecName(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None
    if not encoding:
        return None
    canonicalName = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonicalName, None)
| mit |
GAMPTeam/vampyre | test/test_sparse/test_mlvamp_probit.py | 1 | 5993 | from __future__ import division
"""
test_vamp_probit.py: tests for ML-VAMP for probit estimation
"""
# Add the path to the vampyre package and import it
import env
env.add_vp_path()
import vampyre as vp
# Add other packages
import numpy as np
import unittest
def debias_mse(zhat, ztrue):
    """Compute the debiased normalized MSE in dB.

    For 1D vectors this is 10*log10(min_c ||ztrue - c*zhat||^2 / ||ztrue||^2)
    = 10*log10(1 - |zhat'*ztrue|^2 / (||zhat||^2 ||ztrue||^2)).
    For matrices the per-column values are averaged before taking the log.
    Returns 0 when either vector has (near-)zero power.
    """
    cross = np.abs(np.sum(zhat.conj() * ztrue, axis=0)) ** 2
    phat = np.sum(np.abs(zhat) ** 2, axis=0)
    ptrue = np.sum(np.abs(ztrue) ** 2, axis=0)
    eps = 1e-8
    # Degenerate case: a (near-)zero vector makes the ratio meaningless.
    if np.any(phat < eps) or np.any(ptrue < eps):
        return 0
    return 10 * np.log10(np.mean(1 - cross / phat / ptrue))
def probit_test(nz0=512, nz1=4096, ncol=10, snr=30, verbose=False, plot=False,
                est_meth='cg', nit_cg=10, mse_tol=-20):
    """
    Test VAMP on a sparse probit estimation problem

    In this test, the input :math:`z_0` is a Bernoulli-Gaussian and
    :math:`z_1=Az_0+w` where :math:`w` is Gaussian noise and :math:`A` is an
    i.i.d. Gaussian matrix.  The problem is to estimate :math:`z_0` from
    binary measurements :math:`y=sign(z_1)`.  This is equivalent to sparse
    probit estimation in statistics.

    :param nz0:  number of rows of :math:`z_0`
    :param nz1:  number of rows of :math:`z_1`
    :param ncol:  number of columns of :math:`z_1` and :math:`z_0`
    :param snr: SNR in dB
    :param Boolean verbose: Flag indicating if the test results are
       to be printed.
    :param Boolean plot:  Flag indicating if the test results are
       to be plot
    :param est_meth:  Estimation method.  Either `svd` or `cg`
    :param nit_cg:  number of CG iterations
    :param mse_tol:  MSE must be below this value for test to pass.
    :raises vp.common.VpException: if the final MSE exceeds ``mse_tol``.
    """
    # Parameters
    map_est = False
    sparse_rat = 0.1    # fraction of nonzero entries in z0

    # Compute the dimensions (1D shapes for the single-column case)
    ny = nz1
    if (ncol == 1):
        zshape0 = (nz0,)
        zshape1 = (nz1,)
        yshape = (ny,)
    else:
        zshape0 = (nz0, ncol)
        zshape1 = (nz1, ncol)
        yshape = (ny, ncol)
    Ashape = (nz1, nz0)

    # Generate random Bernoulli-Gaussian input z0
    #np.random.seed(40)
    zpowtgt = 2
    zmean0 = 0
    zvar0 = zpowtgt / sparse_rat
    z0 = np.random.normal(zmean0, np.sqrt(zvar0), zshape0)
    u = np.random.uniform(0, 1, zshape0) < sparse_rat
    z0 = z0 * u
    # Rescale each column to hit the target power exactly
    zpow = np.mean(z0**2, axis=0)
    if (ncol > 1):
        zpow = zpow[None, :]
    z0 = z0 * np.sqrt(zpowtgt / zpow)

    # Create a random transform
    A = np.random.normal(0, np.sqrt(1/nz0), Ashape)
    b = np.random.normal(0, 1, zshape1)

    # Linear transform plus Gaussian noise at the requested SNR
    Az0 = A.dot(z0) + b
    wvar = np.power(10, -0.1*snr) * np.mean(np.abs(Az0)**2)
    z1 = Az0 + np.random.normal(0, np.sqrt(wvar), yshape)

    # Signed (binary) output
    thresh = 0
    y = (z1 > thresh)

    # Create estimators for the input and output of the transform.
    # The input prior is a mixture: Gaussian (prob sparse_rat) vs zero.
    est0_gauss = vp.estim.GaussEst(zmean0, zvar0, zshape0, map_est=map_est)
    est0_dis = vp.estim.DiscreteEst(0, 1, zshape0)
    est_in = vp.estim.MixEst([est0_gauss, est0_dis], [sparse_rat, 1-sparse_rat],
                             name='Input')
    est_out = vp.estim.BinaryQuantEst(y, yshape, thresh=thresh, name='Output')

    # Estimator for the linear transform
    Aop = vp.trans.MatrixLT(A, zshape0)
    est_lin = vp.estim.LinEstTwo(Aop, b, wvar, est_meth=est_meth, nit_cg=nit_cg,
                                 name='Linear')

    # List of the estimators
    est_list = [est_in, est_lin, est_out]

    # Create the message handlers (one per latent variable, no damping)
    damp = 1
    msg_hdl0 = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape0, damp=damp)
    msg_hdl1 = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape1, damp=damp)
    msg_hdl_list = [msg_hdl0, msg_hdl1]
    ztrue = [z0, z1]
    solver = vp.solver.mlvamp.MLVamp(est_list, msg_hdl_list, comp_cost=True,
                                     hist_list=['zhat', 'zhatvar'])

    # Run the solver
    solver.solve()

    # Get the per-iteration estimates and predicted variances
    zhat_hist = solver.hist_dict['zhat']
    zvar_hist = solver.hist_dict['zhatvar']

    # Compute per-iteration errors: actual (debiased) vs solver-predicted
    nvar = len(ztrue)
    nit2 = len(zhat_hist)
    mse_act = np.zeros((nit2, nvar))
    mse_pred = np.zeros((nit2, nvar))
    for ivar in range(nvar):
        zpowi = np.mean(np.abs(ztrue[ivar])**2, axis=0)
        for it in range(nit2):
            zhati = zhat_hist[it][ivar]
            zhatvari = zvar_hist[it][ivar]
            mse_act[it, ivar] = debias_mse(zhati, ztrue[ivar])
            mse_pred[it, ivar] = 10*np.log10(np.mean(zhatvari/zpowi))

    # Check failure on the final iteration only
    fail = np.any(mse_act[-1, :] > mse_tol)

    # Display the final MSE
    if verbose or fail:
        print("z0 mse: act: {0:7.2f} pred: {1:7.2f}".format(
            mse_act[-1, 0], mse_pred[-1, 0]))
        print("z1 mse: act: {0:7.2f} pred: {1:7.2f}".format(
            mse_act[-1, 1], mse_pred[-1, 1]))

    if plot:
        import matplotlib.pyplot as plt
        t = np.array(range(nit2))
        for ivar in range(nvar):
            plt.subplot(1, nvar, ivar+1)
            zpow = np.mean(abs(ztrue[ivar])**2)
            plt.plot(t, mse_act[:, ivar], 's-')
            plt.plot(t, mse_pred[:, ivar], 'o-')
            plt.legend(['true', 'pred'])

    if fail:
        raise vp.common.VpException("Final MSE higher than expected")
class TestCases(unittest.TestCase):
def test_mlvamp_sparse_probit(self):
"""
Calls the probit estimation test case
"""
#probit_test(ncol=10,est_meth='cg')
probit_test(ncol=10,est_meth='svd',plot=False)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| mit |
MiltosD/CEF-ELRC | lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/topology.py | 311 | 2226 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
__all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull',
'geos_difference', 'geos_envelope', 'geos_intersection',
'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify',
'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate']
from ctypes import c_char_p, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args):
    """Configure *func* as a unary GEOS topology routine.

    Installs the ctypes signature (a geometry pointer first, followed by
    any extra argument types passed in *args), sets the geometry return
    type and the geometry error-checking callback, and returns the same
    function object for convenient assignment.
    """
    signature = [GEOM_PTR]
    signature.extend(args)
    func.argtypes = signature
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func
### Topology Routines ###
# Each name wraps the GEOS C function of the same name; `topology`
# installs the ctypes signature (geometry in -> geometry out) plus the
# extra scalar argument types where the C API takes them.
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)

# GEOSRelate returns a string, not a geometry, so it is configured by
# hand rather than through `topology`.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string

# Routines only in GEOS 3.1+
if GEOS_PREPARE:
    geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
    geos_cascaded_union.argtypes = [GEOM_PTR]
    geos_cascaded_union.restype = GEOM_PTR
    __all__.append('geos_cascaded_union')
| bsd-3-clause |
jmesteve/asterisk | openerp/tools/cache.py | 75 | 3742 | import lru
class ormcache(object):
    """LRU cache decorator for orm methods.

    Results of the decorated method are memoized per model instance in an
    ``lru.LRU`` structure stored on ``self2._ormcache``.  Cache keys are
    built from the call arguments after skipping the first ``skiparg``
    positional ones (typically ``cr`` and ``uid``).
    """

    def __init__(self, skiparg=2, size=8192, multi=None, timeout=None):
        # `multi` and `timeout` are accepted for signature compatibility
        # with subclasses/callers but are unused by this class.
        self.skiparg = skiparg
        self.size = size
        self.method = None
        self.stat_miss = 0
        self.stat_hit = 0
        self.stat_err = 0

    def __call__(self, m):
        # Wrap the method so every call goes through the cache lookup and
        # expose `clear_cache` on the wrapper for explicit invalidation.
        self.method = m
        def lookup(self2, cr, *args):
            r = self.lookup(self2, cr, *args)
            return r
        lookup.clear_cache = self.clear
        return lookup

    def stat(self):
        """Return a human-readable hit/miss/error summary string."""
        total = self.stat_miss + self.stat_hit
        # Guard against ZeroDivisionError before any lookup has happened.
        ratio = (100 * float(self.stat_hit)) / total if total else 0.0
        return "lookup-stats hit=%s miss=%s err=%s ratio=%.1f" % (
            self.stat_hit, self.stat_miss, self.stat_err, ratio)

    def lru(self, self2):
        """Return (creating on demand) the LRU table for this method."""
        try:
            ormcache = getattr(self2, '_ormcache')
        except AttributeError:
            ormcache = self2._ormcache = {}
        try:
            d = ormcache[self.method]
        except KeyError:
            d = ormcache[self.method] = lru.LRU(self.size)
        return d

    def lookup(self, self2, cr, *args):
        d = self.lru(self2)
        key = args[self.skiparg-2:]
        try:
            r = d[key]
            self.stat_hit += 1
            return r
        except KeyError:
            # Miss: compute and store.
            self.stat_miss += 1
            value = d[key] = self.method(self2, cr, *args)
            return value
        except TypeError:
            # Unhashable key: fall back to an uncached call.
            self.stat_err += 1
            return self.method(self2, cr, *args)

    def clear(self, self2, *args):
        """Remove the *args entry from the cache, or all keys if *args is
        undefined, and flag the registry so other workers can resync."""
        d = self.lru(self2)
        if args:
            try:
                key = args[self.skiparg-2:]
                del d[key]
                self2.pool._any_cache_cleared = True
            except KeyError:
                pass
        else:
            d.clear()
            self2.pool._any_cache_cleared = True
class ormcache_multi(ormcache):
    """Variant of ormcache for methods whose argument at position ``multi``
    is a list of ids; results are cached per individual id and missing ids
    are recomputed in a single batched call."""

    def __init__(self, skiparg=2, size=8192, multi=3):
        super(ormcache_multi, self).__init__(skiparg, size)
        # Offset by the two implicit leading arguments (self2, cr) so
        # `multi` indexes into *args inside lookup().
        self.multi = multi - 2

    def lookup(self, self2, cr, *args):
        d = self.lru(self2)
        args = list(args)
        multi = self.multi
        ids = args[multi]
        r = {}
        miss = []
        # Probe the cache id by id, collecting the misses.
        for i in ids:
            args[multi] = i
            key = tuple(args[self.skiparg-2:])
            try:
                r[i] = d[key]
                self.stat_hit += 1
            except Exception:
                # NOTE(review): broad catch — a plain cache miss raises
                # KeyError and an unhashable key raises TypeError; both fall
                # through to recomputation here. Consider narrowing — TODO
                # confirm no other exception type is relied upon.
                self.stat_miss += 1
                miss.append(i)
        # Recompute all missing ids with one batched method call, then
        # back-fill the cache with the fresh per-id entries.
        if miss:
            args[multi] = miss
            r.update(self.method(self2, cr, *args))
            for i in miss:
                args[multi] = i
                key = tuple(args[self.skiparg-2:])
                d[key] = r[i]
        return r
class dummy_cache(object):
    """Drop-in replacement for the cache decorators that caches nothing."""

    def __init__(self, *l, **kw):
        # Accept and ignore any decorator configuration.
        pass

    def __call__(self, fn):
        # Return the function untouched, but keep the `clear_cache` API
        # so callers written against the real decorators still work.
        fn.clear_cache = self.clear
        return fn

    def clear(self, *l, **kw):
        # Nothing is cached, so there is nothing to clear.
        pass
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 syntax): exercises both decorators and
    # prints the cache contents before/after an explicit invalidation.
    class A():
        @ormcache()
        def m(self,a,b):
            print "A::m(", self,a,b
            return 1

        @ormcache_multi(multi=3)
        def n(self,cr,uid,ids):
            print "m", self,cr,uid,ids
            return dict([(i,i) for i in ids])

    a=A()
    # Second identical call should be served from the cache.
    r=a.m(1,2)
    r=a.m(1,2)
    # Partial overlap: [1,2] should be all hits after the first call.
    r=a.n("cr",1,[1,2,3,4])
    r=a.n("cr",1,[1,2])
    print r
    for i in a._ormcache:
        print a._ormcache[i].d
    a.n.clear_cache(a,1,1)
    r=a.n("cr",1,[1,2])
    print r
    r=a.n("cr",1,[1,2])
# For backward compatibility: older code imports `cache`, which is now
# just an alias of the LRU decorator.
cache = ormcache

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
christophreimer/pygeobase | pygeobase/object_base.py | 1 | 4070 | # Copyright (c) 2015, Vienna University of Technology, Department of Geodesy
# and Geoinformation. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology, Department of
# Geodesy and Geoinformation nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pandas as pd
import numpy as np
class TS(object):
    """Base object representing one time series at a single grid point."""

    def __init__(self, gpi, lon, lat, data, metadata):
        """
        Initialize the time series object.

        Parameters
        ----------
        gpi : int
            Grid point index of the time series.
        lon : float
            Longitude of the time series.
        lat : float
            Latitude of the time series.
        data : pandas.DataFrame
            DataFrame holding the data for each variable of the series.
        metadata : dict
            Dictionary holding metadata.
        """
        self.gpi = gpi
        self.lon = lon
        self.lat = lat
        self.data = data
        self.metadata = metadata

    def __repr__(self):
        return "Time series gpi:%d lat:%2.3f lon:%3.3f" % (self.gpi,
                                                           self.lat,
                                                           self.lon)

    def plot(self, *args, **kwargs):
        """
        Wrapper around pandas.DataFrame.plot that drops all-NaN rows
        before plotting and titles the plot with this series' repr.

        Returns
        -------
        ax : axes
            matplotlib axes of the plot.
        """
        cleaned = self.data.dropna(how='all')
        ax = cleaned.plot(*args, figsize=(15, 5), **kwargs)
        ax.set_title(repr(self))
        return ax
class Image(object):
    """Base object representing one image (a snapshot at one timestamp)."""

    def __init__(self, lon, lat, data, metadata, timestamp, timekey='jd'):
        """
        Initialize the image object.

        Parameters
        ----------
        lon : numpy.array
            Array of longitudes.
        lat : numpy.array
            Array of latitudes.
        data : dict
            Dictionary of numpy arrays holding the image data for each
            variable of the dataset.
        metadata : dict
            Dictionary holding metadata.
        timestamp : datetime.datetime
            Exact timestamp of the image.
        timekey : str, optional
            Key of the time variable, if available, in the data dict.
        """
        self.timestamp = timestamp
        self.timekey = timekey
        self.lon = lon
        self.lat = lat
        self.data = data
        self.metadata = metadata
| bsd-3-clause |
jeffchao/xen-3.3-tcg | dist/install/usr/lib/python/xen/xend/XendStateStore.py | 6 | 8956 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (c) 2006 Xensource Inc.
#============================================================================
import os
from xen.xend import uuid
from xen.xend.XendLogging import log
from xml.dom import minidom
from xml.dom import Node
class XendStateStore:
    """Manages persistent storage of Xend's internal state, mainly
    relating to API objects.

    It stores objects atomically in the file system as flat XML files
    categorised by their 'class'.

    For example:

    /var/lib/xend/state/cpu.xml  will contain the host cpu state
    /var/lib/xend/state/sr.xml   will contain the storage repository state.

    For the application, it will load the state via this class:

    load_state('cpu') will return a marshalled dictionary object
    containing the cpu state.

    save_state('cpu', dict) will save the state contained in the dictionary
    object about the 'cpu'.

    The state is stored where each top level element has a UUID in its
    attributes. eg:

    host['49c01812-3c28-1ad4-a59d-2a3f81b13ec2'] = {
      'name': 'norwich',
      'desc': 'Test Xen Host',
      'cpu': {'6fc2d1ed-7eb0-4c9d-8006-3657d5483ae0': <obj>,
              '669df3b8-62be-4e61-800b-bbe8ee63a760': <obj>}
    }

    will turn into:

    <hosts>
       <host uuid='49c01812-3c28-1ad4-a59d-2a3f81b13ec2'>
          <name type='string'>norwich</name>
          <description type='string'>Test Xen Host</description>
          <cpu type='dict'>
             <item uuid='6fc2d1ed-7eb0-4c9d-8006-3657d5483ae0' />
             <item uuid='669df3b8-62be-4e61-800b-bbe8ee63a760' />
          </cpu>
       </host>
    </hosts>

    Note that it only dumps one level, so the references to CPU are
    stored in a separate file.
    """

    def __init__(self, base = "/var/lib/xend/state"):
        self.base = base
        if not os.path.exists(self.base):
            os.makedirs(self.base)

    def _xml_file(self, cls):
        """Return the absolute filename of the XML state storage file.

        @param cls: name of the class.
        @type cls: string
        @rtype: string
        @return absolute filename of XML file to write/read from.
        """
        return os.path.join(self.base, '%s.xml' % cls)

    def load_state(self, cls):
        """Load the saved state of a class from persistent XML storage.

        References loaded from the XML will just point to an empty
        dictionary which the caller will need to replace manually.

        @param cls: name of the class to load.
        @type cls: string
        @rtype: dict (empty if the XML file does not exist yet)
        """
        xml_path = self._xml_file(cls)
        if not os.path.exists(xml_path):
            return {}

        dom = minidom.parse(xml_path)
        root = dom.documentElement
        state = {}

        for child in root.childNodes:
            if child.nodeType != Node.ELEMENT_NODE:
                continue # skip non element nodes

            uuid = child.getAttribute('uuid').encode('utf8')
            cls_dict = {}
            for val_elem in child.childNodes:
                if val_elem.nodeType != Node.ELEMENT_NODE:
                    continue # skip non element nodes

                val_name = val_elem.tagName
                val_type = val_elem.getAttribute('type').encode('utf8')
                val_uuid = val_elem.getAttribute('uuid').encode('utf8')
                val_elem.normalize()
                val_text = ''
                if val_elem.firstChild:
                    val_text = val_elem.firstChild.nodeValue.strip()

                # Convert stored text back to the declared Python type.
                if val_type == 'list':
                    cls_dict[val_name] = []
                    for item in val_elem.childNodes:
                        if item.nodeType != Node.ELEMENT_NODE:
                            continue # skip non element nodes
                        cls_dict[val_name].append(item.getAttribute('uuid'))
                elif val_type == 'dict':
                    cls_dict[val_name] = {}
                    for item in val_elem.childNodes:
                        if item.nodeType != Node.ELEMENT_NODE:
                            continue # skip non element nodes
                        k = item.getAttribute('key').encode('utf8')
                        v = item.getAttribute('value').encode('utf8')
                        cls_dict[val_name][k] = v
                elif val_type == 'string':
                    cls_dict[val_name] = val_text.encode('utf8')
                elif val_type == 'float':
                    cls_dict[val_name] = float(val_text)
                elif val_type == 'int':
                    cls_dict[val_name] = int(val_text)
                elif val_type == 'bool':
                    cls_dict[val_name] = bool(int(val_text))
            state[uuid] = cls_dict

        return state

    def save_state(self, cls, state):
        """Save a Xen API record struct into an XML persistent storage
        for future loading when Xend restarts.

        If we encounter a dictionary or a list, we only store the
        keys because they are going to be UUID references to another
        object.

        @param cls: Class name (singular) of the record
        @type cls: string
        @param state: a Xen API struct of the state of the class.
        @type state: dict
        @rtype: None
        """
        xml_path = self._xml_file(cls)

        doc = minidom.getDOMImplementation().createDocument(None,
                                                            cls + 's',
                                                            None)
        root = doc.documentElement

        # Marshall a dictionary into our custom XML file format.
        for uuid, info in state.items():
            node = doc.createElement(cls)
            root.appendChild(node)
            node.setAttribute('uuid', uuid)

            for key, val in info.items():
                store_val = val
                store_type = None

                # deal with basic types (note: bool is tested by exact
                # type, so it is not swallowed by the int branch above it)
                if type(val) in (str, unicode):
                    store_val = val
                    store_type = 'string'
                elif type(val) == int:
                    store_val = str(val)
                    store_type = 'int'
                elif type(val) == float:
                    store_val = str(val)
                    store_type = 'float'
                elif type(val) == bool:
                    store_val = str(int(val))
                    store_type = 'bool'

                if store_type is not None:
                    val_node = doc.createElement(key)
                    val_node.setAttribute('type', store_type)
                    node.appendChild(val_node)
                    # attach the value
                    val_text = doc.createTextNode(store_val)
                    val_node.appendChild(val_text)
                    continue

                # deal with dicts and lists: only references are stored
                if type(val) == dict:
                    val_node = doc.createElement(key)
                    val_node.setAttribute('type', 'dict')
                    for val_item in val.keys():
                        tmp = doc.createElement("item")
                        if key in ['other_config', 'device_config']:
                            tmp.setAttribute('key', str(val_item))
                            tmp.setAttribute('value', str(val[val_item]))
                        else:
                            # BUGFIX: this previously referenced the
                            # undefined name `val_uuid` (a NameError);
                            # the UUID reference is the dict key itself.
                            tmp.setAttribute('uuid', str(val_item))
                        val_node.appendChild(tmp)
                    node.appendChild(val_node)
                elif type(val) in (list, tuple):
                    val_node = doc.createElement(key)
                    val_node.setAttribute('type', 'list')
                    for val_uuid in val:
                        tmp = doc.createElement("item")
                        tmp.setAttribute('uuid', val_uuid)
                        val_node.appendChild(tmp)
                    node.appendChild(val_node)

        # Close the file handle deterministically (was previously leaked).
        f = open(xml_path, 'w')
        try:
            f.write(doc.toprettyxml())
        finally:
            f.close()
| gpl-2.0 |
tudorvio/nova | nova/tests/functional/v3/test_security_groups.py | 18 | 7401 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.network.security_group import neutron_driver
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
def fake_get(*args, **kwargs):
    """Stub returning a canonical 'default' security group record."""
    return {
        'id': 1,
        'description': 'default',
        'name': 'default',
        'project_id': 'openstack',
        'rules': [],
    }
def fake_get_instances_security_groups_bindings(self, context, servers,
                                                detailed=False):
    """Stub mapping every server id to a single 'test' security group."""
    return {s.get('id'): [{'name': 'test'}] for s in servers}
def fake_add_to_instance(self, context, instance, security_group_name):
    # No-op stub: group membership changes are not exercised by this test.
    pass
def fake_remove_from_instance(self, context, instance, security_group_name):
    # No-op stub: group membership changes are not exercised by this test.
    pass
def fake_list(self, context, names=None, ids=None, project=None,
              search_opts=None):
    # Always report exactly one group: the stubbed 'default' group.
    return [fake_get()]
def fake_get_instance_security_groups(self, context, instance_uuid,
                                      detailed=False):
    # Every instance is reported as belonging to the stubbed group.
    return [fake_get()]
def fake_create_security_group(self, context, name, description):
    # Creation always "succeeds" and returns the stubbed group record.
    return fake_get()
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
    """API-sample tests for the os-security-groups extension, with the
    neutron security group driver fully stubbed out."""
    extension_name = 'os-security-groups'
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'

    def _get_flags(self):
        # Extend the base flags with the extensions this test depends on.
        f = super(SecurityGroupsJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.security_groups.'
            'Security_groups')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.keypairs.Keypairs')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.extended_ips.Extended_ips')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.extended_ips_mac.'
            'Extended_ips_mac')
        return f

    def setUp(self):
        # Route security group calls through the (stubbed) neutron driver.
        self.flags(security_group_api=('neutron'))
        super(SecurityGroupsJsonTest, self).setUp()
        self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'get_instances_security_groups_bindings',
                       fake_get_instances_security_groups_bindings)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'add_to_instance',
                       fake_add_to_instance)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'remove_from_instance',
                       fake_remove_from_instance)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'list',
                       fake_list)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'get_instance_security_groups',
                       fake_get_instance_security_groups)
        self.stubs.Set(neutron_driver.SecurityGroupAPI,
                       'create_security_group',
                       fake_create_security_group)

    def test_server_create(self):
        self._post_server(use_common_server_api_samples=False)

    def test_server_get(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        subs['access_ip_v4'] = '1.2.3.4'
        subs['access_ip_v6'] = '80fe::'
        self._verify_response('server-get-resp', subs, response, 200)

    def test_server_detail(self):
        self._post_server(use_common_server_api_samples=False)
        response = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        subs['access_ip_v4'] = '1.2.3.4'
        subs['access_ip_v6'] = '80fe::'
        self._verify_response('servers-detail-resp', subs, response, 200)

    def _get_create_subs(self):
        # Template substitutions shared by create request/response samples.
        return {
            'group_name': 'default',
            "description": "default",
        }

    def _create_security_group(self):
        subs = self._get_create_subs()
        return self._do_post('os-security-groups',
                             'security-group-post-req', subs)

    def _add_group(self, uuid):
        # Attach the 'test' group to the given server via a server action.
        subs = {
            'group_name': 'test'
        }
        return self._do_post('servers/%s/action' % uuid,
                             'security-group-add-post-req', subs)

    def test_security_group_create(self):
        response = self._create_security_group()
        subs = self._get_create_subs()
        self._verify_response('security-groups-create-resp', subs,
                              response, 200)

    def test_security_groups_list(self):
        # Get api sample of security groups get list request.
        response = self._do_get('os-security-groups')
        subs = self._get_regexes()
        self._verify_response('security-groups-list-get-resp',
                              subs, response, 200)

    def test_security_groups_get(self):
        # Get api sample of security groups get request.
        security_group_id = '11111111-1111-1111-1111-111111111111'
        response = self._do_get('os-security-groups/%s' % security_group_id)
        subs = self._get_regexes()
        self._verify_response('security-groups-get-resp', subs, response, 200)

    def test_security_groups_list_server(self):
        # Get api sample of security groups for a specific server.
        uuid = self._post_server(use_common_server_api_samples=False)
        response = self._do_get('servers/%s/os-security-groups' % uuid)
        subs = self._get_regexes()
        self._verify_response('server-security-groups-list-resp',
                              subs, response, 200)

    def test_security_groups_add(self):
        self._create_security_group()
        uuid = self._post_server(use_common_server_api_samples=False)
        response = self._add_group(uuid)
        # The action returns 202 Accepted with an empty body.
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')

    def test_security_groups_remove(self):
        self._create_security_group()
        uuid = self._post_server(use_common_server_api_samples=False)
        self._add_group(uuid)
        subs = {
            'group_name': 'test'
        }
        response = self._do_post('servers/%s/action' % uuid,
                                 'security-group-remove-post-req', subs)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, '')
| apache-2.0 |
RigsOfRods/rigs-of-rods | tools/blender/2.82/ror_import/__init__.py | 4 | 5741 | # <pep8-80 compliant>
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "RoR Importer",
    "author": "ulteq",
    "version": (0, 0, 2),
    "blender": (2, 82, 0),
    "category": "RoR",
}
import bpy
import json
import bmesh
from bpy.props import StringProperty
from bpy_extras.io_utils import ImportHelper
def register():
    """Register the importer operator and hook it into File > Import."""
    # Enable the developer UI so operators can be inspected easily.
    bpy.context.preferences.view.show_developer_ui = True
    bpy.utils.register_class(ROR_OT_importer)
    bpy.types.TOPBAR_MT_file_import.append(menu_func)
    return
def unregister():
    """Remove the menu entry and unregister the importer operator."""
    bpy.types.TOPBAR_MT_file_import.remove(menu_func)
    bpy.utils.unregister_class(ROR_OT_importer)
    return
def menu_func(self, context):
    # Adds the "Truck (.truck)" entry to the File > Import menu.
    self.layout.operator(ROR_OT_importer.bl_idname, text="Truck (.truck)")
class ROR_OT_importer(bpy.types.Operator, ImportHelper):
    """Blender operator that imports a Rigs of Rods truck file as a mesh:
    nodes become vertices, beams become edges and cab triangles become
    faces.  The non-geometry lines of the truck file are preserved on the
    object so the file can be reassembled on export."""
    bl_idname = "import_truck.truck"
    bl_label = "Import RoR Truck"
    filename_ext = ""
    filter_glob : StringProperty(
        default="*.truck;*.trailer;*.load;*.car;*.boat;*.airplane;*.train;*.machine;*.fixed",
        options={'HIDDEN'},
    )
    filepath : bpy.props.StringProperty(subtype="FILE_PATH")

    def execute(self, context):
        # Lines that are not node/beam/cab geometry, kept verbatim.
        truckfile = []
        # Insertion indices remember where each section sat in the file.
        node_idx = 0
        nodes = []
        beam_idx = 0
        beams = []
        cab_idx = 0
        cabs = []
        with open(self.filepath, 'r') as f:
            node_defaults = ''
            beam_defaults = ''
            mode = 'ignore'
            groups = []
            for line in f:
                line = line.strip()
                # Comments / blank lines, except ';grp:' group annotations.
                if not line or line[0] == ';':
                    if mode == 'nodes' and line[:5] == ';grp:':
                        groups = [g.strip() for g in line[5:].split(',')]
                    elif mode == 'beams' and line[:5] == ';grp:':
                        pass
                    else:
                        truckfile.append(line)
                    continue
                args = line.replace(',', ' ').split()
                # 'set_*' directives update the current section defaults.
                if not args or "set_" in args[0]:
                    if args and mode == 'nodes' and "set_n" in args[0]:
                        node_defaults = line
                    if args and mode == 'beams' and "set_b" in args[0]:
                        beam_defaults = line
                    else:
                        truckfile.append(line)
                    continue
                # Section headers switch parsing mode and record position.
                if args[0] == 'nodes':
                    mode = 'nodes'
                    node_defaults = ''
                    node_idx = len(truckfile)
                    continue
                elif args[0] == 'beams':
                    mode = 'beams'
                    beam_defaults = ''
                    beam_idx = len(truckfile)
                    continue
                elif args[0] == 'cab':
                    mode = 'cab'
                    cab_idx = len(truckfile)
                    continue
                elif not args[0].isdigit() or mode == 'ignore':
                    # Any other section is passed through untouched.
                    truckfile.append(line)
                    mode = 'ignore'
                # Numeric data line inside a geometry section.
                if mode == 'nodes':
                    nodes.append([node_defaults] + [groups] + args[1:])
                elif mode == 'beams':
                    beams.append([beam_defaults] + args)
                elif mode == 'cab':
                    cabs.append(args)

        # Create the object and make it the active selection.
        mesh = bpy.data.meshes.new(self.filepath + "-mesh")
        obj = bpy.data.objects.new(self.filepath + "-obj", mesh)
        bpy.context.collection.objects.link(obj)
        bpy.context.view_layer.objects.active = obj
        obj.select_set(True)

        # Store the remaining truck file text and the section insertion
        # points as custom properties for a later export round-trip.
        bpy.types.Object.RoRTruckFile = bpy.props.StringProperty()
        bpy.context.active_object.RoRTruckFile = json.dumps(truckfile)
        if (beam_idx < node_idx):
            beam_idx = len(truckfile)
        if (cab_idx < beam_idx):
            cab_idx = len(truckfile)
        indices = [node_idx, beam_idx, cab_idx]
        bpy.types.Object.RoRInsertIndices = bpy.props.StringProperty()
        bpy.context.active_object.RoRInsertIndices = json.dumps(indices)

        mesh = bpy.context.object.data
        bm = bmesh.new()
        dl = bm.verts.layers.deform.verify()
        # Per-vertex string layers keep the 'set_n' defaults and options.
        defaults_key = bm.verts.layers.string.new("defaults")
        options_key = bm.verts.layers.string.new("options")
        for n in nodes:
            try:
                # Truck files are Z-up with a different axis order:
                # (x, y, z)_blender = (n[4], n[2], n[3]) — TODO confirm.
                v = bm.verts.new((float(n[4]), float(n[2]), float(n[3])))
                bm.verts.ensure_lookup_table()
                bm.verts.index_update()
                bm.verts[-1][defaults_key] = n[0].encode()
                bm.verts[-1][options_key] = ' '.join(n[5:]).encode()
                # Assign the node to its ';grp:' vertex groups.
                for g in n[1]:
                    vg = obj.vertex_groups.get(g)
                    if not vg:
                        vg = obj.vertex_groups.new(name=g)
                    v[dl][vg.index] = 1.0
            except:
                print ("Failed to add vertex:", n)
        defaults_key = bm.edges.layers.string.new("defaults")
        options_key = bm.edges.layers.string.new("options")
        for b in beams:
            try:
                bm.edges.new((bm.verts[int(i)] for i in b[1:3]))
                bm.edges.ensure_lookup_table()
                bm.edges[-1][defaults_key] = b[0].encode()
                bm.edges[-1][options_key] = ' '.join(b[3:]).encode()
            except:
                print ("Failed to add edge:", b)
        options_key = bm.faces.layers.string.new("options")
        for c in cabs:
            try:
                bm.faces.new((bm.verts[int(i)] for i in c[:3]))
                bm.faces.ensure_lookup_table()
                bm.faces[-1][options_key] = ' '.join(c[3:]).encode()
            except:
                print ("Failed to add face:", c)
        bm.to_mesh(mesh)
        bm.free()
        return {'FINISHED'}
| gpl-3.0 |
AlohaWorld/TR | UHCF/common/userQuality.py | 2 | 1780 | #!/env/python
# -*- encoding: utf-8 -*-
"""
@version: 0.1
@author: wenzhiquan
@contact: wenzhiquanr@163.com
@site: http://github.wenzhiquan.com
@software: PyCharm
@file: userQuality.py
@time: 16/5/19 下午3:50
@description: null
"""
from datetime import datetime
from config import config
from lib import stdLib
def userQuality(functionName='lifeTime'):
    """Dispatch a user-quality computation by name.

    Only the 'lifeTime' metric is implemented; unknown names are a no-op.
    """
    handlers = {'lifeTime': lifeTime}
    handler = handlers.get(functionName)
    if handler is not None:
        handler()
def lifeTime():
    """Compute each user's account lifetime (in days) and persist it.

    Reads the rating file configured in ``config.ratingWithLabelFile``
    (one rating per line: user, item, rating, unix-timestamp, separated
    by ``config.separator``), measures the span between each user's first
    and last rating, and dumps the resulting ``{user_id: days}`` dict to
    ``config.userQualityDict`` via ``stdLib.dumpData``.
    """
    # Use a context manager so the file handle is closed even on error.
    with open(config.ratingWithLabelFile, 'r') as read:
        data = read.readlines()

    userDict = dict()
    # Sentinels: min starts at "now", max at the epoch, so the first
    # rating of a user initialises both bounds correctly.
    maxTime = datetime.utcfromtimestamp(0)
    initTime = datetime.utcnow()
    resultDict = dict()
    for i in data:
        tmp = i[:-1].split(config.separator)
        userId = tmp[0]
        time = datetime.utcfromtimestamp(float(tmp[3]))
        userDict.setdefault(userId, {'max': maxTime, 'min': initTime, 'freq': 0})
        if time > userDict[userId]['max']:
            userDict[userId]['max'] = time
        if time < userDict[userId]['min']:
            userDict[userId]['min'] = time
        userDict[userId]['freq'] += 1

    for i in userDict:
        # Lifetime is the whole-day span between first and last activity.
        resultDict[i] = (userDict[i]['max'] - userDict[i]['min']).days

    stdLib.dumpData(resultDict, config.userQualityDict)
stdLib.dumpData(resultDict, outfile)
| apache-2.0 |
soldag/home-assistant | homeassistant/scripts/__init__.py | 13 | 2348 | """Home Assistant command line scripts."""
import argparse
import asyncio
import importlib
import logging
import os
import sys
from typing import List, Optional, Sequence, Text
from homeassistant import runner
from homeassistant.bootstrap import async_mount_local_lib_path
from homeassistant.config import get_default_config_dir
from homeassistant.requirements import pip_kwargs
from homeassistant.util.package import install_package, is_installed, is_virtual_env
# mypy: allow-untyped-defs, no-warn-return-any
def run(args: List) -> int:
    """Run a Home Assistant command line script.

    ``args[0]`` names a module (or package) inside homeassistant.scripts;
    the remaining arguments are forwarded to that script's own ``run()``.
    Returns 1 on bad usage or a failed dependency install, otherwise the
    selected script's exit code.
    """
    path = os.path.dirname(__file__)

    # Discover available scripts: sub-packages and plain modules.
    scripts = []
    for entry in os.listdir(path):
        if entry == "__pycache__":
            continue
        if os.path.isdir(os.path.join(path, entry)):
            scripts.append(entry)
        elif entry.endswith(".py") and entry != "__init__.py":
            scripts.append(entry[:-3])

    if not args or args[0] not in scripts:
        print(
            "Please specify a script to run."
            if not args
            else "Invalid script specified."
        )
        print("Available scripts:", ", ".join(scripts))
        return 1

    script = importlib.import_module(f"homeassistant.scripts.{args[0]}")

    config_dir = extract_config_dir()

    loop = asyncio.get_event_loop()
    if not is_virtual_env():
        loop.run_until_complete(async_mount_local_lib_path(config_dir))

    _pip_kwargs = pip_kwargs(config_dir)

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Install any script-declared requirement that is not present yet.
    missing = (
        req
        for req in getattr(script, "REQUIREMENTS", [])
        if not is_installed(req)
    )
    for req in missing:
        if not install_package(req, **_pip_kwargs):
            print("Aborting script, could not install dependency", req)
            return 1

    asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))

    return script.run(args[1:])  # type: ignore
def extract_config_dir(args: Optional[Sequence[Text]] = None) -> str:
    """Extract the config dir from the arguments or get the default."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-c", "--config", default=None)
    config = parser.parse_known_args(args)[0].config
    if config is None:
        return get_default_config_dir()
    return os.path.join(os.getcwd(), config)
| apache-2.0 |
buuav/featureTracker | SurfTracker.py | 1 | 3881 | __author__ = 'gauravhirlekar'
def init_feature():
    """Build the SURF detector / FLANN matcher pair used by the tracker."""
    # 500 is the Hessian threshold: higher -> fewer but stronger keypoints.
    surf = cv2.SURF(500)
    FLANN_INDEX_KDTREE = 1
    flann = cv2.FlannBasedMatcher(
        dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
        dict(checks=100))  # Or pass empty dictionary
    return surf, flann
def explore_match(win, img1, img2, kp_pairs, status=None, H=None):
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
vis[:h1, :w1] = img1
vis[:h2, w1:w1 + w2] = img2
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
if len(kp_pairs) is 0:
cv2.imshow(win, vis)
return vis
if H is not None and len(status) > 10:
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32(cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
cv2.polylines(vis, [corners], True, (0, 0, 255), thickness=2)
print cv2.perspectiveTransform(np.float32([w1/2, h1/2]).reshape(1, -1, 2), H).reshape(-1, 2)-np.float32(w1/2)
if status is None:
status = np.ones(len(kp_pairs), np.bool_)
p1 = np.int32([kpp[0].pt for kpp in kp_pairs])
p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
green = (0, 255, 0)
red = (0, 0, 255)
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
col = green
cv2.circle(vis, (x1, y1), 2, col, -1)
cv2.circle(vis, (x2, y2), 2, col, -1)
cv2.line(vis, (x1, y1), (x2, y2), green)
else:
col = red
r = 2
thickness = 3
cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)
cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)
cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness)
cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness)
cv2.imshow(win, vis)
return vis
def filter_matches(kp1, kp2, matches, ratio=0.75):
    """Apply Lowe's ratio test and return matched point arrays + pairs."""
    kp_pairs = []
    for best, second in ((m[0], m[1]) for m in matches):
        # Keep a match only when the best candidate is clearly closer than
        # the runner-up (Lowe's ratio test; 0.75 is the customary ratio).
        if best.distance <= second.distance * ratio:
            kp_pairs.append((kp1[best.queryIdx], kp2[best.trainIdx]))
    p1 = np.float32([pair[0].pt for pair in kp_pairs])
    p2 = np.float32([pair[1].pt for pair in kp_pairs])
    return p1, p2, kp_pairs
if __name__ == '__main__':
    import cv2
    import numpy as np

    winName = 'Detector'
    # Template image to track, loaded as grayscale.
    img1 = cv2.imread('sample.png', 0)
    detector, matcher = init_feature()
    kp1, desc1 = detector.detectAndCompute(img1, None)
    # Capture device 0 -- presumably the default webcam.
    cap = cv2.VideoCapture(0)
    cv2.namedWindow(winName)
    while True:
        s, img2 = cap.read()
        # Convert each frame to grayscale and normalise its size.
        img2 = cv2.resize(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY), (640, 480))
        # img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        kp2, desc2 = detector.detectAndCompute(img2, None)
        if desc2 is None:
            # Nothing detected in this frame; try the next one.
            # print "No descriptors found"
            continue
        raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
        # knnMatch gives k closest matched keypoints with a L2 norm distance
        p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches, 0.7)
        # findHomography needs at least 4 point correspondences.
        if len(p1) >= 4:
            H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
            print '%d / %d inliers/matched' % (np.sum(status), len(status))
        else:
            H, status = None, None
            print '%d matches found, not enough for homography estimation' % len(p1)
        vis = explore_match(winName, img1, img2, kp_pairs, status, H)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc key ends loop
            break
    cap.release()
    cv2.destroyAllWindows()
nfvlabs/openmano | openvim/openvimd.py | 1 | 15596 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
'''
This is the main program of openvim, it reads the configuration
and launches the rest of threads: http clients, openflow controller
and host controllers
'''
__author__="Alfonso Tierno"
__date__ ="$10-jul-2014 12:07:15$"
__version__="0.4.5-r465"
version_date="Feb 2016"
database_version="0.7" #expected database schema version
import httpserver
from utils import auxiliary_functions as af
import sys
import getopt
import time
import vim_db
import yaml
import os
from jsonschema import validate as js_v, exceptions as js_e
import host_thread as ht
import dhcp_thread as dt
import openflow_thread as oft
import threading
from vim_schema import config_schema
import logging
import imp
global config_dic
global logger
logger = logging.getLogger('vim')
def load_configuration(configuration_file):
    """Load and validate the openvimd YAML configuration file.

    Returns (True, config_dict) on success, or (False, error_text) on any
    failure: missing file, unreadable file, YAML syntax error, schema
    violation or an invalid vlan range.  Missing optional tokens are
    filled in from default_tokens.
    """
    default_tokens ={'http_port':9080, 'http_host':'localhost',
                     'of_controller_nets_with_same_vlan':True,
                     'image_path':'/opt/VNF/images',
                     'network_vlan_range_start':1000,
                     'network_vlan_range_end': 4096,
                     'log_level': "DEBUG",
                     'log_level_db': "ERROR",
                     'log_level_of': 'ERROR',
            }
    try:
        #First load configuration from configuration file
        #Check config file exists
        if not os.path.isfile(configuration_file):
            return (False, "Configuration file '"+configuration_file+"' does not exists")

        #Read and parse file
        (return_status, code) = af.read_file(configuration_file)
        if not return_status:
            return (return_status, "Error loading configuration file '"+configuration_file+"': "+code)
        try:
            # NOTE(review): yaml.load without SafeLoader executes arbitrary
            # YAML tags; acceptable only because the config file is trusted
            # local input.
            config = yaml.load(code)
        except yaml.YAMLError, exc:
            error_pos = ""
            if hasattr(exc, 'problem_mark'):
                mark = exc.problem_mark
                error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
            return (False, "Error loading configuration file '"+configuration_file+"'"+error_pos+": content format error: Failed to parse yaml format")

        # Validate the parsed document against the openvim config schema.
        try:
            js_v(config, config_schema)
        except js_e.ValidationError, exc:
            error_pos = ""
            if len(exc.path)>0: error_pos=" at '" + ":".join(map(str, exc.path))+"'"
            return False, "Error loading configuration file '"+configuration_file+"'"+error_pos+": "+exc.message

        #Check default values tokens
        for k,v in default_tokens.items():
            if k not in config: config[k]=v

        #Check vlan ranges
        if config["network_vlan_range_start"]+10 >= config["network_vlan_range_end"]:
            return False, "Error invalid network_vlan_range less than 10 elements"

    except Exception,e:
        return (False, "Error loading configuration file '"+configuration_file+"': "+str(e))
    return (True, config)
def create_database_connection(config_dic):
    """Open a connection to the VIM database described by config_dic.

    Terminates the process on connection failure; otherwise returns the
    connected vim_db handle.
    """
    vlan_range = (config_dic["network_vlan_range_start"],
                  config_dic["network_vlan_range_end"])
    db = vim_db.vim_db(vlan_range, config_dic['log_level_db'])
    connected = db.connect(config_dic['db_host'], config_dic['db_user'],
                           config_dic['db_passwd'], config_dic['db_name'])
    if connected == -1:
        logger.error("Cannot connect to database %s at %s@%s",
                     config_dic['db_name'], config_dic['db_user'],
                     config_dic['db_host'])
        exit(-1)
    return db
def usage():
    """Print the command line help for openvimd."""
    print "Usage: ", sys.argv[0], "[options]"
    print " -v|--version: prints current version"
    print " -c|--config [configuration_file]: loads the configuration file (default: openvimd.cfg)"
    print " -h|--help: shows this help"
    print " -p|--port [port_number]: changes port number and overrides the port number in the configuration file (default: 9090)"
    print " -P|--adminport [port_number]: changes admin port number and overrides the port number in the configuration file (default: 9095)"
    return
if __name__=="__main__":
    # Bootstrap logging at DEBUG until the configured level is known.
    #streamformat = "%(levelname)s (%(module)s:%(lineno)d) %(message)s"
    streamformat = "%(asctime)s %(name)s %(levelname)s: %(message)s"
    logging.basicConfig(format=streamformat, level= logging.DEBUG)
    logger.setLevel(logging.DEBUG)
    # Parse command line options; they override the configuration file.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hvc:p:P:", ["config", "help", "version", "port", "adminport"])
    except getopt.GetoptError, err:
        # print help information and exit:
        logger.error("%s. Type -h for help", err) # will print something like "option -a not recognized"
        #usage()
        sys.exit(-2)

    port=None
    port_admin = None
    config_file = 'openvimd.cfg'

    for o, a in opts:
        if o in ("-v", "--version"):
            print "openvimd version", __version__, version_date
            print "(c) Copyright Telefonica"
            sys.exit(0)
        elif o in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif o in ("-c", "--config"):
            config_file = a
        elif o in ("-p", "--port"):
            port = a
        elif o in ("-P", "--adminport"):
            port_admin = a
        else:
            assert False, "Unhandled option"

    try:
        #Load configuration file
        r, config_dic = load_configuration(config_file)
        #print config_dic
        if not r:
            logger.error(config_dic)
            config_dic={}
            exit(-1)
        logging.basicConfig(level = getattr(logging, config_dic['log_level']))
        logger.setLevel(getattr(logging, config_dic['log_level']))
        #override parameters obtained by command line
        if port is not None: config_dic['http_port'] = port
        if port_admin is not None: config_dic['http_admin_port'] = port_admin

        #check mode
        if 'mode' not in config_dic:
            config_dic['mode'] = 'normal'
            #allow backward compatibility of test_mode option
            if 'test_mode' in config_dic and config_dic['test_mode']==True:
                config_dic['mode'] = 'test'
        if config_dic['mode'] == 'development' and ( 'development_bridge' not in config_dic or config_dic['development_bridge'] not in config_dic.get("bridge_ifaces",None) ):
            logger.error("'%s' is not a valid 'development_bridge', not one of the 'bridge_ifaces'", config_file)
            exit(-1)

        if config_dic['mode'] != 'normal':
            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print "!! Warning, openvimd in TEST mode '%s'" % config_dic['mode']
            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
        config_dic['version'] = __version__

        #Connect to database and check that its schema matches this release.
        db_http = create_database_connection(config_dic)
        r = db_http.get_db_version()
        if r[0]<0:
            logger.error("DATABASE is not a VIM one or it is a '0.0' version. Try to upgrade to version '%s' with './database_utils/migrate_vim_db.sh'", database_version)
            exit(-1)
        elif r[1]!=database_version:
            logger.error("DATABASE wrong version '%s'. Try to upgrade/downgrade to version '%s' with './database_utils/migrate_vim_db.sh'", r[1], database_version)
            exit(-1)
        db_of = create_database_connection(config_dic)
        db_lock= threading.Lock()
        config_dic['db'] = db_of
        config_dic['db_lock'] = db_lock

        #precreate interfaces; [bridge:<host_bridge_name>, VLAN used at Host, uuid of network camping in this bridge, speed in Gbit/s
        config_dic['dhcp_nets']=[]
        config_dic['bridge_nets']=[]
        for bridge,vlan_speed in config_dic["bridge_ifaces"].items():
            #skip 'development_bridge'
            if config_dic['mode'] == 'development' and config_dic['development_bridge'] == bridge:
                continue
            config_dic['bridge_nets'].append( [bridge, vlan_speed[0], vlan_speed[1], None] )
        del config_dic["bridge_ifaces"]

        #check if this bridge is already used (present at database) for a network)
        used_bridge_nets=[]
        for brnet in config_dic['bridge_nets']:
            r,nets = db_of.get_table(SELECT=('uuid',), FROM='nets',WHERE={'provider': "bridge:"+brnet[0]})
            if r>0:
                brnet[3] = nets[0]['uuid']
                used_bridge_nets.append(brnet[0])
                if config_dic.get("dhcp_server"):
                    if brnet[0] in config_dic["dhcp_server"]["bridge_ifaces"]:
                        config_dic['dhcp_nets'].append(nets[0]['uuid'])
        if len(used_bridge_nets) > 0 :
            logger.info("found used bridge nets: " + ",".join(used_bridge_nets))
        #get nets used by dhcp
        if config_dic.get("dhcp_server"):
            for net in config_dic["dhcp_server"].get("nets", () ):
                r,nets = db_of.get_table(SELECT=('uuid',), FROM='nets',WHERE={'name': net})
                if r>0:
                    config_dic['dhcp_nets'].append(nets[0]['uuid'])

        # get host list from data base before starting threads
        r,hosts = db_of.get_table(SELECT=('name','ip_name','user','uuid'), FROM='hosts', WHERE={'status':'ok'})
        if r<0:
            logger.error("Cannot get hosts from database %s", hosts)
            exit(-1)
        # create connector to the openflow controller
        of_test_mode = False if config_dic['mode']=='normal' or config_dic['mode']=="OF only" else True

        if of_test_mode:
            OF_conn = oft.of_test_connector({"of_debug": config_dic['log_level_of']} )
        else:
            #load other parameters starting by of_ from config dict in a temporal dict
            temp_dict={ "of_ip":  config_dic['of_controller_ip'],
                        "of_port": config_dic['of_controller_port'],
                        "of_dpid": config_dic['of_controller_dpid'],
                        "of_debug":   config_dic['log_level_of']
                }
            for k,v in config_dic.iteritems():
                if type(k) is str and k[0:3]=="of_" and k[0:13] != "of_controller":
                    temp_dict[k]=v
            # Resolve which connector module implements the controller.
            if config_dic['of_controller']=='opendaylight':
                module = "ODL"
            elif "of_controller_module" in config_dic:
                module = config_dic["of_controller_module"]
            else:
                module = config_dic['of_controller']
            module_info=None
            try:
                module_info = imp.find_module(module)

                OF_conn = imp.load_module("OF_conn", *module_info)
                try:
                    OF_conn = OF_conn.OF_conn(temp_dict)
                except Exception as e:
                    logger.error("Cannot open the Openflow controller '%s': %s", type(e).__name__, str(e))
                    # NOTE(review): 'file.close(...)' relies on the Python 2
                    # builtin 'file'; imp.find_module returned an open handle.
                    if module_info and module_info[0]:
                        file.close(module_info[0])
                    exit(-1)
            except (IOError, ImportError) as e:
                if module_info and module_info[0]:
                    file.close(module_info[0])
                logger.error("Cannot open openflow controller module '%s'; %s: %s; revise 'of_controller' field of configuration file.", module, type(e).__name__, str(e))
                exit(-1)

        #create openflow thread
        thread = oft.openflow_thread(OF_conn, of_test=of_test_mode, db=db_of,  db_lock=db_lock,
                        pmp_with_same_vlan=config_dic['of_controller_nets_with_same_vlan'],
                        debug=config_dic['log_level_of'])
        r,c = thread.OF_connector.obtain_port_correspondence()
        if r<0:
            logger.error("Cannot get openflow information %s", c)
            exit()
        thread.start()
        config_dic['of_thread'] = thread

        #create dhcp_server thread
        host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
        dhcp_params = config_dic.get("dhcp_server")
        if dhcp_params:
            thread = dt.dhcp_thread(dhcp_params=dhcp_params, test=host_test_mode, dhcp_nets=config_dic["dhcp_nets"], db=db_of, db_lock=db_lock, debug=config_dic['log_level_of'])
            thread.start()
            config_dic['dhcp_thread'] = thread

        #Create one thread for each host
        host_test_mode = True if config_dic['mode']=='test' or config_dic['mode']=="OF only" else False
        host_develop_mode = True if config_dic['mode']=='development' else False
        host_develop_bridge_iface = config_dic.get('development_bridge', None)
        config_dic['host_threads'] = {}
        for host in hosts:
            host['image_path'] = '/opt/VNF/images/openvim'
            thread = ht.host_thread(name=host['name'], user=host['user'], host=host['ip_name'], db=db_of, db_lock=db_lock,
                    test=host_test_mode, image_path=config_dic['image_path'], version=config_dic['version'],
                    host_id=host['uuid'], develop_mode=host_develop_mode, develop_bridge_iface=host_develop_bridge_iface )
            thread.start()
            config_dic['host_threads'][ host['uuid'] ] = thread

        #Create thread to listen to web requests
        http_thread = httpserver.httpserver(db_http, 'http', config_dic['http_host'], config_dic['http_port'], False, config_dic)
        http_thread.start()

        if 'http_admin_port' in config_dic:
            db_http = create_database_connection(config_dic)
            http_thread_admin = httpserver.httpserver(db_http, 'http-admin', config_dic['http_host'], config_dic['http_admin_port'], True)
            http_thread_admin.start()
        else:
            http_thread_admin = None
        time.sleep(1)
        logger.info('Waiting for http clients')
        print 'openvimd ready'
        print '===================='
        sys.stdout.flush()

        #TODO: Interactive console would be nice here instead of join or sleep

        r="help" #force print help at the beginning
        # Minimal interactive loop: stay alive until the operator types 'exit'.
        while True:
            if r=='exit':
                break
            elif r!='':
                print "type 'exit' for terminate"
            r = raw_input('> ')
    except (KeyboardInterrupt, SystemExit):
        pass

    # Orderly shutdown: ask every worker thread to exit, then join them.
    logger.info('Exiting openvimd')
    threads = config_dic.get('host_threads', {})
    if 'of_thread' in config_dic:
        threads['of'] = (config_dic['of_thread'])
    if 'dhcp_thread' in config_dic:
        threads['dhcp'] = (config_dic['dhcp_thread'])

    for thread in threads.values():
        thread.insert_task("exit")
    for thread in threads.values():
        thread.join()
    #http_thread.join()
    #if http_thread_admin is not None:
    #http_thread_admin.join()
    logger.debug( "bye!")
    exit()
| apache-2.0 |
kavitshah8/SDNDeveloper | pox/lib/util.py | 2 | 13139 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various utility functions
"""
from __future__ import print_function
import traceback
import struct
import sys
import os
import time
import socket
import collections
#FIXME: ugh, why can't I make importing pox.core work here?
import logging
log = logging.getLogger("util")
class DirtyList (list):
    """
    A list that tracks whether it has (possibly) been changed.

    Sets .dirty when a mutating operation occurs.  If .callback is set, it
    is invoked as callback(reason, key, value); a callback that returns
    True suppresses the dirty flag for that operation.
    """
    #TODO: right now the callback may be called more often than needed
    # and it may not be called with good names/parameters.
    # All you can really rely on is that it will be called in
    # some way if something may have changed.
    def __init__ (self, *args, **kw):
        list.__init__(self, *args, **kw)
        self.dirty = False
        self.callback = None

    def __setslice__ (self, k, v):
        #TODO: actually check for change
        self._smudge('__setslice__', k, v)
        list.__setslice__(self, k, v)

    def __delslice__ (self, k):
        #TODO: actually check for change
        self._smudge('__delslice__', k, None)
        list.__delslice__(self, k)

    def append (self, v):
        self._smudge('append', None, v)
        list.append(self, v)

    def extend (self, v):
        self._smudge('extend', None, v)
        list.extend(self, v)

    def insert (self, i, v):
        # BUGFIX: previously referenced an undefined name 'k' and then
        # called list.extend() instead of list.insert().
        self._smudge('insert', i, v)
        list.insert(self, i, v)

    def pop (self, i=-1):
        self._smudge('pop', i, None)
        # BUGFIX: return the popped element like list.pop() does
        # (the old code discarded it).
        return list.pop(self, i)

    def remove (self, v):
        if v in self:
            self._smudge('remove', None, v)
        list.remove(self, v)

    def reverse (self):
        if len(self):
            self._smudge('reverse', None, None)
        list.reverse(self)

    def sort (self, *arg, **kw):
        #TODO: check for changes?
        self._smudge('sort', None, None)
        list.sort(self, *arg, **kw)

    def __setitem__ (self, k, v):
        if isinstance(k, slice):
            #TODO: actually check for change
            self._smudge('__setitem__slice', k, v)
        elif self[k] != v:
            self._smudge('__setitem__', k, v)
        list.__setitem__(self, k, v)
        # BUGFIX: removed a stray 'assert good' that referenced an
        # undefined name and made every item assignment raise.

    def __delitem__ (self, k):
        list.__delitem__(self, k)
        if isinstance(k, slice):
            #TODO: actually check for change
            # BUGFIX: previously passed an undefined name 'v' here.
            self._smudge('__delitem__slice', k, None)
        else:
            self._smudge('__delitem__', k, None)

    def _smudge (self, reason, k, v):
        # Central "something changed" hook; the callback can veto the
        # dirty flag by returning True.
        if self.callback:
            if self.callback(reason, k, v) is not True:
                self.dirty = True
        else:
            self.dirty = True
class DirtyDict (dict):
    """
    A dict that tracks whether values have been changed shallowly.
    If you set a callback, it will be called when the value changes, and
    passed three values: "add"/"modify"/"delete", key, value
    """
    def __init__ (self, *args, **kw):
        dict.__init__(self, *args, **kw)
        self.dirty = False
        self.callback = None

    def _smudge (self, reason, k, v):
        # The callback can veto the dirty flag by returning True.
        suppressed = bool(self.callback) and (self.callback(reason, k, v) is True)
        if not suppressed:
            self.dirty = True

    def __setitem__ (self, k, v):
        if k not in self:
            reason = '__setitem__add'
        elif self[k] != v:
            reason = '__setitem__modify'
        else:
            reason = None  # value unchanged: nothing to report
        if reason is not None:
            self._smudge(reason, k, v)
        dict.__setitem__(self, k, v)

    def __delitem__ (self, k):
        self._smudge('__delitem__', k, None)
        dict.__delitem__(self, k)
def set_extend (l, index, item, emptyValue = None):
    """
    Adds item to the list l at position index.  If index is beyond the end
    of the list, it will pad the list out until it's large enough, using
    emptyValue for the new entries.
    """
    if index >= len(l):
        # BUGFIX: this used to read 'len(self)', which is undefined in a
        # plain function and raised NameError whenever padding was needed.
        l += ([emptyValue] * (index - len(l) + 1))
    l[index] = item
def str_to_dpid (s):
    """
    Convert a DPID in the canonical string form into a long int.
    """
    # Strip an optional 0x prefix, drop the dash separators and split off
    # an optional explicit "|high" part.
    text = s[2:] if s.lower().startswith("0x") else s
    parts = text.replace("-", "").split("|", 2)
    low = int(parts[0], 16)
    high = 0
    if low > 0xffFFffFFffFF:
        # Hex value overflows 48 bits: the excess becomes the high part.
        high = low >> 48
        low &= 0xffFFffFFffFF
    if len(parts) == 2:
        # An explicit decimal high part wins over any overflow bits.
        high = int(parts[1])
    return low | (high << 48)
strToDPID = str_to_dpid
def dpid_to_str (dpid, alwaysLong = False):
  """
  Convert a DPID from a long into the canonical string form.

  NOTE: Python-2-only code -- 'long' does not exist on Python 3, and
  iterating/ord()-ing the struct.pack() result assumes py2 byte strings.
  The low 6 bytes are rendered as dash-separated hex; a non-zero (or
  alwaysLong-forced) high 16-bit part is appended as '|<decimal>'.
  """
  if type(dpid) is long or type(dpid) is int:
    # Not sure if this is right
    dpid = struct.pack('!Q', dpid)
  assert len(dpid) == 8
  r = '-'.join(['%02x' % (ord(x),) for x in dpid[2:]])
  if alwaysLong or dpid[0:2] != (b'\x00'*2):
    # Non-zero high part (or caller forced it): append it in decimal.
    r += '|' + str(struct.unpack('!H', dpid[0:2])[0])
  return r
dpidToStr = dpid_to_str # Deprecated
def assert_type(name, obj, types, none_ok=True):
    """
    Assert that a parameter is of a given type.

    Raise an AssertionError with a descriptive error msg if not.

    name: name of the parameter for error messages
    obj: parameter value to be checked
    types: type or list or tuple of types that is acceptable
    none_ok: whether 'None' is an ok value
    """
    if obj is None:
        if not none_ok:
            raise AssertionError("%s may not be None" % name)
        return True

    if not isinstance(types, (tuple, list)):
        types = [types]
    if any(isinstance(obj, cls) for cls in types):
        return True

    # Build a message that points back at the offending call site.
    allowed_types = "|".join(str(cls) for cls in types)
    stack = traceback.extract_stack()
    stack_msg = "Function call %s() in %s:%d" % (stack[-2][2],
                                                 stack[-3][0], stack[-3][1])
    type_msg = ("%s must be instance of %s (but is %s)"
                % (name, allowed_types, str(type(obj))))
    raise AssertionError(stack_msg + ": " + type_msg)
def init_helper (obj, kw):
    """
    Inside a class's __init__, this will copy keyword arguments to fields
    of the same name.  See libopenflow for an example.

    Raises TypeError for any keyword that does not name an existing
    attribute of obj.
    """
    # .items() instead of the Python-2-only .iteritems(): equivalent
    # behaviour, negligible cost for a kwargs-sized dict, and it also
    # runs on Python 3.
    for k,v in kw.items():
        if not hasattr(obj, k):
            raise TypeError(obj.__class__.__name__ + " constructor got "
                            + "unexpected keyword argument '" + k + "'")
        setattr(obj, k, v)
initHelper = init_helper # Deprecated
def make_pinger ():
    """
    A pinger is basically a thing to let you wake a select().

    On Unix systems, this makes a pipe pair.  But on Windows, select() only
    works with sockets, so it makes a pair of connected sockets.
    """
    class PipePinger (object):
        # Unix flavour: wraps an os.pipe() pair; write one byte to wake
        # the select(), read it back to acknowledge.
        def __init__ (self, pair):
            self._w = pair[1]
            self._r = pair[0]
            assert os is not None

        def ping (self):
            if os is None: return #TODO: Is there a better fix for this?
            os.write(self._w, ' ')

        def fileno (self):
            # Lets the pinger itself be passed straight to select().
            return self._r

        def pongAll (self):
            #TODO: make this actually read all
            os.read(self._r, 1024)

        def pong (self):
            os.read(self._r, 1)

        def __del__ (self):
            try:
                os.close(self._w)
            except:
                pass
            try:
                os.close(self._r)
            except:
                pass

        def __repr__ (self):
            return "<%s %i/%i>" % (self.__class__.__name__, self._w, self._r)

    class SocketPinger (object):
        # Windows flavour: same protocol over a connected TCP socket pair.
        def __init__ (self, pair):
            self._w = pair[1]
            self._r = pair[0]

        def ping (self):
            self._w.send(' ')

        def pong (self):
            self._r.recv(1)

        def pongAll (self):
            #TODO: make this actually read all
            self._r.recv(1024)

        def fileno (self):
            return self._r.fileno()

        def __repr__ (self):
            return "<%s %s/%s>" % (self.__class__.__name__, self._w, self._r)

    #return PipePinger((os.pipe()[0],os.pipe()[1])) # To test failure case

    if os.name == "posix":
        return PipePinger(os.pipe())

    #TODO: clean up sockets?
    # NOTE(review): 127.127.127.127 is presumably used instead of
    # 127.0.0.1 to avoid colliding with other local listeners -- confirm.
    localaddress = '127.127.127.127'
    startPort = 10000

    import socket
    import select

    def tryConnect ():
        # Bind a listener on the first free port at/above startPort,
        # connect a client socket to it, and accept the pair.
        l = socket.socket()
        l.setblocking(0)

        port = startPort
        while True:
            try:
                l.bind( (localaddress, port) )
                break
            except:
                port += 1
                if port - startPort > 1000:
                    raise RuntimeError("Could not find a free socket")
        l.listen(0)

        r = socket.socket()

        try:
            r.connect((localaddress, port))
        except:
            import traceback
            ei = sys.exc_info()
            ei = traceback.format_exception_only(ei[0], ei[1])
            ei = ''.join(ei).strip()
            log.warning("makePinger: connect exception:\n" + ei)
            return False

        rlist, wlist,elist = select.select([l], [], [l], 2)
        if len(elist):
            log.warning("makePinger: socket error in select()")
            return False
        if len(rlist) == 0:
            log.warning("makePinger: socket didn't connect")
            return False

        try:
            w, addr = l.accept()
        except:
            return False

        #w.setblocking(0)
        if addr != r.getsockname():
            # Someone else connected to our listener first.
            log.info("makePinger: pair didn't connect to each other!")
            return False

        r.setblocking(1)

        # Turn off Nagle
        r.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        return (r, w)

    # Try a few times
    for i in range(0, 3):
        result = tryConnect()
        if result is not False:
            return SocketPinger(result)

    raise RuntimeError("Could not allocate a local socket pair")

makePinger = make_pinger # Deprecated
def is_subclass (cls, classinfo):
    """
    A more sensible version of the issubclass builtin

    (Returns False instead of raising when cls isn't a class.)
    """
    try:
        result = issubclass(cls, classinfo)
    except TypeError:
        result = False
    return result
def str_to_bool (s):
    """
    Given a string, parses out whether it is meant to be True or not
    """
    s = str(s).lower() # Make sure
    truthy = ('true', 't', 'yes', 'y', 'on', 'enable', 'enabled', 'ok',
              'okay', '1', 'allow', 'allowed')
    if s in truthy:
        return True
    # Fall back to numeric interpretation: any non-zero value is True.
    try:
        digits, base = (s[2:], 16) if s.startswith("0x") else (s, 10)
        if int(digits, base) != 0:
            return True
    except:
        pass
    return False
def hexdump (data):
    """
    Return a classic three-column hex dump of data.

    data may be a string or any iterable of byte values; lines look like
    '0000: 41 42 43 ...                  |ABC...|', 16 bytes per row.
    """
    if isinstance(data, str):
        data = [ord(c) for c in data]
    o = ""
    def chunks (data, length):
        # 'range' works on both Python 2 and 3 (was the py2-only 'xrange');
        # for dump-sized inputs the difference is irrelevant.
        return (data[i:i+length] for i in range(0, len(data), length))
    def filt (c):
        # Printable ASCII passes through; everything else becomes '.'.
        if c >= 32 and c <= 126: return chr(c)
        return '.'
    for i,chunk in enumerate(chunks(data,16)):
        if i > 0: o += "\n"
        o += "%04x: " % (i * 16,)
        l = ' '.join("%02x" % (c,) for c in chunk)
        l = "%-48s" % (l,)
        # Extra space between the two groups of eight hex columns.
        l = l[:3*8-1] + " " + l[3*8:]
        t = ''.join([filt(x) for x in chunk])
        l += ' |%-16s|' % (t,)
        o += l
    return o
def connect_socket_with_backoff (address, port, max_backoff_seconds=32):
    '''
    Connect to the given address and port. If the connection attempt fails,
    exponentially back off, up to the max backoff.

    Returns the connected socket, or raises RuntimeError once the maximum
    backoff is reached without a successful connection.
    '''
    backoff_seconds = 1
    sock = None
    print("connect_socket_with_backoff(address=%s, port=%d)"
          % (address, port), file=sys.stderr)
    while True:
        try:
            sock = socket.socket()
            sock.connect( (address, port) )
            break
        except socket.error as e:
            # BUGFIX: close the failed socket before retrying so we don't
            # leak one file descriptor per attempt.
            sock.close()
            sock = None
            print("%s. Backing off %d seconds ..." % (str(e), backoff_seconds),
                  file=sys.stderr)
            if backoff_seconds >= max_backoff_seconds:
                raise RuntimeError("Could not connect to controller %s:%d"
                                   % (address, port))
            else:
                time.sleep(backoff_seconds)
                backoff_seconds <<= 1
    return sock
# NOTE: 'long' and 'basestring' are Python-2-only names; this module
# predates Python 3.
_scalar_types = (int, long, basestring, float, bool)

def is_scalar (v):
    """Return True if v is a plain scalar (number, string, or bool)."""
    return isinstance(v, _scalar_types)
def is_listlike (o):
    """
    Is this a sequence that isn't like a string or bytes?
    """
    if isinstance(o, (bytes,str,bytearray)): return False
    # collections.Iterable was removed in Python 3.10; prefer the
    # collections.abc location when it exists (it doesn't on Python 2).
    abc = getattr(collections, 'abc', collections)
    return isinstance(o, abc.Iterable)
def fields_of (obj, primitives_only=False,
               primitives_and_composites_only=False, allow_caps=False,
               ignore=frozenset()):
  """
  Returns key/value pairs of things that seem like public fields of an object.
  """
  #NOTE: The above docstring isn't split into two lines on purpose.
  # 'ignore' now defaults to an immutable frozenset: a mutable default
  # set() is shared across calls and a classic source of surprises.
  r = {}
  for k in dir(obj):
    if k.startswith('_'): continue
    if k in ignore: continue
    v = getattr(obj, k)
    if hasattr(v, '__call__'): continue   # skip methods / callables
    if not allow_caps and k.upper() == k: continue  # skip CONSTANTS
    if primitives_only:
      if not isinstance(v, _scalar_types):
        continue
    elif primitives_and_composites_only:
      if not isinstance(v, (int, long, basestring, float, bool, set,
                            dict, list)):
        continue
    #r.append((k,v))
    r[k] = v
  return r
def eval_args (f):
"""
A decorator which causes arguments to be interpreted as Python literals
This isn't a generic decorator, but is specifically meant for component
launch functions -- the actual magic is in POX's boot code.
The intention is for launch function/commandline arguments (normally all
strings) to easily receive other types.
"""
f._pox_eval_args = True
return f
if __name__ == "__main__":
    # Tiny manual smoke test for DirtyList's callback mechanism.
    #TODO: move to tests?
    def cb (t,k,v): print(v)
    l = DirtyList([10,20,30,40,50])
    l.callback = cb

    l.append(3)

    print(l)
| apache-2.0 |
boghison/servo | tests/wpt/harness/wptrunner/browsers/servodriver.py | 48 | 4743 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import tempfile
from mozprocess import ProcessHandler
from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,
ServoWebDriverRefTestExecutor)
# Directory containing this module.
here = os.path.join(os.path.split(__file__)[0])

# Registration table read by wptrunner to wire up this product's entry points.
__wptrunner__ = {"product": "servodriver",
                 "check_args": "check_args",
                 "browser": "ServoWebDriverBrowser",
                 "executor": {"testharness": "ServoWebDriverTestharnessExecutor",
                              "reftest": "ServoWebDriverRefTestExecutor"},
                 "browser_kwargs": "browser_kwargs",
                 "executor_kwargs": "executor_kwargs",
                 "env_options": "env_options"}

# Contents written to a temporary hosts file so Servo resolves the
# web-platform.test domains to the local test server.
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def check_args(**kwargs):
    """Validate wptrunner kwargs; a Servo binary path is mandatory."""
    require_arg(kwargs, "binary")
def browser_kwargs(**kwargs):
    """Select the subset of wptrunner kwargs the browser class needs."""
    return dict(binary=kwargs["binary"],
                debug_info=kwargs["debug_info"])
def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
    """Build executor kwargs; servodriver adds nothing beyond the base set."""
    return base_executor_kwargs(test_type, server_config, cache_manager, **kwargs)
def env_options():
    """Environment options wptrunner should apply when driving Servo."""
    options = {}
    options["host"] = "web-platform.test"
    options["bind_hostname"] = "true"
    options["testharnessreport"] = "testharnessreport-servodriver.js"
    options["supports_debugger"] = True
    return options
def make_hosts_file():
    """Write the web-platform.test host entries to a temp file; return its path.

    The file deliberately outlives this call -- the caller hands the path
    to the Servo process via the HOST_FILE environment variable.
    """
    fd, path = tempfile.mkstemp()
    handle = os.fdopen(fd, "w")
    try:
        handle.write(hosts_text)
    finally:
        handle.close()
    return path
class ServoWebDriverBrowser(Browser):
    """Wraps a Servo process started with --webdriver for wptrunner."""

    # Ports already handed out, shared across instances so parallel
    # browsers don't collide.
    used_ports = set()

    def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1"):
        Browser.__init__(self, logger)
        self.binary = binary
        self.webdriver_host = webdriver_host
        self.webdriver_port = None
        self.proc = None
        self.debug_info = debug_info
        self.hosts_path = make_hosts_file()
        self.command = None

    def start(self):
        """Pick a free WebDriver port and launch the Servo process."""
        self.webdriver_port = get_free_port(4444, exclude=self.used_ports)
        self.used_ports.add(self.webdriver_port)

        env = os.environ.copy()
        # Servo reads its hosts overrides from HOST_FILE.
        env["HOST_FILE"] = self.hosts_path

        debug_args, command = browser_command(self.binary,
                                              ["--cpu", "--hard-fail",
                                               "--webdriver", str(self.webdriver_port),
                                               "about:blank"],
                                              self.debug_info)

        self.command = command
        self.command = debug_args + self.command

        if not self.debug_info or not self.debug_info.interactive:
            # Normal case: managed process with captured output.
            self.proc = ProcessHandler(self.command,
                                       processOutputLine=[self.on_output],
                                       env=env,
                                       storeOutput=False)
            self.proc.run()
        else:
            # Interactive debugging: leave stdio attached to the terminal.
            self.proc = subprocess.Popen(self.command, env=env)

        self.logger.debug("Servo Started")

    def stop(self):
        """Kill the Servo process if it is running."""
        self.logger.debug("Stopping browser")
        if self.proc is not None:
            try:
                self.proc.kill()
            except OSError:
                # This can happen on Windows if the process is already dead
                pass

    def pid(self):
        """Return the process id, or None if not started / unavailable."""
        if self.proc is None:
            return None

        try:
            return self.proc.pid
        except AttributeError:
            return None

    def on_output(self, line):
        """Write a line of output from the process to the log"""
        self.logger.process_output(self.pid(),
                                   line.decode("utf8", "replace"),
                                   command=" ".join(self.command))

    def is_alive(self):
        """Return True while the Servo process is still running.

        BUGFIX: this used to test 'self.runner', an attribute never
        assigned anywhere in this class, so every call raised
        AttributeError.  Both ProcessHandler and subprocess.Popen expose
        poll(), which returns None while the process is alive.
        """
        if self.proc is None:
            return False
        return self.proc.poll() is None

    def cleanup(self):
        self.stop()

    def executor_browser(self):
        """Return the ExecutorBrowser class plus the kwargs to build it."""
        assert self.webdriver_port is not None
        return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
                                 "webdriver_port": self.webdriver_port}
| mpl-2.0 |
coteyr/home-assistant | homeassistant/components/device_tracker/tomato.py | 2 | 5090 | """
homeassistant.components.device_tracker.tomato
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a Tomato router for device
presence.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.tomato/
"""
import json
import logging
import re
import threading
from datetime import timedelta
import requests
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
# Config key for the router's http_id token (required by the Tomato API).
CONF_HTTP_ID = "http_id"
_LOGGER = logging.getLogger(__name__)
def get_scanner(hass, config):
    """ Validates config and returns a Tomato scanner. """
    # Abort (returning None) unless host, username, password and http_id
    # are all present in the device_tracker configuration section.
    if not validate_config(config,
                           {DOMAIN: [CONF_HOST, CONF_USERNAME,
                                     CONF_PASSWORD, CONF_HTTP_ID]},
                           _LOGGER):
        return None
    return TomatoDeviceScanner(config[DOMAIN])
class TomatoDeviceScanner(object):
    """ This class queries a wireless router running Tomato firmware
    for connected devices.
    A description of the Tomato API can be found on
    http://paulusschoutsen.nl/blog/2013/10/tomato-api-documentation/
    """
    def __init__(self, config):
        host, http_id = config[CONF_HOST], config[CONF_HTTP_ID]
        username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
        # Build the POST request once up front; the same prepared request
        # is re-sent on every scan in _update_tomato_info.
        self.req = requests.Request('POST',
                                    'http://{}/update.cgi'.format(host),
                                    data={'_http_id': http_id,
                                          'exec': 'devlist'},
                                    auth=requests.auth.HTTPBasicAuth(
                                        username, password)).prepare()
        # The Tomato API responds with lines of the form "name = value;".
        self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
        self.logger = logging.getLogger("{}.{}".format(__name__, "Tomato"))
        # Serializes concurrent scans; see _update_tomato_info.
        self.lock = threading.Lock()
        # wldev: wireless client entries; dhcpd_lease: DHCP lease entries.
        self.last_results = {"wldev": [], "dhcpd_lease": []}
        # True when the initial scan succeeded (router reachable).
        self.success_init = self._update_tomato_info()
    def scan_devices(self):
        """ Scans for new devices and return a
        list containing found device ids. """
        self._update_tomato_info()
        # item[1] is the MAC address within each wldev entry.
        return [item[1] for item in self.last_results['wldev']]
    def get_device_name(self, device):
        """ Returns the name of the given device or None if we don't know. """
        # dhcpd_lease entries look like [name, ip, mac, ...]; match the MAC.
        filter_named = [item[0] for item in self.last_results['dhcpd_lease']
                        if item[2] == device]
        if not filter_named or not filter_named[0]:
            return None
        else:
            return filter_named[0]
    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_tomato_info(self):
        """ Ensures the information from the Tomato router is up to date.
        Returns boolean if scanning successful. """
        with self.lock:
            self.logger.info("Scanning")
            try:
                response = requests.Session().send(self.req, timeout=3)
                # Calling and parsing the Tomato api here. We only need the
                # wldev and dhcpd_lease values. For API description see:
                # http://paulusschoutsen.nl/
                # blog/2013/10/tomato-api-documentation/
                if response.status_code == 200:
                    for param, value in \
                            self.parse_api_pattern.findall(response.text):
                        if param == 'wldev' or param == 'dhcpd_lease':
                            # The router quotes values with single quotes;
                            # normalise to double quotes for json.loads.
                            self.last_results[param] = \
                                json.loads(value.replace("'", '"'))
                    return True
                elif response.status_code == 401:
                    # Authentication error
                    self.logger.exception((
                        "Failed to authenticate, "
                        "please check your username and password"))
                    return False
            except requests.exceptions.ConnectionError:
                # We get this if we could not connect to the router or
                # an invalid http_id was supplied
                self.logger.exception((
                    "Failed to connect to the router"
                    " or invalid http_id supplied"))
                return False
            except requests.exceptions.Timeout:
                # We get this if we could not connect to the router or
                # an invalid http_id was supplied
                self.logger.exception(
                    "Connection to the router timed out")
                return False
            except ValueError:
                # If json decoder could not parse the response
                self.logger.exception(
                    "Failed to parse response from router")
                return False
| mit |
sapcc/monasca-agent | monasca_setup/detection/plugins/mysql.py | 1 | 6117 | # (C) Copyright 2015,2016 Hewlett Packard Enterprise Development Company LP
import logging
import os
from six.moves import configparser
import monasca_setup.agent_config
import monasca_setup.detection
from monasca_setup.detection.utils import find_process_name
log = logging.getLogger(__name__)
# Default MySQL client-credentials file read when no args are supplied.
mysql_conf = '/root/.my.cnf'
# Connection defaults used when the config file omits a value.
HOST = 'localhost'
PORT = 3306
SOCKET = '/var/run/mysqld/mysqld.sock'
class MySQL(monasca_setup.detection.Plugin):
    """Detect MySQL daemons and setup configuration to monitor them.
    This plugin needs user/password info for mysql setup.
    It needs either the host ip or socket if using
    the default localhost hostname. You cannot use
    the localhost name if using ssl. This plugin
    accepts arguments, and if none are input it will
    try to read the default config file which is
    best placed in /root/.my.cnf in a format such as
    [client]
        user=root
        password=yourpassword
        host=padawan-ccp-c1-m1-mgmt
        ssl_ca=/etc/ssl/certs/ca-certificates.crt
    """
    def _detect(self):
        """Run detection, set self.available True if the service is detected.

        Detection requires all of: a running mysqld process, either plugin
        args or the default config file, and the PyMySQL dependency.
        """
        process_exist = find_process_name('mysqld') is not None
        has_dependencies = self.dependencies_installed()
        has_args_or_config_file = (self.args is not None or
                                   os.path.isfile(mysql_conf))
        self.available = (process_exist and has_args_or_config_file and
                          has_dependencies)
        if not self.available:
            # Log the specific reason detection failed to aid debugging.
            if not process_exist:
                log.error('MySQL process does not exist.')
            elif not has_args_or_config_file:
                log.error(('MySQL process exists but '
                           'configuration file was not found and '
                           'no arguments were given.'))
            elif not has_dependencies:
                log.error(('MySQL process exists but required dependence '
                           'PyMySQL is not installed.'))
    def _get_config(self):
        """Set the configuration to be used for connecting to mysql
        :return:
        """
        self.ssl_options = {}
        # reads default config file if no input parameters
        if self.args is None:
            self._read_config(mysql_conf)
        else:
            self.host = self.args.get('host', HOST)
            self.port = self.args.get('port', PORT)
            self.user = self.args.get('user', 'root')
            self.password = self.args.get('password', None)
            self.socket = self.args.get('socket', None)
            self.ssl_ca = self.args.get('ssl_ca', None)
            self.ssl_key = self.args.get('ssl_key', None)
            self.ssl_cert = self.args.get('ssl_cert', None)
        # Only pass ssl options that were actually supplied.
        if self.ssl_ca is not None:
            self.ssl_options['ca'] = self.ssl_ca
        if self.ssl_key is not None:
            self.ssl_options['key'] = self.ssl_key
        if self.ssl_cert is not None:
            self.ssl_options['cert'] = self.ssl_cert
        # Bug fix: port values read from the config file (configparser
        # returns option values as strings) or passed as plugin args may be
        # strings, but pymysql.connect requires an int port.
        self.port = int(self.port)
        if self.socket is None and (self.host == 'localhost' or self.host == '127.0.0.1'):
            self.socket = SOCKET
    def _read_config(self, config_file):
        """Read the configuration setting member variables as appropriate.
        :param config_file: The filename of the configuration to read and parse
        """
        log.info("\tUsing client credentials from {}".format(config_file))
        parser = configparser.RawConfigParser(defaults={
            'user': None,
            'password': None,
            'host': HOST,
            'port': PORT,
            'socket': None,
            'ssl_ca': None,
            'ssl_key': None,
            'ssl_cert': None
        }, allow_no_value=True)
        parser.read(config_file)
        self.user = parser.get('client', 'user')
        self.password = parser.get('client', 'password')
        self.host = parser.get('client', 'host')
        self.port = parser.get('client', 'port')
        self.socket = parser.get('client', 'socket')
        self.ssl_ca = parser.get('client', 'ssl_ca')
        self.ssl_key = parser.get('client', 'ssl_key')
        self.ssl_cert = parser.get('client', 'ssl_cert')
    def build_config(self):
        """Build the config as a Plugins object and return.

        Raises Exception if PyMySQL is missing, the connection test fails,
        or any other configuration error occurs.
        """
        config = monasca_setup.agent_config.Plugins()
        # First watch the process
        config.merge(monasca_setup.detection.watch_process(['mysqld'], component='mysql'))
        log.info("\tWatching the mysqld process.")
        try:
            import pymysql
            self._get_config()
            # connection test
            pymysql.connect(host=self.host, user=self.user,
                            passwd=self.password, port=self.port,
                            unix_socket=self.socket, ssl=self.ssl_options)
            log.info("\tConnection test success.")
            config['mysql'] = {
                'init_config': None, 'instances':
                    [{'name': self.host, 'server': self.host, 'port': self.port,
                      'user': self.user, 'pass': self.password,
                      'sock': self.socket, 'ssl_ca': self.ssl_ca,
                      'ssl_key': self.ssl_key, 'ssl_cert': self.ssl_cert}]}
        except ImportError as e:
            exception_msg = ('The mysql dependency PyMySQL is not '
                             'installed. {}'.format(e))
            log.exception(exception_msg)
            raise Exception(exception_msg)
        except pymysql.MySQLError as e:
            exception_msg = 'Could not connect to mysql. {}'.format(e)
            log.exception(exception_msg)
            raise Exception(exception_msg)
        except Exception as e:
            exception_msg = 'Error configuring the mysql check plugin. {}'.format(e)
            log.exception(exception_msg)
            raise Exception(exception_msg)
        return config
    def dependencies_installed(self):
        """Return True if the PyMySQL client library can be imported."""
        try:
            import pymysql
        except ImportError:
            return False
        return True
| bsd-3-clause |
466152112/scikit-learn | sklearn/svm/tests/test_svm.py | 116 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample: a linearly separable two-class problem in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
# shuffle with a fixed seed so samples of a class are not contiguous
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
    """Check the fitted attributes of a linear SVC on the toy problem."""
    # Test parameters on classes that make use of libsvm.
    clf = svm.SVC(kernel='linear').fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.support_vectors_, (X[1], X[3]))
    assert_array_equal(clf.intercept_, [0.])
    assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
    """Check the SVC estimator and the low-level libsvm wrapper on iris."""
    # Check consistency on dataset iris.
    # shuffle the dataset so that labels are not ordered
    for k in ('linear', 'rbf'):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
        assert_array_equal(clf.classes_, np.sort(clf.classes_))
    # check also the low-level API
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
    pred = svm.libsvm.predict(iris.data, *model)
    assert_greater(np.mean(pred == iris.target), .95)
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
                           kernel='linear')
    pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
    assert_greater(np.mean(pred == iris.target), .95)
    pred = svm.libsvm.cross_validation(iris.data,
                                       iris.target.astype(np.float64), 5,
                                       kernel='linear',
                                       random_seed=0)
    assert_greater(np.mean(pred == iris.target), .95)
    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper calling `srand` concurrently).
    pred2 = svm.libsvm.cross_validation(iris.data,
                                        iris.target.astype(np.float64), 5,
                                        kernel='linear',
                                        random_seed=0)
    assert_array_equal(pred, pred2)
def test_single_sample_1d():
    """Predicting on a single sample passed as a 1-d array must not raise."""
    # Test whether SVCs work on a single sample given as a 1-d array
    clf = svm.SVC().fit(X, Y)
    clf.predict(X[0])
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf.predict(X[0])
def test_precomputed():
    """SVC with a precomputed kernel on the toy data and on iris.

    Bug fix: the final assertion previously re-used the stale ``pred``
    computed from the precomputed-kernel classifier, so the last fit with
    the callable kernel was never actually checked; recompute ``pred``
    before asserting.
    """
    # SVC with a precomputed kernel.
    # We test it with a toy dataset and with iris.
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    assert_raises(ValueError, clf.predict, KT.T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])
    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)
    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)
    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])
    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # Recompute predictions for the callable-kernel fit before asserting.
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
    """Smoke-test the SVR family on the diabetes regression dataset.

    Fix: the parameter grid listed ``LinearSVR(C=10.)`` twice; the first
    entry now uses ``C=1.0`` so two regularization strengths are covered,
    mirroring the NuSVR pair above it.
    """
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=1.0),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    """LinearSVR and SVR(kernel='linear') should agree closely."""
    # check that SVR(kernel='linear') and LinearSVC() give
    # comparable results
    diabetes = datasets.load_diabetes()
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score1 = lsvr.score(diabetes.data, diabetes.target)
    svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
    score2 = svr.score(diabetes.data, diabetes.target)
    # coefficients within 10% relative norm, scores within 0.1
    assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
    assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
    """A kernel callable returning a malformed Gram matrix must raise."""
    X = [[0.0], [1.0]]
    y = [0.0, 0.5]
    # Bad kernel
    clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
    clf.fit(X, y)
    assert_raises(ValueError, clf.predict, X)
def test_oneclass():
    """Check OneClassSVM fitted attributes and outlier predictions."""
    # Test OneClassSVM
    clf = svm.OneClassSVM()
    clf.fit(X)
    pred = clf.predict(T)
    assert_array_almost_equal(pred, [-1, -1, -1])
    assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
    assert_array_almost_equal(clf.dual_coef_,
                              [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
                              decimal=3)
    # coef_ is only defined for linear kernels; accessing it must raise
    assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
    """decision_function sign must match predict for OneClassSVM."""
    # Test OneClassSVM decision function
    clf = svm.OneClassSVM()
    rnd = check_random_state(2)
    # Generate train data
    X = 0.3 * rnd.randn(100, 2)
    X_train = np.r_[X + 2, X - 2]
    # Generate some regular novel observations
    X = 0.3 * rnd.randn(20, 2)
    X_test = np.r_[X + 2, X - 2]
    # Generate some abnormal novel observations
    X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
    # fit the model
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X_train)
    # predict things
    y_pred_test = clf.predict(X_test)
    assert_greater(np.mean(y_pred_test == 1), .9)
    y_pred_outliers = clf.predict(X_outliers)
    assert_greater(np.mean(y_pred_outliers == -1), .9)
    dec_func_test = clf.decision_function(X_test)
    assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
    dec_func_outliers = clf.decision_function(X_outliers)
    assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
    """Mutating dual_coef_ at run time must change predictions."""
    # Make sure some tweaking of parameters works.
    # We change clf.dual_coef_ at run time and expect .predict() to change
    # accordingly. Notice that this is not trivial since it involves a lot
    # of C/Python copying in the libsvm bindings.
    # The success of this test ensures that the mapping between libsvm and
    # the python classifier is complete.
    clf = svm.SVC(kernel='linear', C=1.0)
    clf.fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[-.25, .25]])
    assert_array_equal(clf.predict([[-.1, -.1]]), [1])
    clf._dual_coef_ = np.array([[.0, 1.]])
    assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
    """predict_proba must sum to one and agree with predict and log-proba."""
    # Predict probabilities using SVC
    # This uses cross validation, so we use a slightly bigger testing set.
    for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
                svm.NuSVC(probability=True, random_state=0)):
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        assert_array_almost_equal(
            np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        assert_true(np.mean(np.argmax(prob_predict, 1)
                            == clf.predict(iris.data)) > 0.9)
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
    """decision_function must match the manual w.x + b computation."""
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)
    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))
    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)
    # kernel binary:
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)
    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
    """Check ovr/ovo decision_function shapes and the deprecation warning."""
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo' (n_classes choose 2 = 10)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))
    # check deprecation warning
    clf.decision_function_shape = None
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
    """SVR decision_function must match the manual dual computation."""
    # Test SVR's decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    X = iris.data
    y = iris.target
    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
    """class_weight must bias predictions toward the up-weighted class."""
    # Test class weights
    clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weights to class 1
    clf.fit(X, Y)
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)
    X_, y_ = make_classification(n_samples=200, n_features=10,
                                 weights=[0.833, 0.167], random_state=2)
    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0), svm.SVC()):
        clf.set_params(class_weight={0: .1, 1: 10})
        clf.fit(X_[:100], y_[:100])
        y_pred = clf.predict(X_[100:])
        assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
    """Per-sample weights must influence predictions; rescaling all
    sample weights is equivalent to rescaling C."""
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict(X[2]), [1.])
    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X[2]), [2.])
    # test that rescaling all samples is the same as changing C
    clf = svm.SVC()
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
    """class_weight='balanced' must improve f1 on an imbalanced dataset."""
    # Test class weights for imbalanced data
    from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of predictors from
    # class 1.
    # We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels where a range [0..K).
    from sklearn.utils import compute_class_weight
    X, y = iris.data[:, :2], iris.target + 1
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
    classes = np.unique(y[unbalanced])
    class_weights = compute_class_weight('balanced', classes, y[unbalanced])
    assert_true(np.argmax(class_weights) == 2)
    for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
                LogisticRegression()):
        # check that score is better when class='balanced' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
        assert_true(metrics.f1_score(y, y_pred, average='weighted')
                    <= metrics.f1_score(y, y_pred_balanced,
                                        average='weighted'))
def test_bad_input():
    """Deficient input (bad params, shapes, layouts) must raise ValueError."""
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)
    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)
    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)
    # sample_weight bad dimensions
    clf = svm.SVC()
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
    """A sparse precomputed Gram matrix must raise an informative TypeError."""
    clf = svm.SVC(kernel='precomputed')
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    try:
        clf.fit(sparse_gram, [0, 1])
        # fit must not succeed; fail loudly if it does
        assert not "reached"
    except TypeError as e:
        assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
    """Every loss/penalty/dual combination either fits or raises clearly."""
    # Test possible parameter combinations in LinearSVC
    # Generate list of possible parameter combinations
    losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
    penalties, duals = ['l1', 'l2', 'bar'], [True, False]
    X, y = make_classification(n_samples=5, n_features=5)
    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        if ((loss, penalty) == ('hinge', 'l1') or
                (loss, penalty, dual) == ('hinge', 'l2', False) or
                (penalty, dual) == ('l1', True) or
                loss == 'foo' or penalty == 'bar'):
            # unsupported combinations must raise with a descriptive message
            assert_raises_regexp(ValueError,
                                 "Unsupported set of arguments.*penalty='%s.*"
                                 "loss='%s.*dual=%s"
                                 % (penalty, loss, dual),
                                 clf.fit, X, y)
        else:
            clf.fit(X, y)
    # Incorrect loss value - test if explicit error message is raised
    assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
                         svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
    """Legacy loss aliases ('l1'/'l2') must emit DeprecationWarning."""
    X, y = [[0.0], [1.0]], [0, 1]
    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")
    # LinearSVC
    # loss l1/L1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)
    # loss l2/L2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
                         svm.LinearSVC(loss="L2").fit, X, y)
    # LinearSVR
    # loss l1/L1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("L1", "epsilon_insensitive", "loss='L1'",
                                "1.0"),
                         svm.LinearSVR(loss="L1").fit, X, y)
    # loss l2/L2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
    """Uppercase loss/penalty spellings still work but warn as deprecated."""
    # Check if Upper case notation is supported by _fit_liblinear
    # which is called by fit
    X, y = [[0.0], [1.0]], [0, 1]
    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the uppercase notation will be removed in %s")
    # loss SQUARED_hinge --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
                         svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
    # penalty L2 --> l2
    assert_warns_message(DeprecationWarning,
                         msg.replace("loss", "penalty")
                         % ("L2", "l2", "0.18"),
                         svm.LinearSVC(penalty="L2").fit, X, y)
    # loss EPSILON_INSENSITIVE --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
                                "0.18"),
                         svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
    """Basic LinearSVC checks across penalty/loss/dual variants."""
    # Test basic routines using LinearSVC
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    # by default should have intercept
    assert_true(clf.fit_intercept)
    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)
    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)
    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)
    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)
    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(np.int) + 1
    assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
    """Crammer-Singer multiclass must roughly agree with OvR on iris."""
    # Test LinearSVC with crammer_singer multi-class svm
    ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
    cs_clf.fit(iris.data, iris.target)
    # similar prediction for ovr and crammer-singer:
    assert_true((ovr_clf.predict(iris.data) ==
                 cs_clf.predict(iris.data)).mean() > .9)
    # classifiers shouldn't be the same
    assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
    # test decision function
    assert_array_equal(cs_clf.predict(iris.data),
                       np.argmax(cs_clf.decision_function(iris.data), axis=1))
    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
    """Crammer-Singer must also work on a binary problem."""
    # Test Crammer-Singer formulation in the binary case
    X, y = make_classification(n_classes=2, random_state=0)
    for fit_intercept in (True, False):
        acc = svm.LinearSVC(fit_intercept=fit_intercept,
                            multi_class="crammer_singer",
                            random_state=0).fit(X, y).score(X, y)
        assert_greater(acc, 0.9)
def test_linearsvc_iris():
    """LinearSVC on iris with string class labels in classes_."""
    # Test that LinearSVC gives plausible predictions on the iris dataset
    # Also, test symbolic class names (classes_).
    target = iris.target_names[iris.target]
    clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
    assert_equal(set(clf.classes_), set(iris.target_names))
    assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
    dec = clf.decision_function(iris.data)
    pred = iris.target_names[np.argmax(dec, 1)]
    assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    """intercept_scaling must control how much the intercept is penalized."""
    # Test that dense liblinear honours intercept_scaling param
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
                     dual=False, C=4, tol=1e-7, random_state=0)
    assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
    assert_true(clf.fit_intercept)
    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)
    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert_less(intercept1, -1)
    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
    """Re-assigning coef_/intercept_ copies must not change decisions."""
    # multi-class case
    clf = svm.LinearSVC().fit(iris.data, iris.target)
    values = clf.decision_function(iris.data)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(iris.data)
    assert_array_almost_equal(values, values2)
    # binary-class case
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = svm.LinearSVC().fit(X, y)
    values = clf.decision_function(X)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(X)
    assert_array_equal(values, values2)
def test_immutable_coef_property():
    """``coef_`` on fitted kernel SVMs must behave as read-only: rebinding
    the attribute raises AttributeError and in-place item assignment on
    the returned array raises too (RuntimeError or ValueError depending
    on the numpy version — presumably; verify against numpy changelog)."""
    # Check that primal coef modification are not silently ignored
    svms = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for clf in svms:
        assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
        assert_raises((RuntimeError, ValueError),
                      clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
    """Smoke-test LinearSVC(verbose=1).

    liblinear writes its progress to the C-level stdout, so file descriptor
    1 is temporarily redirected into a pipe for the duration of the fit.

    Fixes over the previous version: the duplicated stdout fd and both pipe
    fds are now closed (they leaked before), and stdout is restored even if
    ``fit`` raises.
    """
    import os
    saved_stdout = os.dup(1)      # save original stdout
    read_fd, write_fd = os.pipe()
    os.dup2(write_fd, 1)          # replace stdout with the pipe's write end
    try:
        # actual call
        clf = svm.LinearSVC(verbose=1)
        clf.fit(X, Y)
    finally:
        os.dup2(saved_stdout, 1)  # restore original stdout
        for fd in (saved_stdout, read_fd, write_fd):
            os.close(fd)
def test_svc_clone_with_callable_kernel():
    """An SVC built with a callable (lambda) linear kernel must survive
    ``base.clone`` and must match the built-in 'linear' kernel on dual
    coefficients, intercepts, predictions, probabilities and decision
    values."""
    # create SVM with callable linear kernel, check that results are the same
    # as with built-in linear kernel
    svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
                           probability=True, random_state=0,
                           decision_function_shape='ovr')
    # clone for checking clonability with lambda functions..
    svm_cloned = base.clone(svm_callable)
    svm_cloned.fit(iris.data, iris.target)
    svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
                          decision_function_shape='ovr')
    svm_builtin.fit(iris.data, iris.target)
    assert_array_almost_equal(svm_cloned.dual_coef_,
                              svm_builtin.dual_coef_)
    assert_array_almost_equal(svm_cloned.intercept_,
                              svm_builtin.intercept_)
    assert_array_equal(svm_cloned.predict(iris.data),
                       svm_builtin.predict(iris.data))
    assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
                              svm_builtin.predict_proba(iris.data),
                              decimal=4)
    assert_array_almost_equal(svm_cloned.decision_function(iris.data),
                              svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
    """fit() must raise ValueError when a custom kernel callable returns
    an invalid value (here it just echoes its first argument)."""
    bad_svc = svm.SVC(kernel=lambda x, y: x)
    assert_raises(ValueError, bad_svc.fit, X, Y)
def test_timeout():
    """Capping the solver at max_iter=1 must emit ConvergenceWarning."""
    a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
    """predict() on a never-fitted estimator must raise an error whose
    message names the estimator class and says it is not fitted —
    before any input validation runs (hence the bogus string input)."""
    X = "foo!"  # input validation not required when SVM not fitted
    clf = svm.SVC()
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
    clf = svm.NuSVR()
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
    """Two fits with the identical seed must give identical predict_proba
    output (the Platt-scaling step must be deterministic given random_state)."""
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
    """A LinearSVC stopped at max_iter=2 must warn about non-convergence
    and report exactly that iteration count in ``n_iter_``."""
    # Test that warnings are raised if model does not converge
    lsvc = svm.LinearSVC(max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
    assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
    """For linear-kernel regressors, predict(X) must equal
    X . coef_ + intercept_ — i.e. ``coef_`` carries the correct sign."""
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)
    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(svr.predict(X),
                                  np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
    """fit() must reject intercept_scaling <= 0 with the exact documented
    error message."""
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        lsvc = svm.LinearSVC(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % lsvc.intercept_scaling)
        assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
    """With fit_intercept=False the fitted intercept must be exactly 0,
    regardless of intercept_scaling."""
    model = svm.LinearSVC(fit_intercept=False)
    model.fit(X, Y)
    assert_equal(model.intercept_, 0.)
def test_hasattr_predict_proba():
    """``predict_proba`` availability is switched by the ``probability``
    param, both before and after fitting; flipping the flag on after a
    probability=False fit exposes the attribute but calling it must fail."""
    # Method must be (un)available before or after fit, switched by
    # `probability` param
    G = svm.SVC(probability=True)
    assert_true(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_true(hasattr(G, 'predict_proba'))
    G = svm.SVC(probability=False)
    assert_false(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_false(hasattr(G, 'predict_proba'))
    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert_true(hasattr(G, 'predict_proba'))
    msg = "predict_proba is not available when fitted with probability=False"
    assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
Shaps/ansible | test/support/windows-integration/plugins/modules/win_copy.py | 71 | 6764 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Metadata consumed by Ansible tooling: declares this module's interface as
# stable and maintained by the core team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_copy
version_added: '1.9.2'
short_description: Copies files to remote locations on windows hosts
description:
- The C(win_copy) module copies a file on the local box to remote windows locations.
- For non-Windows targets, use the M(copy) module instead.
options:
content:
description:
- When used instead of C(src), sets the contents of a file directly to the
specified value.
- This is for simple values, for anything complex or with formatting please
switch to the M(template) module.
type: str
version_added: '2.3'
decrypt:
description:
- This option controls the autodecryption of source files using vault.
type: bool
default: yes
version_added: '2.5'
dest:
description:
- Remote absolute path where the file should be copied to.
- If C(src) is a directory, this must be a directory too.
- Use \ for path separators or \\ when in "double quotes".
- If C(dest) ends with \ then source or the contents of source will be
copied to the directory without renaming.
- If C(dest) is a nonexistent path, it will only be created if C(dest) ends
with "/" or "\", or C(src) is a directory.
- If C(src) and C(dest) are files and if the parent directory of C(dest)
doesn't exist, then the task will fail.
type: path
required: yes
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
- No backup is taken when C(remote_src=False) and multiple files are being
copied.
type: bool
default: no
version_added: '2.8'
force:
description:
- If set to C(yes), the file will only be transferred if the content
is different than destination.
- If set to C(no), the file will only be transferred if the
destination does not exist.
- If set to C(no), no checksuming of the content is performed which can
help improve performance on larger files.
type: bool
default: yes
version_added: '2.3'
local_follow:
description:
- This flag indicates that filesystem links in the source tree, if they
exist, should be followed.
type: bool
default: yes
version_added: '2.4'
remote_src:
description:
- If C(no), it will search for src at originating/master machine.
- If C(yes), it will go to the remote/target machine for the src.
type: bool
default: no
version_added: '2.3'
src:
description:
- Local path to a file to copy to the remote server; can be absolute or
relative.
- If path is a directory, it is copied (including the source folder name)
recursively to C(dest).
- If path is a directory and ends with "/", only the inside contents of
that directory are copied to the destination. Otherwise, if it does not
end with "/", the directory itself with all contents is copied.
- If path is a file and dest ends with "\", the file is copied to the
folder with the same filename.
- Required unless using C(content).
type: path
notes:
- Currently win_copy does not support copying symbolic links from both local to
remote and remote to remote.
- It is recommended that backslashes C(\) are used instead of C(/) when dealing
with remote paths.
- Because win_copy runs over WinRM, it is not a very efficient transfer
mechanism. If sending large files consider hosting them on a web service and
using M(win_get_url) instead.
seealso:
- module: assemble
- module: copy
- module: win_get_url
- module: win_robocopy
author:
- Jon Hawkesworth (@jhawkesworth)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Copy a single file
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
- name: Copy a single file, but keep a backup
win_copy:
src: /srv/myfiles/foo.conf
dest: C:\Temp\renamed-foo.conf
backup: yes
- name: Copy a single file keeping the filename
win_copy:
src: /src/myfiles/foo.conf
dest: C:\Temp\
- name: Copy folder to C:\Temp (results in C:\Temp\temp_files)
win_copy:
src: files/temp_files
dest: C:\Temp
- name: Copy folder contents recursively
win_copy:
src: files/temp_files/
dest: C:\Temp
- name: Copy a single file where the source is on the remote host
win_copy:
src: C:\Temp\foo.txt
dest: C:\ansible\foo.txt
remote_src: yes
- name: Copy a folder recursively where the source is on the remote host
win_copy:
src: C:\Temp
dest: C:\ansible
remote_src: yes
- name: Set the contents of a file
win_copy:
content: abc123
dest: C:\Temp\foo.txt
- name: Copy a single file as another user
win_copy:
src: NuGet.config
dest: '%AppData%\NuGet\NuGet.config'
vars:
ansible_become_user: user
ansible_become_password: pass
# The tmp dir must be set when using win_copy as another user
# This ensures the become user will have permissions for the operation
# Make sure to specify a folder both the ansible_user and the become_user have access to (i.e not %TEMP% which is user specific and requires Admin)
ansible_remote_tmp: 'c:\tmp'
'''
RETURN = r'''
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
dest:
description: Destination file/path.
returned: changed
type: str
sample: C:\Temp\
src:
description: Source file used for the copy on the target machine.
returned: changed
type: str
sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
checksum:
description: SHA1 checksum of the file after running copy.
returned: success, src is a file
type: str
sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
size:
description: Size of the target, after execution.
returned: changed, src is a file
type: int
sample: 1220
operation:
description: Whether a single file copy took place or a folder copy.
returned: success
type: str
sample: file_copy
original_basename:
description: Basename of the copied file.
returned: changed, src is a file
type: str
sample: foo.txt
'''
| gpl-3.0 |
colinligertwood/odoo | openerp/addons/base/module/wizard/base_module_configuration.py | 447 | 2274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class base_module_configuration(osv.osv_memory):
    """Transient wizard that drives the post-install configuration flow:
    launch the next pending/recurring configuration step, or show a
    "configuration done" dialog when none remain."""
    _name = "base.module.configuration"

    def start(self, cr, uid, ids, context=None):
        """Return the action for the next configuration wizard, or the
        completion dialog if every todo is handled."""
        pending = self.pool.get('ir.actions.todo').search(cr, uid,
            ['|', ('type','=','recurring'), ('state', '=', 'open')])
        if pending:
            # Run the config wizards
            return self.pool.get('res.config').start(cr, uid, ids,
                                                     context=context)
        # No wizard left to run: display the completion message instead.
        data_obj = self.pool.get('ir.model.data')
        result = data_obj._get_id(cr, uid, 'base', 'view_base_module_configuration_form')
        view_id = data_obj.browse(cr, uid, result).res_id
        return {
            'name': _('System Configuration done'),
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'base.module.configuration',
            'view_id': [view_id],
            'type': 'ir.actions.act_window',
            'target': 'new',
        }
base_module_configuration()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yohanko88/gem5-DC | tests/long/se/20.parser/test.py | 56 | 1742 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
m5.util.addToPath('../configs/common')
from cpu2000 import parser
# Build the SPEC CPU2000 197.parser workload with the reduced 'mdred' input
# set and attach it to the first CPU.  NOTE(review): `m5`, `isa`, `opsys`
# and `root` are not defined in this file — presumably injected into the
# namespace by the gem5 test harness that exec()s this script; confirm.
workload = parser(isa, opsys, 'mdred')
root.system.cpu[0].workload = workload.makeLiveProcess()
| bsd-3-clause |
ericholscher/django-tastypie | tastypie/bundle.py | 47 | 1084 | from __future__ import unicode_literals
from django.http import HttpRequest
# In a separate file to avoid circular imports...
class Bundle(object):
    """Carries an object together with its converted data through the
    ``dehydrate``/``hydrate`` cycle.

    The cycle reads and updates this state at several different points,
    so it is grouped into one small container.
    """

    def __init__(self, obj=None, data=None, request=None, related_obj=None,
                 related_name=None, objects_saved=None,
                 related_objects_to_save=None):
        self.obj = obj
        self.data = data or {}
        # A blank HttpRequest stands in when no (truthy) request is given.
        self.request = request or HttpRequest()
        self.related_obj = related_obj
        self.related_name = related_name
        self.errors = {}
        self.objects_saved = objects_saved or set()
        self.related_objects_to_save = related_objects_to_save or {}

    def __repr__(self):
        return "<Bundle for obj: '%s' and with data: '%s'>" % (self.obj,
                                                               self.data)
| bsd-3-clause |
frishberg/django | django/contrib/auth/backends.py | 51 | 6724 | from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
UserModel = get_user_model()
class ModelBackend(object):
    """
    Authenticates against settings.AUTH_USER_MODEL.
    """
    def authenticate(self, request, username=None, password=None, **kwargs):
        # The username may also arrive under the custom user model's
        # USERNAME_FIELD name instead of the generic "username" kwarg.
        if username is None:
            username = kwargs.get(UserModel.USERNAME_FIELD)
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            # Run the default password hasher once to reduce the timing
            # difference between an existing and a non-existing user (#20760).
            UserModel().set_password(password)
        else:
            if user.check_password(password) and self.user_can_authenticate(user):
                return user
        # Implicit None on failure: Django then tries the next backend.
    def user_can_authenticate(self, user):
        """
        Reject users with is_active=False. Custom user models that don't have
        that attribute are allowed.
        """
        is_active = getattr(user, 'is_active', None)
        return is_active or is_active is None
    def _get_user_permissions(self, user_obj):
        # Permissions assigned directly to the user.
        return user_obj.user_permissions.all()
    def _get_group_permissions(self, user_obj):
        # Permissions granted through any of the user's groups.
        user_groups_field = get_user_model()._meta.get_field('groups')
        user_groups_query = 'group__%s' % user_groups_field.related_query_name()
        return Permission.objects.filter(**{user_groups_query: user_obj})
    def _get_permissions(self, user_obj, obj, from_name):
        """
        Returns the permissions of `user_obj` from `from_name`. `from_name` can
        be either "group" or "user" to return permissions from
        `_get_group_permissions` or `_get_user_permissions` respectively.

        Results are cached on the user object ("_user_perm_cache" /
        "_group_perm_cache"); object-level permissions are not supported
        here (a non-None ``obj`` yields the empty set).
        """
        if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
            return set()
        perm_cache_name = '_%s_perm_cache' % from_name
        if not hasattr(user_obj, perm_cache_name):
            if user_obj.is_superuser:
                # Superusers implicitly hold every permission.
                perms = Permission.objects.all()
            else:
                perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)
            perms = perms.values_list('content_type__app_label', 'codename').order_by()
            setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms))
        return getattr(user_obj, perm_cache_name)
    def get_user_permissions(self, user_obj, obj=None):
        """
        Returns a set of permission strings the user `user_obj` has from their
        `user_permissions`.
        """
        return self._get_permissions(user_obj, obj, 'user')
    def get_group_permissions(self, user_obj, obj=None):
        """
        Returns a set of permission strings the user `user_obj` has from the
        groups they belong.
        """
        return self._get_permissions(user_obj, obj, 'group')
    def get_all_permissions(self, user_obj, obj=None):
        # Union of user- and group-granted permissions, cached per user.
        if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
            return set()
        if not hasattr(user_obj, '_perm_cache'):
            user_obj._perm_cache = self.get_user_permissions(user_obj)
            user_obj._perm_cache.update(self.get_group_permissions(user_obj))
        return user_obj._perm_cache
    def has_perm(self, user_obj, perm, obj=None):
        if not user_obj.is_active:
            return False
        return perm in self.get_all_permissions(user_obj, obj)
    def has_module_perms(self, user_obj, app_label):
        """
        Returns True if user_obj has any permissions in the given app_label.
        """
        if not user_obj.is_active:
            return False
        for perm in self.get_all_permissions(user_obj):
            # Permission strings are "<app_label>.<codename>".
            if perm[:perm.index('.')] == app_label:
                return True
        return False
    def get_user(self, user_id):
        # Returns None for unknown ids and for users who may not authenticate.
        try:
            user = UserModel._default_manager.get(pk=user_id)
        except UserModel.DoesNotExist:
            return None
        return user if self.user_can_authenticate(user) else None
class AllowAllUsersModelBackend(ModelBackend):
    # Variant of ModelBackend that skips the is_active check entirely.
    def user_can_authenticate(self, user):
        """Allow any user, including inactive ones, to authenticate."""
        return True
class RemoteUserBackend(ModelBackend):
    """
    This backend is to be used in conjunction with the ``RemoteUserMiddleware``
    found in the middleware module of this package, and is used when the server
    is handling authentication outside of Django.

    By default, the ``authenticate`` method creates ``User`` objects for
    usernames that don't already exist in the database. Subclasses can disable
    this behavior by setting the ``create_unknown_user`` attribute to
    ``False``.
    """
    # Create a User object if not already in the database?
    create_unknown_user = True
    def authenticate(self, request, remote_user):
        """
        The username passed as ``remote_user`` is considered trusted. This
        method simply returns the ``User`` object with the given username,
        creating a new ``User`` object if ``create_unknown_user`` is ``True``.

        Returns None if ``create_unknown_user`` is ``False`` and a ``User``
        object with the given username is not found in the database.
        """
        if not remote_user:
            return
        user = None
        username = self.clean_username(remote_user)
        # Note that this could be accomplished in one try-except clause, but
        # instead we use get_or_create when creating unknown users since it has
        # built-in safeguards for multiple threads.
        if self.create_unknown_user:
            user, created = UserModel._default_manager.get_or_create(**{
                UserModel.USERNAME_FIELD: username
            })
            if created:
                # Hook for subclasses to initialize the freshly created user.
                user = self.configure_user(user)
        else:
            try:
                user = UserModel._default_manager.get_by_natural_key(username)
            except UserModel.DoesNotExist:
                pass
        return user if self.user_can_authenticate(user) else None
    def clean_username(self, username):
        """
        Performs any cleaning on the "username" prior to using it to get or
        create the user object. Returns the cleaned username.

        By default, returns the username unchanged.
        """
        return username
    def configure_user(self, user):
        """
        Configures a user after creation and returns the updated user.

        By default, returns the user unmodified.
        """
        return user
class AllowAllUsersRemoteUserBackend(RemoteUserBackend):
    # Variant of RemoteUserBackend that skips the is_active check entirely.
    def user_can_authenticate(self, user):
        """Allow any user, including inactive ones, to authenticate."""
        return True
| bsd-3-clause |
EricNeedham/assignment-1 | venv/lib/python2.7/site-packages/sqlalchemy/sql/visitors.py | 32 | 9943 | # sql/visitors.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/2008/01/23/expression-transformations/
"""
from collections import deque
from .. import util
import operator
from .. import exc
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'traverse_depthfirst',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
    """Metaclass which assigns a `_compiler_dispatch` method to classes
    having a `__visit_name__` attribute.

    The _compiler_dispatch attribute becomes an instance method which
    looks approximately like the following::

        def _compiler_dispatch (self, visitor, **kw):
            '''Look for an attribute named "visit_" + self.__visit_name__
            on the visitor, and call it with the same kw params.'''
            visit_attr = 'visit_%s' % self.__visit_name__
            return getattr(visitor, visit_attr)(self, **kw)

    Classes having no __visit_name__ attribute will remain unaffected.
    """
    def __init__(cls, clsname, bases, clsdict):
        # 'Visitable' itself is the abstract base: no dispatcher is
        # generated for it, only for subclasses that define __visit_name__
        # (directly or via inheritance).
        if clsname != 'Visitable' and \
                hasattr(cls, '__visit_name__'):
            _generate_dispatch(cls)
        super(VisitableType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatch(cls):
    """Return an optimized visit dispatch function for the cls
    for use by the compiler.

    Attaches the generated function to ``cls._compiler_dispatch``.  Only
    classes that define ``__visit_name__`` in their own ``__dict__`` (not
    merely inherit it) get a fresh dispatcher.
    """
    if '__visit_name__' in cls.__dict__:
        visit_name = cls.__visit_name__
        if isinstance(visit_name, str):
            # There is an optimization opportunity here because the
            # the string name of the class's __visit_name__ is known at
            # this early stage (import time) so it can be pre-constructed.
            getter = operator.attrgetter("visit_%s" % visit_name)

            def _compiler_dispatch(self, visitor, **kw):
                try:
                    meth = getter(visitor)
                except AttributeError:
                    raise exc.UnsupportedCompilationError(visitor, cls)
                else:
                    return meth(self, **kw)
        else:
            # The optimization opportunity is lost for this case because the
            # __visit_name__ is not yet a string. As a result, the visit
            # string has to be recalculated with each compilation.
            def _compiler_dispatch(self, visitor, **kw):
                visit_attr = 'visit_%s' % self.__visit_name__
                try:
                    meth = getattr(visitor, visit_attr)
                except AttributeError:
                    raise exc.UnsupportedCompilationError(visitor, cls)
                else:
                    return meth(self, **kw)
        _compiler_dispatch.__doc__ = \
            """Look for an attribute named "visit_" + self.__visit_name__
            on the visitor, and call it with the same kw params.
            """
        cls._compiler_dispatch = _compiler_dispatch
class Visitable(util.with_metaclass(VisitableType, object)):
    """Base class for visitable objects, applies the
    ``VisitableType`` metaclass.

    Subclasses that define ``__visit_name__`` receive a generated
    ``_compiler_dispatch`` method (see ``_generate_dispatch``).
    """
class ClauseVisitor(object):
    """Base class for visitor objects which can traverse using
    the traverse() function.
    """
    # Options forwarded to get_children() during traversal.
    __traverse_options__ = {}
    def traverse_single(self, obj, **kw):
        # Dispatch *obj* to the first visitor in the chain that implements
        # a matching visit_<__visit_name__> method; returns its result.
        for v in self._visitor_iterator:
            meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
            if meth:
                return meth(obj, **kw)
    def iterate(self, obj):
        """traverse the given expression structure, returning an iterator
        of all elements.
        """
        return iterate(obj, self.__traverse_options__)
    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        return traverse(obj, self.__traverse_options__, self._visitor_dict)
    @util.memoized_property
    def _visitor_dict(self):
        # Map "name" -> bound self.visit_name for every visit_* method;
        # computed once per instance (memoized).
        visitors = {}
        for name in dir(self):
            if name.startswith('visit_'):
                visitors[name[6:]] = getattr(self, name)
        return visitors
    @property
    def _visitor_iterator(self):
        """iterate through this visitor and each 'chained' visitor."""
        v = self
        while v:
            yield v
            v = getattr(v, '_next', None)
    def chain(self, visitor):
        """'chain' an additional ClauseVisitor onto this ClauseVisitor.

        the chained visitor will receive all visit events after this one.
        """
        # Append at the tail of the existing chain, not directly after self.
        tail = list(self._visitor_iterator)[-1]
        tail._next = visitor
        return self
class CloningVisitor(ClauseVisitor):
    """Base class for visitor objects which can traverse using
    the cloned_traverse() function.
    """
    def copy_and_process(self, list_):
        """Apply cloned traversal to the given list of elements, and return
        the new list.
        """
        return [self.traverse(x) for x in list_]
    def traverse(self, obj):
        """traverse and visit the given expression structure.

        Unlike ClauseVisitor.traverse, this returns a cloned structure;
        visitors receive the clones, not the originals.
        """
        return cloned_traverse(
            obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
    """Base class for visitor objects which can traverse using
    the replacement_traverse() function.
    """
    def replace(self, elem):
        """receive pre-copied elements during a cloning traversal.

        If the method returns a new element, the element is used
        instead of creating a simple copy of the element. Traversal
        will halt on the newly returned element if it is re-encountered.
        """
        return None
    def traverse(self, obj):
        """traverse and visit the given expression structure."""
        def replace(elem):
            # Offer the element to each visitor in the chain; the first
            # non-None replacement wins.
            for v in self._visitor_iterator:
                e = v.replace(elem)
                if e is not None:
                    return e
        return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be breadth-first; *opts* is passed as
    keyword arguments to each element's ``get_children()``.
    """
    queue = deque([obj])
    while queue:
        element = queue.popleft()
        yield element
        queue.extend(element.get_children(**opts))
def iterate_depthfirst(obj, opts):
    """traverse the given expression structure, returning an iterator.

    traversal is configured to be depth-first; *opts* is passed as
    keyword arguments to each element's ``get_children()``.
    """
    pending = deque([obj])
    ordering = deque()
    while pending:
        element = pending.pop()
        # Prepend, so elements come out deepest-first.
        ordering.appendleft(element)
        pending.extend(element.get_children(**opts))
    return iter(ordering)
def traverse_using(iterator, obj, visitors):
    """visit the given expression structure using the given iterator of
    objects.

    *visitors* maps a ``__visit_name__`` string to a callable; elements
    without a matching entry are skipped.  Returns *obj* unchanged.
    """
    for element in iterator:
        visit_fn = visitors.get(element.__visit_name__, None)
        if visit_fn:
            visit_fn(element)
    return obj
def traverse(obj, opts, visitors):
    """traverse and visit the given expression structure using the default
    (breadth-first) iterator.

    *opts* is forwarded to ``iterate``; *visitors* maps ``__visit_name__``
    strings to callables, as in ``traverse_using``.  Returns *obj*.
    """
    return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
    """traverse and visit the given expression structure using the
    depth-first iterator.

    Same contract as ``traverse``, but children are visited before their
    parents (see ``iterate_depthfirst``).  Returns *obj*.
    """
    return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
    """clone the given expression structure, allowing
    modifications by visitors.

    Elements listed in ``opts['stop_on']`` are shared (not cloned).
    Visitors are invoked on each *clone*, immediately after its internals
    are copied.  The memo is keyed by ``id()`` so a shared sub-element is
    cloned only once.
    """
    cloned = {}  # id(original element) -> its clone
    stop_on = set(opts.get('stop_on', []))

    def clone(elem):
        if elem in stop_on:
            return elem
        else:
            if id(elem) not in cloned:
                cloned[id(elem)] = newelem = elem._clone()
                # Recursively clone children before the visitor sees newelem.
                newelem._copy_internals(clone=clone)
                meth = visitors.get(newelem.__visit_name__, None)
                if meth:
                    meth(newelem)
            return cloned[id(elem)]
    if obj is not None:
        obj = clone(obj)
    return obj
def replacement_traverse(obj, opts, replace):
    """clone the given expression structure, allowing element
    replacement by a given replacement function.

    ``replace(elem)`` may return a substitute element; the substitute is
    used as-is (not descended into again — its id is added to ``stop_on``).
    Elements whose id is in ``opts['stop_on']``, or that carry the
    'no_replacement_traverse' annotation, are shared unchanged.
    """
    cloned = {}  # original element -> its clone (keyed by hash, unlike cloned_traverse)
    stop_on = set([id(x) for x in opts.get('stop_on', [])])

    def clone(elem, **kw):
        if id(elem) in stop_on or \
                'no_replacement_traverse' in elem._annotations:
            return elem
        else:
            newelem = replace(elem)
            if newelem is not None:
                # Halt traversal on the replacement if re-encountered.
                stop_on.add(id(newelem))
                return newelem
            else:
                if elem not in cloned:
                    cloned[elem] = newelem = elem._clone()
                    newelem._copy_internals(clone=clone, **kw)
                return cloned[elem]
    if obj is not None:
        obj = clone(obj, **opts)
    return obj
| mit |
xxhank/namebench | libnamebench/better_webbrowser.py | 175 | 4191 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for webbrowser library, to invoke the http handler on win32."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os.path
import subprocess
import sys
import traceback
import webbrowser
import util
def output(string):
  """Print a status/diagnostic line to stdout.

  Uses the parenthesized form so the statement is valid under both
  Python 2 (where it was previously the py2-only ``print string``) and
  Python 3; behavior is identical for a single argument.
  """
  print(string)
def create_win32_http_cmd(url):
  """Create a command-line tuple to launch a web browser for a given URL.

  Args:
    url: string

  Returns:
    tuple of: (executable, arg1, arg2, ...), or False if no usable HTTP
    handler could be determined from the registry.

  The per-user handler (HKEY_CURRENT_USER) is preferred over the
  machine-wide one (HKEY_LOCAL_MACHINE).

  At the moment, this ignores all default arguments to the browser.
  TODO(tstromberg): Properly parse the command-line arguments.
  """
  browser_type = None
  try:
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                          'Software\Classes\http\shell\open\command')
    browser_type = 'user'
  except WindowsError:
    key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                          'Software\Classes\http\shell\open\command')
    browser_type = 'machine'
  except Exception:
    # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit are no
    # longer swallowed.  Any other registry failure still means "no handler".
    return False
  cmd = _winreg.EnumValue(key, 0)[1]
  # "C:\blah blah\iexplore.exe" -nohome
  # "C:\blah blah\firefox.exe" -requestPending -osint -url "%1"
  if '"' in cmd:
    executable = cmd.split('"')[1]
  else:
    executable = cmd.split(' ')[0]
  if not os.path.exists(executable):
    output('$ Default HTTP browser does not exist: %s' % executable)
    return False
  else:
    output('$ %s HTTP handler: %s' % (browser_type, executable))
    return (executable, url)
def open(url):
  """Opens a URL, overriding the normal webbrowser.open methods for sanity.

  NOTE: intentionally shadows the builtin ``open`` within this module.
  The bare excepts are a deliberate best-effort fallback cascade
  (webbrowser -> /usr/bin/open on OS X -> windows-default controller);
  each failure is logged via output() and the next mechanism is tried.
  """
  try:
    webbrowser.open(url, new=1, autoraise=True)
  # If the user is missing the osascript binary - see
  # http://code.google.com/p/namebench/issues/detail?id=88
  except:
    output('Failed to open: [%s]: %s' % (url, util.GetLastExceptionString()))
    if os.path.exists('/usr/bin/open'):
      try:
        output('trying open: %s' % url)
        p = subprocess.Popen(('open', url))
        p.wait()
      except:
        output('open did not seem to work: %s' % util.GetLastExceptionString())
    elif sys.platform[:3] == 'win':
      try:
        output('trying default Windows controller: %s' % url)
        controller = webbrowser.get('windows-default')
        controller.open_new(url)
      except:
        output('WindowsController did not work: %s' % util.GetLastExceptionString())
# *NOTE*: EVIL IMPORT SIDE EFFECTS AHEAD!
#
# If we are running on Windows, register the WindowsHttpDefault class.
if sys.platform[:3] == 'win':
  import _winreg
  # We don't want to load this class by default, because Python 2.4 doesn't have BaseBrowser.
  class WindowsHttpDefault(webbrowser.BaseBrowser):
    """Provide an alternate open class for Windows user, using the http handler."""
    def open(self, url, new=0, autoraise=1):
      # Returns True on success, False so webbrowser falls through to the
      # next registered browser on failure.
      command_args = create_win32_http_cmd(url)
      if not command_args:
        output('$ Could not find HTTP handler')
        return False
      output('command_args:')
      output(command_args)
      # Avoid some unicode path issues by moving our current directory
      old_pwd = os.getcwd()
      os.chdir('C:\\')
      try:
        _unused = subprocess.Popen(command_args)
        os.chdir(old_pwd)
        return True
      except:
        traceback.print_exc()
        output('$ Failed to run HTTP handler, trying next browser.')
        os.chdir(old_pwd)
        return False
  # update_tryorder=-1 puts this handler at the head of webbrowser's list.
  webbrowser.register('windows-http', WindowsHttpDefault, update_tryorder=-1)
| apache-2.0 |
spirrello/spirrello-pynet-work | applied_python/lib64/python2.7/site-packages/yaml/representer.py | 359 | 17642 |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
import sys, copy_reg, types
class RepresenterError(YAMLError):
    """Raised when an object cannot be converted to a YAML node."""
    pass
class BaseRepresenter(object):
    """Maps native Python objects to YAML node objects.

    Per-type representer callables are registered in ``yaml_representers``
    (exact type match) and ``yaml_multi_representers`` (matched against the
    type's MRO).  Subclasses get their own copy of the registries the first
    time they register a representer.
    """
    yaml_representers = {}
    yaml_multi_representers = {}
    def __init__(self, default_style=None, default_flow_style=None):
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        # Maps id(obj) -> node so shared/aliased objects serialize once.
        self.represented_objects = {}
        # Keeps strong references alive so id() values stay unique while a
        # document is being represented.
        self.object_keeper = []
        self.alias_key = None
    def represent(self, data):
        """Represent ``data`` as a node tree and hand it to the serializer."""
        node = self.represent_data(data)
        self.serialize(node)
        # Reset per-document state after serializing.
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None
    def get_classobj_bases(self, cls):
        # Flatten the base-class graph of an old-style (classic) class.
        bases = [cls]
        for base in cls.__bases__:
            bases.extend(self.get_classobj_bases(base))
        return bases
    def represent_data(self, data):
        """Convert a single Python object into a YAML node."""
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            # Already-seen objects are returned as the same node so the
            # serializer can emit an anchor/alias pair.
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        data_types = type(data).__mro__
        # Old-style instances have no useful __mro__; prepend the classic
        # class hierarchy instead.
        if type(data) is types.InstanceType:
            data_types = self.get_classobj_bases(data.__class__)+list(data_types)
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            # Fall back to multi-representers (subclass match), then to the
            # None-keyed catch-all representers.
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, unicode(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node
    # Pre-2.4 classmethod style: defined as a plain function, then wrapped.
    def add_representer(cls, data_type, representer):
        # Copy-on-write so registering on a subclass does not mutate the
        # parent class's table.
        if not 'yaml_representers' in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer
    add_representer = classmethod(add_representer)
    def add_multi_representer(cls, data_type, representer):
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer
    add_multi_representer = classmethod(add_multi_representer)
    def represent_scalar(self, tag, value, style=None):
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node
    def represent_sequence(self, tag, sequence, flow_style=None):
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        # Register the node *before* recursing so cycles resolve to it.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        # best_style tracks whether every item is a plain scalar, in which
        # case flow style is preferred when no default is configured.
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
    def represent_mapping(self, tag, mapping, flow_style=None):
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        # dict-likes are converted to a sorted item list for stable output.
        if hasattr(mapping, 'items'):
            mapping = mapping.items()
            mapping.sort()
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node
    def ignore_aliases(self, data):
        # Base implementation never suppresses anchors; subclasses override.
        return False
class SafeRepresenter(BaseRepresenter):
    """Representer restricted to standard YAML tags (safe for round trips)."""
    def ignore_aliases(self, data):
        # Immutable scalars never need anchors/aliases.
        if data in [None, ()]:
            return True
        if isinstance(data, (str, unicode, bool, int, float)):
            return True
    def represent_none(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:null',
                u'null')
    def represent_str(self, data):
        # Byte strings: try ASCII, then UTF-8; otherwise fall back to a
        # base64-encoded !!binary block scalar.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data)
    def represent_bool(self, data):
        if data:
            value = u'true'
        else:
            value = u'false'
        return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
    def represent_int(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    def represent_long(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
    # Compute a float infinity portably by squaring until repr stabilizes.
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value
    def represent_float(self, data):
        # `data != data` is the NaN test; the second clause is presumably a
        # guard for values with degenerate comparisons (it can never be true
        # for an ordinary float) -- kept as upstream wrote it.
        if data != data or (data == 0.0 and data == 1.0):
            value = u'.nan'
        elif data == self.inf_value:
            value = u'.inf'
        elif data == -self.inf_value:
            value = u'-.inf'
        else:
            value = unicode(repr(data)).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts.  For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag.  We fix this by adding
            # '.0' before the 'e' symbol.
            if u'.' not in value and u'e' in value:
                value = value.replace(u'e', u'.0e', 1)
        return self.represent_scalar(u'tag:yaml.org,2002:float', value)
    def represent_list(self, data):
        # The commented-out code (kept from upstream) once emitted !!pairs
        # for lists of 2-tuples; note the extra indent on the live return.
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
            return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
    def represent_dict(self, data):
        return self.represent_mapping(u'tag:yaml.org,2002:map', data)
    def represent_set(self, data):
        # A YAML set is a mapping whose values are all null.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping(u'tag:yaml.org,2002:set', value)
    def represent_date(self, data):
        value = unicode(data.isoformat())
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_datetime(self, data):
        value = unicode(data.isoformat(' '))
        return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)
    def represent_undefined(self, data):
        # Registered as the catch-all: unknown types are an error for the
        # safe representer.
        raise RepresenterError("cannot represent an object: %s" % data)
# Register the default mappings from native Python types to safe YAML tags.
# The None key at the end installs represent_undefined as the catch-all.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
        SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
        SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    """Extends SafeRepresenter with python/* tags for arbitrary objects."""
    def represent_str(self, data):
        # Like SafeRepresenter.represent_str, but non-ASCII UTF-8 byte
        # strings get the python/str tag so they round-trip as str.
        tag = None
        style = None
        try:
            data = unicode(data, 'ascii')
            tag = u'tag:yaml.org,2002:str'
        except UnicodeDecodeError:
            try:
                data = unicode(data, 'utf-8')
                tag = u'tag:yaml.org,2002:python/str'
            except UnicodeDecodeError:
                data = data.encode('base64')
                tag = u'tag:yaml.org,2002:binary'
                style = '|'
        return self.represent_scalar(tag, data, style=style)
    def represent_unicode(self, data):
        # ASCII-only unicode needs the python/unicode tag to round-trip as
        # unicode (a plain !!str would load as str on Python 2).
        tag = None
        try:
            data.encode('ascii')
            tag = u'tag:yaml.org,2002:python/unicode'
        except UnicodeEncodeError:
            tag = u'tag:yaml.org,2002:str'
        return self.represent_scalar(tag, data)
    def represent_long(self, data):
        # Longs that fit in an int are emitted as plain !!int.
        tag = u'tag:yaml.org,2002:int'
        if int(data) is not data:
            tag = u'tag:yaml.org,2002:python/long'
        return self.represent_scalar(tag, unicode(data))
    def represent_complex(self, data):
        if data.imag == 0.0:
            data = u'%r' % data.real
        elif data.real == 0.0:
            data = u'%rj' % data.imag
        elif data.imag > 0:
            data = u'%r+%rj' % (data.real, data.imag)
        else:
            data = u'%r%rj' % (data.real, data.imag)
        return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
    def represent_tuple(self, data):
        return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
    def represent_name(self, data):
        # Classes and functions serialize as a dotted import path.
        name = u'%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
    def represent_module(self, data):
        return self.represent_scalar(
                u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
    def represent_instance(self, data):
        # For instances of classic classes, we use __getinitargs__ and
        # __getstate__ to serialize the data.
        # If data.__getinitargs__ exists, the object must be reconstructed by
        # calling cls(**args), where args is a tuple returned by
        # __getinitargs__. Otherwise, the cls.__init__ method should never be
        # called and the class instance is created by instantiating a trivial
        # class and assigning to the instance's __class__ variable.
        # If data.__getstate__ exists, it returns the state of the object.
        # Otherwise, the state of the object is data.__dict__.
        # We produce either a !!python/object or !!python/object/new node.
        # If data.__getinitargs__ does not exist and state is a dictionary, we
        # produce a !!python/object node . Otherwise we produce a
        # !!python/object/new node.
        cls = data.__class__
        class_name = u'%s.%s' % (cls.__module__, cls.__name__)
        args = None
        state = None
        if hasattr(data, '__getinitargs__'):
            args = list(data.__getinitargs__())
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__
        if args is None and isinstance(state, dict):
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+class_name, state)
        if isinstance(state, dict) and not state:
            return self.represent_sequence(
                    u'tag:yaml.org,2002:python/object/new:'+class_name, args)
        value = {}
        if args:
            value['args'] = args
        value['state'] = state
        return self.represent_mapping(
                u'tag:yaml.org,2002:python/object/new:'+class_name, value)
    def represent_object(self, data):
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copy_reg.dispatch_table:
            reduce = copy_reg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent object: %r" % data)
        # Pad the reduce tuple to exactly five entries.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = u'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = u'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = u'%s.%s' % (function.__module__, function.__name__)
        # Compact forms when only a simple dict state (or only args) exists.
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                    u'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems  \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)
# Override/extend the safe registrations with the python/* representers.
# The multi-representers handle old-style instances and, via the object
# catch-all, any new-style object through the __reduce__ protocol.
Representer.add_representer(str,
        Representer.represent_str)
Representer.add_representer(unicode,
        Representer.represent_unicode)
Representer.add_representer(long,
        Representer.represent_long)
Representer.add_representer(complex,
        Representer.represent_complex)
Representer.add_representer(tuple,
        Representer.represent_tuple)
Representer.add_representer(type,
        Representer.represent_name)
Representer.add_representer(types.ClassType,
        Representer.represent_name)
Representer.add_representer(types.FunctionType,
        Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)
Representer.add_representer(types.ModuleType,
        Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
        Representer.represent_instance)
Representer.add_multi_representer(object,
        Representer.represent_object)
| gpl-3.0 |
Drudenhaus/aws-ec2rescue-linux | lib/botocore/validate.py | 2 | 11501 | """User input parameter validation.
This module handles user input parameter validation
against a provided input model.
Note that the objects in this module do *not* mutate any
arguments. No type version happens here. It is up to another
layer to properly convert arguments to any required types.
Validation Errors
-----------------
"""
from botocore.compat import six
import decimal
import json
from datetime import datetime
from botocore.utils import parse_to_aware_datetime
from botocore.utils import is_json_value_header
from botocore.exceptions import ParamValidationError
def validate_parameters(params, shape):
    """Check ``params`` against ``shape`` and raise on any violation.

    Convenience wrapper around ``ParamValidator``; instantiate that class
    directly for more control.  All validation failures are collected and
    raised together as a single ``ParamValidationError``.  When the input
    is valid no exception is raised and ``None`` is returned.

    :param params: The user provided input parameters.
    :type shape: botocore.model.Shape
    :param shape: The schema which the input parameters should adhere to.
    :raise: ParamValidationError
    """
    report = ParamValidator().validate(params, shape)
    if report.has_errors():
        raise ParamValidationError(report=report.generate_report())
def type_check(valid_types):
    """Decorator factory for validator methods.

    The wrapped validator only runs when ``param`` is an instance of one of
    ``valid_types``; otherwise an 'invalid type' error is reported on
    ``errors`` and the validator body is skipped (returning None).
    """
    def decorate(func):
        def guarded(self, param, shape, errors, name):
            if isinstance(param, valid_types):
                return func(self, param, shape, errors, name)
            errors.report(name, 'invalid type', param=param,
                          valid_types=[six.text_type(t) for t in valid_types])
        return guarded
    return decorate
def range_check(name, value, shape, error_type, errors):
    """Report ``error_type`` on ``errors`` when ``value`` is below the
    minimum allowed by ``shape``.

    The minimum comes from the shape's ``min`` metadata when present;
    otherwise members bound to the host (``hostLabel`` serialization) have
    an implicit minimum of 1.  No maximum is enforced here.
    """
    lower = float('-inf')
    upper = float('inf')
    if 'min' in shape.metadata:
        lower = shape.metadata['min']
    elif hasattr(shape, 'serialization') and shape.serialization.get('hostLabel'):
        # Members that can be bound to the host have an implicit min of 1.
        lower = 1
    if value < lower:
        errors.report(name, error_type, param=value,
                      valid_range=[lower, upper])
class ValidationErrors(object):
    """Collects validation failures and formats them into a readable report."""

    def __init__(self):
        self._errors = []

    def has_errors(self):
        return bool(self._errors)

    def report(self, name, reason, **kwargs):
        """Record one failure for the parameter path ``name``."""
        self._errors.append((reason, name, kwargs))

    def generate_report(self):
        """Render every recorded failure, one message per line."""
        return '\n'.join(self._format_error(error) for error in self._errors)

    def _get_name(self, name):
        # An empty path means the failure is on the top-level input itself;
        # a leading '.' is an artifact of path joining and is stripped.
        if not name:
            return 'input'
        if name.startswith('.'):
            return name[1:]
        return name

    def _format_error(self, error):
        error_type, name, extra = error
        name = self._get_name(name)
        if error_type == 'missing required field':
            return 'Missing required parameter in %s: "%s"' % (
                name, extra['required_name'])
        if error_type == 'unknown field':
            return 'Unknown parameter in %s: "%s", must be one of: %s' % (
                name, extra['unknown_param'],
                ', '.join(extra['valid_names']))
        if error_type == 'invalid type':
            return ('Invalid type for parameter %s, value: %s, type: %s, '
                    'valid types: %s' % (name, extra['param'],
                                         str(type(extra['param'])),
                                         ', '.join(extra['valid_types'])))
        if error_type == 'invalid range':
            lo, hi = extra['valid_range']
            return ('Invalid range for parameter %s, value: %s, valid range: '
                    '%s-%s' % (name, extra['param'], lo, hi))
        if error_type == 'invalid length':
            lo, hi = extra['valid_range']
            return ('Invalid length for parameter %s, value: %s, valid range: '
                    '%s-%s' % (name, extra['param'], lo, hi))
        if error_type == 'unable to encode to json':
            return 'Invalid parameter %s must be json serializable: %s' \
                % (name, extra['type_error'])
class ParamValidator(object):
    """Validates parameters against a shape model."""
    def validate(self, params, shape):
        """Validate parameters against a shape model.
        This method will validate the parameters against a provided shape model.
        All errors will be collected before returning to the caller. This means
        that this method will not stop at the first error, it will return all
        possible errors.
        :param params: User provided dict of parameters
        :param shape: A shape model describing the expected input.
        :return: A ValidationErrors instance holding any collected errors.
        """
        errors = ValidationErrors()
        self._validate(params, shape, errors, name='')
        return errors
    def _check_special_validation_cases(self, shape):
        # Returns a validator override for shapes that need one (currently
        # only jsonvalue-tagged string headers), else None.
        if is_json_value_header(shape):
            return self._validate_jsonvalue_string
    def _validate(self, params, shape, errors, name):
        # Dispatch by shape type: special-case validators first, otherwise
        # look up _validate_<type_name> by naming convention.
        special_validator = self._check_special_validation_cases(shape)
        if special_validator:
            special_validator(params, shape, errors, name)
        else:
            getattr(self, '_validate_%s' % shape.type_name)(
                params, shape, errors, name)
    def _validate_jsonvalue_string(self, params, shape, errors, name):
        # Check to see if a value marked as a jsonvalue can be dumped to
        # a json string.
        try:
            json.dumps(params)
        except (ValueError, TypeError) as e:
            errors.report(name, 'unable to encode to json', type_error=e)
    @type_check(valid_types=(dict,))
    def _validate_structure(self, params, shape, errors, name):
        # Validate required fields.
        for required_member in shape.metadata.get('required', []):
            if required_member not in params:
                errors.report(name, 'missing required field',
                              required_name=required_member, user_params=params)
        members = shape.members
        known_params = []
        # Validate known params.
        for param in params:
            if param not in members:
                errors.report(name, 'unknown field', unknown_param=param,
                              valid_names=list(members))
            else:
                known_params.append(param)
        # Validate structure members (recursing with a dotted name path).
        for param in known_params:
            self._validate(params[param], shape.members[param],
                           errors, '%s.%s' % (name, param))
    @type_check(valid_types=six.string_types)
    def _validate_string(self, param, shape, errors, name):
        # Validate range.  For a string, the min/max constraints
        # are of the string length.
        # Looks like:
        # "WorkflowId":{
        #   "type":"string",
        #   "min":1,
        #   "max":256
        #  }
        range_check(name, len(param), shape, 'invalid length', errors)
    @type_check(valid_types=(list, tuple))
    def _validate_list(self, param, shape, errors, name):
        member_shape = shape.member
        range_check(name, len(param), shape, 'invalid length', errors)
        for i, item in enumerate(param):
            self._validate(item, member_shape, errors, '%s[%s]' % (name, i))
    @type_check(valid_types=(dict,))
    def _validate_map(self, param, shape, errors, name):
        key_shape = shape.key
        value_shape = shape.value
        for key, value in param.items():
            self._validate(key, key_shape, errors, "%s (key: %s)"
                           % (name, key))
            self._validate(value, value_shape, errors, '%s.%s' % (name, key))
    @type_check(valid_types=six.integer_types)
    def _validate_integer(self, param, shape, errors, name):
        range_check(name, param, shape, 'invalid range', errors)
    def _validate_blob(self, param, shape, errors, name):
        if isinstance(param, (bytes, bytearray, six.text_type)):
            return
        elif hasattr(param, 'read'):
            # File like objects are also allowed for blob types.
            return
        else:
            errors.report(name, 'invalid type', param=param,
                          valid_types=[str(bytes), str(bytearray),
                                       'file-like object'])
    @type_check(valid_types=(bool,))
    def _validate_boolean(self, param, shape, errors, name):
        pass
    @type_check(valid_types=(float, decimal.Decimal) + six.integer_types)
    def _validate_double(self, param, shape, errors, name):
        range_check(name, param, shape, 'invalid range', errors)
    # Floats share the double validator.
    _validate_float = _validate_double
    @type_check(valid_types=six.integer_types)
    def _validate_long(self, param, shape, errors, name):
        range_check(name, param, shape, 'invalid range', errors)
    def _validate_timestamp(self, param, shape, errors, name):
        # We don't use @type_check because datetimes are a bit
        # more flexible. You can either provide a datetime
        # object, or a string that parses to a datetime.
        is_valid_type = self._type_check_datetime(param)
        if not is_valid_type:
            valid_type_names = [six.text_type(datetime), 'timestamp-string']
            errors.report(name, 'invalid type', param=param,
                          valid_types=valid_type_names)
    def _type_check_datetime(self, value):
        try:
            parse_to_aware_datetime(value)
            return True
        except (TypeError, ValueError, AttributeError):
            # Yes, dateutil can sometimes raise an AttributeError
            # when parsing timestamps.
            return False
class ParamValidationDecorator(object):
    """Serializer wrapper that validates parameters before delegating.

    Wraps a serializer's ``serialize_to_request`` so that, whenever the
    operation declares an input shape, the parameters are validated first
    and a ``ParamValidationError`` is raised on any failure.
    """
    def __init__(self, param_validator, serializer):
        self._param_validator = param_validator
        self._serializer = serializer

    def serialize_to_request(self, parameters, operation_model):
        shape = operation_model.input_shape
        if shape is not None:
            report = self._param_validator.validate(parameters, shape)
            if report.has_errors():
                raise ParamValidationError(report=report.generate_report())
        return self._serializer.serialize_to_request(parameters,
                                                     operation_model)
| apache-2.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/tkinter/test/test_tkinter/test_loadtk.py | 162 | 1503 | import os
import sys
import unittest
import test.support as test_support
from tkinter import Tcl, TclError
# Skip this whole module unless the test run enables the 'gui' resource.
test_support.requires('gui')
class TkLoadTest(unittest.TestCase):
    """Tests for Tcl().loadtk() with and without a usable display."""
    @unittest.skipIf('DISPLAY' not in os.environ, 'No $DISPLAY set.')
    def testLoadTk(self):
        # Before loadtk(), Tk-only commands such as winfo must fail.
        tcl = Tcl()
        self.assertRaises(TclError,tcl.winfo_geometry)
        tcl.loadtk()
        self.assertEqual('1x1+0+0', tcl.winfo_geometry())
        tcl.destroy()
    def testLoadTkFailure(self):
        # Verify loadtk() raises TclError when no display is available.
        old_display = None
        if sys.platform.startswith(('win', 'darwin', 'cygwin')):
            # no failure possible on windows?
            # XXX Maybe on tk older than 8.4.13 it would be possible,
            # see tkinter.h.
            return
        with test_support.EnvironmentVarGuard() as env:
            if 'DISPLAY' in os.environ:
                del env['DISPLAY']
                # on some platforms, deleting environment variables
                # doesn't actually carry through to the process level
                # because they don't support unsetenv
                # If that's the case, abort.
                with os.popen('echo $DISPLAY') as pipe:
                    display = pipe.read().strip()
                if display:
                    return
            tcl = Tcl()
            self.assertRaises(TclError, tcl.winfo_geometry)
            self.assertRaises(TclError, tcl.loadtk)
# Test classes exported for the GUI test collection, plus a direct runner.
tests_gui = (TkLoadTest, )
if __name__ == "__main__":
    test_support.run_unittest(*tests_gui)
| apache-2.0 |
koyuawsmbrtn/eclock | windows/Python27/Lib/site-packages/pygame/tests/run_tests__tests/incomplete/fake_2_test.py | 18 | 1195 | if __name__ == '__main__':
import sys
import os
pkg_dir = (os.path.split(
os.path.split(
os.path.split(
os.path.abspath(__file__))[0])[0])[0])
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils import unittest
else:
from test import test_utils
from test.test_utils import unittest
class KeyModuleTest(unittest.TestCase):
    """Placeholder tests for the pygame.key module.

    Most cases assert True until implemented; the remaining ones flag
    themselves via test_utils.test_not_implemented().
    """
    # self.assert_ is a deprecated alias of assertTrue (removed in
    # Python 3.12); use assertTrue directly.
    def test_get_focused(self):
        self.assertTrue(True)
    def test_get_mods(self):
        self.assertTrue(True)
    def test_get_pressed(self):
        self.assertTrue(test_utils.test_not_implemented())
    def test_name(self):
        self.assertTrue(True)
    def test_set_mods(self):
        self.assertTrue(test_utils.test_not_implemented())
    def test_set_repeat(self):
        self.assertTrue(True)
# Run this module's tests directly when executed as a script.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
delinhabit/django | django/contrib/auth/management/commands/createsuperuser.py | 88 | 8576 | """
Management utility to create superusers.
"""
from __future__ import unicode_literals
import getpass
import sys
from django.contrib.auth import get_user_model
from django.contrib.auth.management import get_default_username
from django.contrib.auth.password_validation import validate_password
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
from django.utils.six.moves import input
from django.utils.text import capfirst
class NotRunningInTTYException(Exception):
    """Raised internally when stdin is not a TTY, to skip interactive prompts."""
    pass
class Command(BaseCommand):
    help = 'Used to create a superuser.'
    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Resolve the active user model and its username field up front;
        # all prompting/validation below is driven by these.
        self.UserModel = get_user_model()
        self.username_field = self.UserModel._meta.get_field(self.UserModel.USERNAME_FIELD)
    def add_arguments(self, parser):
        """Register CLI options: username, --noinput, --database, and one
        option per REQUIRED_FIELDS entry of the user model."""
        parser.add_argument('--%s' % self.UserModel.USERNAME_FIELD,
            dest=self.UserModel.USERNAME_FIELD, default=None,
            help='Specifies the login for the superuser.')
        parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,
            help=('Tells Django to NOT prompt the user for input of any kind. '
                  'You must use --%s with --noinput, along with an option for '
                  'any other required field. Superusers created with --noinput will '
                  ' not be able to log in until they\'re given a valid password.' %
                  self.UserModel.USERNAME_FIELD))
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Specifies the database to use. Default is "default".')
        for field in self.UserModel.REQUIRED_FIELDS:
            parser.add_argument('--%s' % field, dest=field, default=None,
                help='Specifies the %s for the superuser.' % field)
    def execute(self, *args, **options):
        # Allow tests to inject a fake stdin before handle() runs.
        self.stdin = options.get('stdin', sys.stdin)  # Used for testing
        return super(Command, self).execute(*args, **options)
    def handle(self, *args, **options):
        """Create the superuser, either from CLI options (--noinput) or by
        prompting interactively for username, required fields and password."""
        username = options.get(self.UserModel.USERNAME_FIELD)
        database = options.get('database')
        # If not provided, create the user with an unusable password
        password = None
        user_data = {}
        # Same as user_data but with foreign keys as fake model instances
        # instead of raw IDs.
        fake_user_data = {}
        # Do quick and dirty validation if --noinput
        if not options['interactive']:
            try:
                if not username:
                    raise CommandError("You must use --%s with --noinput." %
                                       self.UserModel.USERNAME_FIELD)
                username = self.username_field.clean(username, None)
                for field_name in self.UserModel.REQUIRED_FIELDS:
                    if options.get(field_name):
                        field = self.UserModel._meta.get_field(field_name)
                        user_data[field_name] = field.clean(options[field_name], None)
                    else:
                        raise CommandError("You must use --%s with --noinput." % field_name)
            except exceptions.ValidationError as e:
                raise CommandError('; '.join(e.messages))
        else:
            # Prompt for username/password, and any other required fields.
            # Enclose this whole thing in a try/except to catch
            # KeyboardInterrupt and exit gracefully.
            default_username = get_default_username()
            try:
                if hasattr(self.stdin, 'isatty') and not self.stdin.isatty():
                    raise NotRunningInTTYException("Not running in a TTY")
                # Get a username
                verbose_field_name = self.username_field.verbose_name
                # Loop until a unique, valid username is entered.
                while username is None:
                    input_msg = capfirst(verbose_field_name)
                    if default_username:
                        input_msg += " (leave blank to use '%s')" % default_username
                    username_rel = self.username_field.remote_field
                    input_msg = force_str('%s%s: ' % (
                        input_msg,
                        ' (%s.%s)' % (
                            username_rel.model._meta.object_name,
                            username_rel.field_name
                        ) if username_rel else '')
                    )
                    username = self.get_input_data(self.username_field, input_msg, default_username)
                    if not username:
                        continue
                    if self.username_field.unique:
                        try:
                            self.UserModel._default_manager.db_manager(database).get_by_natural_key(username)
                        except self.UserModel.DoesNotExist:
                            pass
                        else:
                            self.stderr.write("Error: That %s is already taken." % verbose_field_name)
                            username = None
                # Prompt for each remaining required field until non-empty.
                for field_name in self.UserModel.REQUIRED_FIELDS:
                    field = self.UserModel._meta.get_field(field_name)
                    user_data[field_name] = options.get(field_name)
                    while user_data[field_name] is None:
                        message = force_str('%s%s: ' % (
                            capfirst(field.verbose_name),
                            ' (%s.%s)' % (
                                field.remote_field.model._meta.object_name,
                                field.remote_field.field_name,
                            ) if field.remote_field else '',
                        ))
                        input_value = self.get_input_data(field, message)
                        user_data[field_name] = input_value
                        fake_user_data[field_name] = input_value
                        # Wrap any foreign keys in fake model instances
                        if field.remote_field:
                            fake_user_data[field_name] = field.remote_field.model(input_value)
                # Get a password
                while password is None:
                    password = getpass.getpass()
                    password2 = getpass.getpass(force_str('Password (again): '))
                    if password != password2:
                        self.stderr.write("Error: Your passwords didn't match.")
                        password = None
                        # Don't validate passwords that don't match.
                        continue
                    if password.strip() == '':
                        self.stderr.write("Error: Blank passwords aren't allowed.")
                        password = None
                        # Don't validate blank passwords.
                        continue
                    try:
                        # Run the configured password validators against a
                        # throwaway user built from the collected data.
                        validate_password(password2, self.UserModel(**fake_user_data))
                    except exceptions.ValidationError as err:
                        self.stderr.write(', '.join(err.messages))
                        password = None
            except KeyboardInterrupt:
                self.stderr.write("\nOperation cancelled.")
                sys.exit(1)
            except NotRunningInTTYException:
                self.stdout.write(
                    "Superuser creation skipped due to not running in a TTY. "
                    "You can run `manage.py createsuperuser` in your project "
                    "to create one manually."
                )
        # username stays falsy when creation was skipped (no TTY).
        if username:
            user_data[self.UserModel.USERNAME_FIELD] = username
            user_data['password'] = password
            self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
            if options['verbosity'] >= 1:
                self.stdout.write("Superuser created successfully.")
    def get_input_data(self, field, message, default=None):
        """
        Override this method if you want to customize data inputs or
        validation exceptions.
        """
        raw_value = input(message)
        if default and raw_value == '':
            raw_value = default
        try:
            val = field.clean(raw_value, None)
        except exceptions.ValidationError as e:
            self.stderr.write("Error: %s" % '; '.join(e.messages))
            val = None
        return val
| bsd-3-clause |
jhunufa/ArduWatchRaspSerial | virtualenv/lib/python3.4/site-packages/pip/_vendor/html5lib/trie/py.py | 1323 | 1775 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """In-memory trie backed by a sorted list of keys.

    Prefix queries use binary search over the sorted key list; the bounds
    of the most recent query are cached so that successive lookups sharing
    a prefix can restrict the search range.
    """

    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")

        self._data = data
        self._keys = sorted(data.keys())
        self._cachestr = ""
        # (lo, hi): slice of self._keys that contained the last prefix queried.
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return the set of keys starting with *prefix* (all keys if None/empty)."""
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)

        keys = set()
        if start == len(self._keys):
            return keys

        # Bound the scan: without the length check this raised IndexError
        # whenever the prefix matched all keys through the end of the list.
        while i < len(self._keys) and self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1

        self._cachestr = prefix
        self._cachepoints = (start, i)

        return keys

    def has_keys_with_prefix(self, prefix):
        """Return True if any stored key starts with *prefix*."""
        if prefix in self._data:
            return True

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)

        if i == len(self._keys):
            return False

        return self._keys[i].startswith(prefix)
| mit |
FRidh/scipy | scipy/io/tests/test_idl.py | 18 | 19258 | from __future__ import division, print_function, absolute_import
from os import path
from warnings import catch_warnings
DATA_PATH = path.join(path.dirname(__file__), 'data')
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal, run_module_suite,
assert_)
from scipy.io.idl import readsav
def object_array(*args):
    """Construct a 1-D numpy object array holding *args* in order."""
    # dtype=object: the ``np.object`` alias was removed in NumPy 1.24.
    array = np.empty(len(args), dtype=object)
    for i in range(len(args)):
        array[i] = args[i]
    return array
def assert_identical(a, b):
    """Assert that *a* equals *b* and that their types agree."""
    assert_equal(a, b)
    # ``np.str`` was merely an alias of the builtin ``str`` and was removed
    # in NumPy 1.24; compare against the builtin directly.
    if type(b) is str:
        assert_equal(type(a), type(b))
    else:
        assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def assert_array_identical(a, b):
    """Assert that arrays *a* and *b* agree in both values and element type."""
    assert_array_equal(a, b)
    assert_equal(a.dtype.type, b.dtype.type)
# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)
class TestIdict:
    """Tests for the ``idict`` argument of ``readsav``."""

    def test_idict(self):
        # readsav must populate and return the caller-supplied dict itself,
        # preserving any entries it already contained.
        custom_dict = {'a': np.int16(999)}
        original_id = id(custom_dict)
        s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
        assert_equal(original_id, id(s))
        assert_('a' in s)
        assert_identical(s['a'], np.int16(999))
        assert_identical(s['i8u'], np.uint8(234))
class TestScalars:
    """Test that scalar values are read in with the correct value and type."""

    def test_byte(self):
        s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
        assert_identical(s.i8u, np.uint8(234))
    def test_int16(self):
        s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
        assert_identical(s.i16s, np.int16(-23456))
    def test_int32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
        assert_identical(s.i32s, np.int32(-1234567890))
    def test_float32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
        assert_identical(s.f32, np.float32(-3.1234567e+37))
    def test_float64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
        assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
    def test_complex32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
        assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
    def test_bytes(self):
        s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
        assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
    def test_structure(self):
        # Intentionally empty: structures are exercised by TestStructures.
        pass
    def test_complex64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
    def test_heap_pointer(self):
        # Intentionally empty: pointers are exercised by TestPointers.
        pass
    def test_object_reference(self):
        # Intentionally empty: no fixture for object references.
        pass
    def test_uint16(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
        assert_identical(s.i16u, np.uint16(65511))
    def test_uint32(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
        assert_identical(s.i32u, np.uint32(4294967233))
    def test_int64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
        assert_identical(s.i64s, np.int64(-9223372036854774567))
    def test_uint64(self):
        s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
        assert_identical(s.i64u, np.uint64(18446744073709529285))
class TestCompressed(TestScalars):
    """Re-run the scalar checks (inherited) and extras against a compressed .sav file."""

    def test_compressed(self):
        s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
        assert_identical(s.i8u, np.uint8(234))
        assert_identical(s.f32, np.float32(-3.1234567e+37))
        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
        assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        # dtype=object: the ``np.object`` alias was removed in NumPy 1.24.
        assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
    """Test that multi-dimensional arrays are read in with the correct dimensions."""

    def test_1d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
        assert_equal(s.array1d.shape, (123, ))
    def test_2d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
        assert_equal(s.array2d.shape, (22, 12))
    def test_3d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
        assert_equal(s.array3d.shape, (11, 22, 12))
    def test_4d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
        assert_equal(s.array4d.shape, (4, 5, 8, 7))
    def test_5d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
    def test_6d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
        assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
    def test_7d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
        assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
    def test_8d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
        assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
    """Test that IDL structures (scalar, replicated, arrays, inherited) read correctly."""
    # All ``dtype=np.object`` / ``astype(np.object)`` uses were updated to the
    # builtin ``object``: the alias was removed in NumPy 1.24.

    def test_scalars(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
        assert_identical(s.scalars.a, np.array(np.int16(1)))
        assert_identical(s.scalars.b, np.array(np.int32(2)))
        assert_identical(s.scalars.c, np.array(np.float32(3.)))
        assert_identical(s.scalars.d, np.array(np.float64(4.)))
        assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
        assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
    def test_scalars_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
    def test_scalars_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
        assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
        assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
        assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (5, ))
        assert_equal(s.arrays_rep.b.shape, (5, ))
        assert_equal(s.arrays_rep.c.shape, (5, ))
        assert_equal(s.arrays_rep.d.shape, (5, ))
        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.a[i],
                                   np.array([1, 2, 3], dtype=np.int16))
            assert_array_identical(s.arrays_rep.b[i],
                                   np.array([4., 5., 6., 7.], dtype=np.float32))
            assert_array_identical(s.arrays_rep.c[i],
                                   np.array([np.complex64(1+2j),
                                             np.complex64(7+8j)]))
            assert_array_identical(s.arrays_rep.d[i],
                                   np.array([b"cheese", b"bacon", b"spam"],
                                            dtype=object))
    def test_arrays_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.a[i, j, k],
                                           np.array([1, 2, 3], dtype=np.int16))
                    assert_array_identical(s.arrays_rep.b[i, j, k],
                                           np.array([4., 5., 6., 7.],
                                                    dtype=np.float32))
                    assert_array_identical(s.arrays_rep.c[i, j, k],
                                           np.array([np.complex64(1+2j),
                                                     np.complex64(7+8j)]))
                    assert_array_identical(s.arrays_rep.d[i, j, k],
                                           np.array([b"cheese", b"bacon", b"spam"],
                                                    dtype=object))
    def test_inheritance(self):
        s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
        assert_identical(s.fc.x, np.array([0], dtype=np.int16))
        assert_identical(s.fc.y, np.array([0], dtype=np.int16))
        assert_identical(s.fc.r, np.array([0], dtype=np.int16))
        assert_identical(s.fc.c, np.array([4], dtype=np.int16))
class TestPointers:
    """Check that pointers in .sav files produce references to the same object in Python."""

    def test_pointers(self):
        s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
        assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        # Both pointers reference the same heap variable, so the Python
        # objects must be identical, not merely equal.
        assert_(s.c64_pointer1 is s.c64_pointer2)
class TestPointerArray:
    """Test that pointers in arrays are correctly read in.

    Each fixture stores an N-d array whose elements all point at one heap
    value, so every element must alias the same Python object (same id()).
    """

    def test_1d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
        assert_equal(s.array1d.shape, (123, ))
        assert_(np.all(s.array1d == np.float32(4.)))
        assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
    def test_2d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
        assert_equal(s.array2d.shape, (22, 12))
        assert_(np.all(s.array2d == np.float32(4.)))
        assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
    def test_3d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
        assert_equal(s.array3d.shape, (11, 22, 12))
        assert_(np.all(s.array3d == np.float32(4.)))
        assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
    def test_4d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
        assert_equal(s.array4d.shape, (4, 5, 8, 7))
        assert_(np.all(s.array4d == np.float32(4.)))
        assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
    def test_5d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
        assert_(np.all(s.array5d == np.float32(4.)))
        assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
    def test_6d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
        assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
        assert_(np.all(s.array6d == np.float32(4.)))
        assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
    def test_7d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
        assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
        assert_(np.all(s.array7d == np.float32(4.)))
        assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
    def test_8d(self):
        s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
        assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
        assert_(np.all(s.array8d == np.float32(4.)))
        assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
class TestPointerStructures:
    """Test that structures containing heap pointers are correctly read in."""

    def test_scalars(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
        assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
        assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
        # g and h point at the same heap variable: identity, not just equality.
        assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
    def test_pointers_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
        assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
        assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
    def test_pointers_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
        s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
        assert_identical(s.pointers_rep.g, s_expect)
        assert_identical(s.pointers_rep.h, s_expect)
        assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
    def test_arrays(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
        assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
        assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
        assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
        assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
        assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
    def test_arrays_replicated(self):
        s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (5, ))
        assert_equal(s.arrays_rep.h.shape, (5, ))
        # Check values
        for i in range(5):
            assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
            assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
            assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
            assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
    def test_arrays_replicated_3d(self):
        pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
        s = readsav(pth, verbose=False)
        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)
        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.g[i, j, k],
                            np.repeat(np.float32(4.), 2).astype(np.object_))
                    assert_array_identical(s.arrays_rep.h[i, j, k],
                            np.repeat(np.float32(4.), 3).astype(np.object_))
                    assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
                    assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
class TestTags:
    '''Test that sav files with description tag read at all'''
    def test_description(self):
        # The description tag must not interfere with reading the variables.
        s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
        assert_identical(s.i8u, np.uint8(234))
def test_null_pointer():
    """Regression test: IDL null pointers must read back as None."""
    # Regression test for null pointers.
    s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
    assert_identical(s.point, None)
    assert_identical(s.check, np.int16(5))
def test_invalid_pointer():
    """Regression test: dangling heap pointers become None with a warning."""
    # Regression test for invalid pointers (gh-4613).
    # In some files in the wild, pointers can sometimes refer to a heap
    # variable that does not exist. In that case, we now gracefully fail for
    # that variable and replace the variable with None and emit a warning.
    # Since it's difficult to artificially produce such files, the file used
    # here has been edited to force the pointer reference to be invalid.
    with catch_warnings(record=True) as w:
        s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
    # Exactly one warning, with the documented message.
    assert_(len(w) == 1)
    assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
                                  "heap: variable will be set to None"))
    assert_identical(s['a'], np.array([None, None]))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/test/test_cmd_line.py | 16 | 3751 | # Tests invocation of the interpreter with various command line arguments
# All tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.test_support, unittest
import sys
from test.script_helper import spawn_python, kill_python, python_exit_code
class CmdLineTest(unittest.TestCase):
    """Smoke-test interpreter command line switches by spawning subprocesses.

    NOTE(review): this is Python 2 code ('print sys.flags' appears inside a
    code string below) — do not run under Python 3.
    """

    def start_python(self, *args):
        # Spawn a child interpreter with *args*, close its stdin and return
        # the collected output.
        p = spawn_python(*args)
        return kill_python(p)
    def exit_code(self, *args):
        return python_exit_code(*args)
    def test_directories(self):
        # Passing a directory (or redirecting one in) must be rejected.
        self.assertNotEqual(self.exit_code('.'), 0)
        self.assertNotEqual(self.exit_code('< .'), 0)
    def verify_valid_flag(self, cmd_line):
        # A valid flag starts and exits cleanly, without a traceback.
        data = self.start_python(cmd_line)
        self.assertTrue(data == '' or data.endswith('\n'))
        self.assertNotIn('Traceback', data)
    def test_optimize(self):
        self.verify_valid_flag('-O')
        self.verify_valid_flag('-OO')
    def test_q(self):
        self.verify_valid_flag('-Qold')
        self.verify_valid_flag('-Qnew')
        self.verify_valid_flag('-Qwarn')
        self.verify_valid_flag('-Qwarnall')
    def test_site_flag(self):
        self.verify_valid_flag('-S')
    def test_usage(self):
        self.assertIn('usage', self.start_python('-h'))
    def test_version(self):
        version = 'Python %d.%d' % sys.version_info[:2]
        self.assertTrue(self.start_python('-V').startswith(version))
    def test_run_module(self):
        # Test expected operation of the '-m' switch
        # Switch needs an argument
        self.assertNotEqual(self.exit_code('-m'), 0)
        # Check we get an error for a nonexistent module
        self.assertNotEqual(
            self.exit_code('-m', 'fnord43520xyz'),
            0)
        # Check the runpy module also gives an error for
        # a nonexistent module
        self.assertNotEqual(
            self.exit_code('-m', 'runpy', 'fnord43520xyz'),
            0)
        # All good if module is located and run successfully
        self.assertEqual(
            self.exit_code('-m', 'timeit', '-n', '1'),
            0)
    def test_run_module_bug1764407(self):
        # -m and -i need to play well together
        # Runs the timeit module and checks the __main__
        # namespace has been populated appropriately
        p = spawn_python('-i', '-m', 'timeit', '-n', '1')
        p.stdin.write('Timer\n')
        p.stdin.write('exit()\n')
        data = kill_python(p)
        self.assertTrue(data.startswith('1 loop'))
        self.assertIn('__main__.Timer', data)
    def test_run_code(self):
        # Test expected operation of the '-c' switch
        # Switch needs an argument
        self.assertNotEqual(self.exit_code('-c'), 0)
        # Check we get an error for an uncaught exception
        self.assertNotEqual(
            self.exit_code('-c', 'raise Exception'),
            0)
        # All good if execution is successful
        self.assertEqual(
            self.exit_code('-c', 'pass'),
            0)
    def test_hash_randomization(self):
        # Verify that -R enables hash randomization:
        self.verify_valid_flag('-R')
        hashes = []
        # Two independent runs must (with overwhelming probability) produce
        # different hashes for the same string.
        for i in range(2):
            code = 'print(hash("spam"))'
            data = self.start_python('-R', '-c', code)
            hashes.append(data)
        self.assertNotEqual(hashes[0], hashes[1])
        # Verify that sys.flags contains hash_randomization
        code = 'import sys; print sys.flags'
        data = self.start_python('-R', '-c', code)
        self.assertTrue('hash_randomization=1' in data)
def test_main():
    """Run the command line test suite, then reap any leftover child processes."""
    support = test.test_support
    support.run_unittest(CmdLineTest)
    support.reap_children()
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
Darksig/DarkSig | qa/pull-tester/pull-tester.py | 167 | 8944 | #!/usr/bin/python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
    """Raised when a shell command exits with a non-zero status."""

    def __init__(self, value):
        # Keep the failing command text on .value for callers.
        self.value = value

    def __str__(self):
        return repr(self.value)
def run(command, **kwargs):
    """Expand environment placeholders in *command*, run it, and return its exit code.

    Keyword args are passed to subprocess.Popen; ``fail_hard`` (default True)
    raises RunError on a non-zero exit instead of returning it.
    """
    fail_hard = kwargs.pop("fail_hard", True)
    # output to /dev/null by default:
    # NOTE(review): these file objects are never closed — leaked handles.
    kwargs.setdefault("stdout", open('/dev/null', 'w'))
    kwargs.setdefault("stderr", open('/dev/null', 'w'))
    # Substitute ${VAR} placeholders from the environment.
    command = Template(command).substitute(os.environ)
    if "TRACE" in os.environ:
        if 'cwd' in kwargs:
            print("[cwd=%s] %s"%(kwargs['cwd'], command))
        else: print(command)
    try:
        # NOTE(review): split(' ') cannot handle quoted arguments; all
        # commands used here are unquoted, so this holds by convention.
        process = subprocess.Popen(command.split(' '), **kwargs)
        process.wait()
    except KeyboardInterrupt:
        # NOTE(review): if Popen itself raised, ``process`` is unbound here
        # and this would raise NameError — confirm acceptable.
        process.terminate()
        raise
    if process.returncode != 0 and fail_hard:
        raise RunError("Failed: "+command)
    return process.returncode
def checkout_pull(clone_url, commit, out):
    """Refresh the chroot build copy and merge *commit* from *clone_url* onto master.

    Returns True on a clean merge, False if the merge failed. Git output is
    appended to the open file object *out*.
    """
    # Init: reset the throwaway chroot copy from the pristine master chroot.
    build_dir=os.environ["BUILD_DIR"]
    run("umount ${CHROOT_COPY}/proc", fail_hard=False)
    run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
    run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
    run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
    # Merge onto upstream/master
    run("rm -rf ${BUILD_DIR}")
    run("mkdir -p ${BUILD_DIR}")
    run("git clone ${CLONE_URL} ${BUILD_DIR}")
    run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
    run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
    if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
        return False
    run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
    # Re-mount /proc for the chrooted build/test run.
    run("mount --bind /proc ${CHROOT_COPY}/proc")
    return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
    """Post a pass/fail comment on a GitHub pull request.

    Deletes any previous comment by this bot first. *success*/*inMerge*/
    *needTests* select the message variant; *linkUrl* points at the logs.
    """
    common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
    # Remove old BitcoinPullTester comments (I'm being lazy and not paginating here)
    recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
                                  auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
    for comment in recentcomments:
        # Our own previous bot comments are identified by the shared footer text.
        if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
            requests.delete(comment["url"],
                            auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
    if success == True:
        if needTests:
            message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
        else:
            message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
        post_data = { "body" : message + common_message}
    elif inMerge:
        post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
    else:
        post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It chanages changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
    resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
    """Build and test one pull request, then post the result as a GitHub comment.

    Results (log + build artifacts) go under RESULTS_DIR/<commit>/ and the
    commit is recorded in TESTED_DB so it is not tested again.
    """
    print("Testing pull %d: %s : %s"%(number, clone_url,commit))
    dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
    print(" ouput to %s"%dir)
    if os.path.exists(dir):
        os.system("rm -r " + dir)
    os.makedirs(dir)
    # Keep a "current" symlink pointing at the run in progress.
    currentdir = os.environ["RESULTS_DIR"] + "/current"
    os.system("rm -r "+currentdir)
    os.system("ln -s " + dir + " " + currentdir)
    out = open(dir + "test.log", 'w+')
    resultsurl = os.environ["RESULTS_URL"] + commit
    checkedout = checkout_pull(clone_url, commit, out)
    if checkedout != True:
        print("Failed to test pull - sending comment to: " + comment_url)
        commentOn(comment_url, False, True, False, resultsurl)
        open(os.environ["TESTED_DB"], "a").write(commit + "\n")
        return
    run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
    run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
    run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
    script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
    script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/BitcoindComparisonTool_jar/BitcoindComparisonTool.jar 0 6 ${OUT_DIR}"
    returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
                     fail_hard=False, stdout=out, stderr=out)
    run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
    run("mv ${BUILD_DIR} " + dir)
    # Exit code 42 is the test script's "passed, but no new test cases" signal.
    if returncode == 42:
        print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
        commentOn(comment_url, True, False, True, resultsurl)
    elif returncode != 0:
        print("Failed to test pull - sending comment to: " + comment_url)
        commentOn(comment_url, False, False, False, resultsurl)
    else:
        print("Successfully tested pull - sending comment to: " + comment_url)
        commentOn(comment_url, True, False, False, resultsurl)
    open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
    """Set os.environ[setting] to *value* unless the caller already set it."""
    # setdefault is the idiomatic one-step form of "if setting not in ...".
    os.environ.setdefault(setting, value)
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
for page in range(1,100):
result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
if len(result) == 0: break;
for pull in result:
if pull["head"]["sha"] in tested:
print("Pull %d already tested"%(pull["number"],))
continue
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
| mit |
nguyenhongson03/scrapy | tests/test_cmdline/__init__.py | 41 | 1069 | import sys
from subprocess import Popen, PIPE
import unittest
from scrapy.utils.test import get_testenv
class CmdlineTest(unittest.TestCase):
    """Exercise scrapy.cmdline settings resolution via subprocesses."""

    def setUp(self):
        self.env = get_testenv()
        self.env['SCRAPY_SETTINGS_MODULE'] = 'tests.test_cmdline.settings'

    def _execute(self, *new_args, **kwargs):
        # Run ``python -m scrapy.cmdline <args>`` and return stripped stdout.
        cmd = (sys.executable, '-m', 'scrapy.cmdline') + new_args
        process = Popen(cmd, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs)
        out, _ = process.communicate()
        return out.strip()

    def test_default_settings(self):
        self.assertEqual(self._execute('settings', '--get', 'TEST1'),
                         'default')

    def test_override_settings_using_set_arg(self):
        self.assertEqual(self._execute('settings', '--get', 'TEST1', '-s', 'TEST1=override'),
                         'override')

    def test_override_settings_using_envvar(self):
        self.env['SCRAPY_TEST1'] = 'override'
        self.assertEqual(self._execute('settings', '--get', 'TEST1'),
                         'override')
| bsd-3-clause |
Distrotech/qtwebkit | Tools/Scripts/webkitpy/tool/steps/validatereviewer.py | 119 | 2894 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import sys
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
# FIXME: Some of this logic should probably be unified with CommitterValidator?
class ValidateReviewer(AbstractStep):
    """Aborts the tool run when a ChangeLog entry lacks a valid reviewer."""
    @classmethod
    def options(cls):
        base_options = AbstractStep.options()
        return base_options + [Options.non_interactive]
    def _validate_changelog(self, path):
        # Only the newest entry of each ChangeLog is checked.
        entry = ChangeLog(path).latest_entry()
        if entry.has_valid_reviewer():
            return
        candidate = entry.reviewer_text()
        if candidate:
            _log.info("%s found in %s does not appear to be a valid reviewer according to committers.py." % (candidate, path))
        _log.error('%s neither lists a valid reviewer nor contains the string "Unreviewed" or "Rubber stamp" (case insensitive).' % path)
        sys.exit(1)
    def run(self, state):
        # FIXME: For now we disable this check when a user is driving the script
        # this check is too draconian (and too poorly tested) to foist upon users.
        if not self._options.non_interactive:
            return
        for changelog_path in self.cached_lookup(state, "changelogs"):
            self._validate_changelog(changelog_path)
| lgpl-3.0 |
zjuwangg/scrapy | tests/test_utils_signal.py | 121 | 2741 | from testfixtures import LogCapture
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import defer, reactor
from pydispatch import dispatcher
from scrapy.utils.signal import send_catch_log, send_catch_log_deferred
class SendCatchLogTest(unittest.TestCase):
    """send_catch_log must run every handler and log (not raise) failures."""
    @defer.inlineCallbacks
    def test_send_catch_log(self):
        test_signal = object()
        handlers_called = set()
        dispatcher.connect(self.error_handler, signal=test_signal)
        dispatcher.connect(self.ok_handler, signal=test_signal)
        with LogCapture() as l:
            result = yield defer.maybeDeferred(
                self._get_result, test_signal, arg='test',
                handlers_called=handlers_called
            )
        # Both handlers must have run even though the first one raised.
        assert self.error_handler in handlers_called
        assert self.ok_handler in handlers_called
        # Exactly one ERROR record, naming the handler that blew up.
        self.assertEqual(len(l.records), 1)
        record = l.records[0]
        self.assertIn('error_handler', record.getMessage())
        self.assertEqual(record.levelname, 'ERROR')
        # Result pairs: (handler, Failure) for the error, (handler, value) for success.
        self.assertEqual(result[0][0], self.error_handler)
        self.assert_(isinstance(result[0][1], Failure))
        self.assertEqual(result[1], (self.ok_handler, "OK"))
        dispatcher.disconnect(self.error_handler, signal=test_signal)
        dispatcher.disconnect(self.ok_handler, signal=test_signal)
    def _get_result(self, signal, *a, **kw):
        # Subclasses override this hook to exercise the deferred variant.
        return send_catch_log(signal, *a, **kw)
    def error_handler(self, arg, handlers_called):
        handlers_called.add(self.error_handler)
        a = 1/0  # deliberate ZeroDivisionError to exercise error capture
    def ok_handler(self, arg, handlers_called):
        handlers_called.add(self.ok_handler)
        assert arg == 'test'
        return "OK"
class SendCatchLogDeferredTest(SendCatchLogTest):
    """Re-run the base assertions against the deferred-aware variant."""
    def _get_result(self, signal, *args, **kwargs):
        # Same checks as the parent class, just dispatched through the
        # deferred-returning helper instead of send_catch_log.
        return send_catch_log_deferred(signal, *args, **kwargs)
class SendCatchLogDeferredTest2(SendCatchLogTest):
    """Same as the deferred variant, but the OK handler itself returns a
    Deferred that fires asynchronously on the next reactor turn."""
    def ok_handler(self, arg, handlers_called):
        handlers_called.add(self.ok_handler)
        assert arg == 'test'
        d = defer.Deferred()
        # Fire asynchronously so the caller must actually wait on the deferred.
        reactor.callLater(0, d.callback, "OK")
        return d
    def _get_result(self, signal, *a, **kw):
        return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogTest2(unittest.TestCase):
    """The synchronous send_catch_log must reject deferred-returning handlers."""
    def test_error_logged_if_deferred_not_supported(self):
        test_signal = object()
        test_handler = lambda: defer.Deferred()
        dispatcher.connect(test_handler, test_signal)
        with LogCapture() as l:
            send_catch_log(test_signal)
        # One log record complaining about the deferred return value.
        self.assertEqual(len(l.records), 1)
        self.assertIn("Cannot return deferreds from signal handler", str(l))
        dispatcher.disconnect(test_handler, test_signal)
| bsd-3-clause |
snahelou/awx | awx/main/tests/functional/api/test_organization_counts.py | 3 | 8757 | import pytest
from awx.api.versioning import reverse
@pytest.fixture
def organization_resource_creator(organization, user):
    """Return a factory that populates *organization* with the requested
    number of members, admins, teams, inventories, projects and job
    templates.  The factory returns the populated organization."""
    def rf(users, admins, job_templates, projects, inventories, teams):
        # Associate one resource of every type with the organization
        for i in range(users):
            member_user = user('org-member %s' % i)
            organization.member_role.members.add(member_user)
        for i in range(admins):
            admin_user = user('org-admin %s' % i)
            organization.admin_role.members.add(admin_user)
        for i in range(teams):
            organization.teams.create(name='org-team %s' % i)
        for i in range(inventories):
            # The return value is not needed; creation alone links the
            # inventory to the organization.
            organization.inventories.create(name="associated-inv %s" % i)
        for i in range(projects):
            organization.projects.create(name="test-proj %s" % i,
                                         description="test-proj-desc")
        # Mix up the inventories and projects used by the job templates
        i_proj = 0
        i_inv = 0
        for i in range(job_templates):
            project = organization.projects.all()[i_proj]
            inventory = organization.inventories.all()[i_inv]
            project.jobtemplates.create(name="test-jt %s" % i,
                                        description="test-job-template-desc",
                                        inventory=inventory,
                                        playbook="test_playbook.yml")
            # Advance both cursors, wrapping so every project/inventory is
            # reused when job_templates exceeds their counts.
            i_proj = (i_proj + 1) % organization.projects.count()
            i_inv = (i_inv + 1) % organization.inventories.count()
        return organization
    return rf
# Distinct prime counts so each resource type is unambiguous in assertions.
COUNTS_PRIMES = {
    'users': 11,
    'admins': 5,
    'job_templates': 3,
    'projects': 3,
    'inventories': 7,
    'teams': 5
}
# Expected summary for a freshly created, empty organization.
COUNTS_ZEROS = {
    'users': 0,
    'admins': 0,
    'job_templates': 0,
    'projects': 0,
    'inventories': 0,
    'teams': 0
}
@pytest.fixture
def resourced_organization(organization_resource_creator):
    """An organization pre-populated with the COUNTS_PRIMES resource mix."""
    return organization_resource_creator(**COUNTS_PRIMES)
@pytest.mark.django_db
def test_org_counts_detail_admin(resourced_organization, user, get):
    """Superusers see the full related-resource counts on the detail view."""
    # Check that all types of resources are counted by a superuser
    external_admin = user('admin', True)
    response = get(reverse('api:organization_detail',
                           kwargs={'pk': resourced_organization.pk}), external_admin)
    assert response.status_code == 200
    counts = response.data['summary_fields']['related_field_counts']
    assert counts == COUNTS_PRIMES
@pytest.mark.django_db
def test_org_counts_detail_member(resourced_organization, user, get):
    """Plain members only see people counts; other resources read as zero."""
    # Check that a non-admin org member can only see users / admin in detail view
    member_user = resourced_organization.member_role.members.get(username='org-member 1')
    response = get(reverse('api:organization_detail',
                           kwargs={'pk': resourced_organization.pk}), member_user)
    assert response.status_code == 200
    counts = response.data['summary_fields']['related_field_counts']
    assert counts == {
        'users': COUNTS_PRIMES['users'], # Policy is that members can see other users and admins
        'admins': COUNTS_PRIMES['admins'],
        'job_templates': 0,
        'projects': 0,
        'inventories': 0,
        'teams': 0
    }
@pytest.mark.django_db
def test_org_counts_list_admin(resourced_organization, user, get):
    """Superusers see the full related-resource counts on the list view."""
    # Check that all types of resources are counted by a superuser
    external_admin = user('admin', True)
    response = get(reverse('api:organization_list'), external_admin)
    assert response.status_code == 200
    counts = response.data['results'][0]['summary_fields']['related_field_counts']
    assert counts == COUNTS_PRIMES
@pytest.mark.django_db
def test_org_counts_list_member(resourced_organization, user, get):
    """Plain members only see people counts on the list view (RBAC)."""
    # Check that a non-admin user can only see the full project and
    # user count, consistent with the RBAC rules
    member_user = resourced_organization.member_role.members.get(username='org-member 1')
    response = get(reverse('api:organization_list'), member_user)
    assert response.status_code == 200
    counts = response.data['results'][0]['summary_fields']['related_field_counts']
    assert counts == {
        'users': COUNTS_PRIMES['users'], # Policy is that members can see other users and admins
        'admins': COUNTS_PRIMES['admins'],
        'job_templates': 0,
        'projects': 0,
        'inventories': 0,
        'teams': 0
    }
@pytest.mark.django_db
def test_new_org_zero_counts(user, post):
    """A just-created organization reports zero for every related count."""
    # Check that a POST to the organization list endpoint returns
    # correct counts, including the new record
    org_list_url = reverse('api:organization_list')
    post_response = post(url=org_list_url, data={'name': 'test organization',
                                                 'description': ''}, user=user('admin', True))
    assert post_response.status_code == 201
    new_org_list = post_response.render().data
    counts_dict = new_org_list['summary_fields']['related_field_counts']
    assert counts_dict == COUNTS_ZEROS
@pytest.mark.django_db
def test_two_organizations(resourced_organization, organizations, user, get):
    """Counts are computed per organization, not mixed between them."""
    # Check correct results for two organizations are returned
    external_admin = user('admin', True)
    organization_zero = organizations(1)[0]
    response = get(reverse('api:organization_list'), external_admin)
    assert response.status_code == 200
    org_id_full = resourced_organization.id
    org_id_zero = organization_zero.id
    counts = {}
    for i in range(2):
        org_id = response.data['results'][i]['id']
        counts[org_id] = response.data['results'][i]['summary_fields']['related_field_counts']
    assert counts[org_id_full] == COUNTS_PRIMES
    assert counts[org_id_zero] == COUNTS_ZEROS
@pytest.mark.django_db
def test_scan_JT_counted(resourced_organization, user, get):
    """List and detail views agree on the job template count."""
    admin_user = user('admin', True)
    # NOTE(review): this aliases the module-level dict (read-only here);
    # any test mutating COUNTS_PRIMES would skew this comparison.
    counts_dict = COUNTS_PRIMES
    # Test list view
    list_response = get(reverse('api:organization_list'), admin_user)
    assert list_response.status_code == 200
    assert list_response.data['results'][0]['summary_fields']['related_field_counts'] == counts_dict
    # Test detail view
    detail_response = get(reverse('api:organization_detail', kwargs={'pk': resourced_organization.pk}), admin_user)
    assert detail_response.status_code == 200
    assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict
@pytest.mark.django_db
def test_JT_not_double_counted(resourced_organization, user, get):
    """A job template reachable through both its project and its inventory
    must be counted only once in the organization summary."""
    admin_user = user('admin', True)
    # Add a run job template to the org
    resourced_organization.projects.all()[0].jobtemplates.create(
        job_type='run',
        inventory=resourced_organization.inventories.all()[0],
        project=resourced_organization.projects.all()[0],
        name='double-linked-job-template')
    # Work on a copy: mutating COUNTS_PRIMES in place would leak into every
    # other test that compares against the shared module-level dict.
    counts_dict = dict(COUNTS_PRIMES)
    counts_dict['job_templates'] += 1
    # Test list view
    list_response = get(reverse('api:organization_list'), admin_user)
    assert list_response.status_code == 200
    assert list_response.data['results'][0]['summary_fields']['related_field_counts'] == counts_dict
    # Test detail view
    detail_response = get(reverse('api:organization_detail', kwargs={'pk': resourced_organization.pk}), admin_user)
    assert detail_response.status_code == 200
    assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict
@pytest.mark.django_db
def test_JT_associated_with_project(organizations, project, user, get):
    """A job template is attributed to the org owning its project even when
    its inventory belongs to a different organization."""
    # Check that adding a project to an organization gets the project's JT
    # included in the organization's JT count
    external_admin = user('admin', True)
    two_orgs = organizations(2)
    organization = two_orgs[0]
    other_org = two_orgs[1]
    unrelated_inv = other_org.inventories.create(name='not-in-organization')
    organization.projects.add(project)
    project.jobtemplates.create(name="test-jt",
                                description="test-job-template-desc",
                                inventory=unrelated_inv,
                                playbook="test_playbook.yml")
    response = get(reverse('api:organization_list'), external_admin)
    assert response.status_code == 200
    org_id = organization.id
    counts = {}
    for org_json in response.data['results']:
        working_id = org_json['id']
        counts[working_id] = org_json['summary_fields']['related_field_counts']
    assert counts[org_id] == {
        'users': 0,
        'admins': 0,
        'job_templates': 1,
        'projects': 1,
        'inventories': 0,
        'teams': 0
    }
| apache-2.0 |
benoit-pierre/plover | plover_build_utils/check_requirements.py | 3 | 1537 | #!/usr/bin/env python3
from collections import OrderedDict
import pkg_resources
from plover.registry import Registry
def sorted_requirements(requirements):
    """Return *requirements* ordered case-insensitively by string form."""
    def _sort_key(requirement):
        return str(requirement).lower()
    return sorted(requirements, key=_sort_key)
# Find all available distributions.
all_requirements = [
    dist.as_requirement()
    for dist in pkg_resources.working_set
]
# Find Plover requirements.
plover_deps = set()
for dist in pkg_resources.require('plover'):
    plover_deps.add(dist.as_requirement())
# Load plugins.
registry = Registry(suppress_errors=False)
registry.update()
# Find plugins requirements (excluding anything Plover already pulls in).
plugins = OrderedDict()
plugins_deps = set()
for plugin_dist in registry.list_distributions():
    if plugin_dist.dist.project_name != 'plover':
        plugins[plugin_dist.dist.as_requirement()] = set()
for requirement, deps in plugins.items():
    for dist in pkg_resources.require(str(requirement)):
        if dist.as_requirement() not in plover_deps:
            deps.add(dist.as_requirement())
    plugins_deps.update(deps)
# List requirements, grouped: plover's, each plugin's, then everything else.
print('# plover')
for requirement in sorted_requirements(plover_deps):
    print(requirement)
for requirement, deps in plugins.items():
    print('#', requirement.project_name)
    # NOTE: the inner loop deliberately reuses the name `requirement`;
    # the outer value is not needed after the header line above.
    for requirement in sorted_requirements(deps):
        print(requirement)
print('# other')
for requirement in sorted_requirements(all_requirements):
    if requirement not in plover_deps and \
       requirement not in plugins_deps:
        print(requirement)
# Split string so this line does not itself act as a vim modeline.
print('# ''vim: ft=cfg commentstring=#\ %s list')
| gpl-2.0 |
akirk/youtube-dl | youtube_dl/extractor/mlb.py | 142 | 7116 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
)
class MLBIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:[\da-z_-]+\.)*mlb\.com/
(?:
(?:
(?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|
(?:
shared/video/embed/(?:embed|m-internal-embed)\.html|
(?:[^/]+/)+(?:play|index)\.jsp|
)\?.*?\bcontent_id=
)
(?P<id>n?\d+)|
(?:[^/]+/)*(?P<path>[^/]+)
)
'''
_TESTS = [
{
'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
'md5': 'ff56a598c2cf411a9a38a69709e97079',
'info_dict': {
'id': '34698933',
'ext': 'mp4',
'title': "Ackley's spectacular catch",
'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',
'duration': 66,
'timestamp': 1405980600,
'upload_date': '20140721',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
{
'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',
'md5': 'd9c022c10d21f849f49c05ae12a8a7e9',
'info_dict': {
'id': '34496663',
'ext': 'mp4',
'title': 'Stanton prepares for Derby',
'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',
'duration': 46,
'timestamp': 1405105800,
'upload_date': '20140711',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
{
'url': 'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby',
'md5': '0e6e73d509321e142409b695eadd541f',
'info_dict': {
'id': '34578115',
'ext': 'mp4',
'title': 'Cespedes repeats as Derby champ',
'description': 'md5:08df253ce265d4cf6fb09f581fafad07',
'duration': 488,
'timestamp': 1405399936,
'upload_date': '20140715',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
{
'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance',
'md5': 'b8fd237347b844365d74ea61d4245967',
'info_dict': {
'id': '34577915',
'ext': 'mp4',
'title': 'Bautista on Home Run Derby',
'description': 'md5:b80b34031143d0986dddc64a8839f0fb',
'duration': 52,
'timestamp': 1405390722,
'upload_date': '20140715',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
{
'url': 'http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer',
'md5': 'b190e70141fb9a1552a85426b4da1b5d',
'info_dict': {
'id': '75609783',
'ext': 'mp4',
'title': 'Must C: Pillar climbs for catch',
'description': '4/15/15: Blue Jays outfielder Kevin Pillar continues his defensive dominance by climbing the wall in left to rob Tim Beckham of a home run',
'timestamp': 1429124820,
'upload_date': '20150415',
}
},
{
'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
'only_matching': True,
},
{
'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',
'only_matching': True,
},
{
'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',
'only_matching': True,
},
{
'url': 'http://m.cardinals.mlb.com/stl/video/v51175783/atlstl-piscotty-makes-great-sliding-catch-on-line/?partnerId=as_mlb_20150321_42500876&adbid=579409712979910656&adbpl=tw&adbpr=52847728',
'only_matching': True,
},
{
# From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer
'url': 'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb',
'only_matching': True,
},
{
'url': 'http://washington.nationals.mlb.com/mlb/gameday/index.jsp?c_id=was&gid=2015_05_09_atlmlb_wasmlb_1&lang=en&content_id=108309983&mode=video#',
'only_matching': True,
}
]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if not video_id:
            # URL carried a slug instead of a numeric id: fetch the page
            # and pull the content id out of the markup.
            video_path = mobj.group('path')
            webpage = self._download_webpage(url, video_path)
            video_id = self._search_regex(
                [r'data-video-?id="(\d+)"', r'content_id=(\d+)'], webpage, 'video id')
        # Metadata XML is sharded by the last three digits of the id.
        detail = self._download_xml(
            'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml'
            % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id)
        title = detail.find('./headline').text
        description = detail.find('./big-blurb').text
        duration = parse_duration(detail.find('./duration').text)
        # Drops the last 5 chars of the date attribute — presumably a
        # timezone suffix parse_iso8601 cannot handle; confirm format.
        timestamp = parse_iso8601(detail.attrib['date'][:-5])
        thumbnails = [{
            'url': thumbnail.text,
        } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]
        formats = []
        for media_url in detail.findall('./url'):
            playback_scenario = media_url.attrib['playback_scenario']
            fmt = {
                'url': media_url.text,
                'format_id': playback_scenario,
            }
            # Scenario names such as "1200K_640X360" encode bitrate and size.
            m = re.search(r'(?P<vbr>\d+)K_(?P<width>\d+)X(?P<height>\d+)', playback_scenario)
            if m:
                fmt.update({
                    'vbr': int(m.group('vbr')) * 1000,
                    'width': int(m.group('width')),
                    'height': int(m.group('height')),
                })
            formats.append(fmt)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
            'thumbnails': thumbnails,
        }
| unlicense |
Nicogue/Encuentro | server/scrapers_dqsv.py | 1 | 6154 | #!/usr/bin/env python3
# Copyright 2014 Facundo Batista
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://launchpad.net/encuentro
"""Scrapers for the decimequiensosvos backend."""
import datetime
import sys
from collections import namedtuple
from yaswfp import swfparser
Episode = namedtuple("Episode", "name occup bio image date")
class _ConstantPoolExtractor(object):
    """Get items from the constant pool.

    Interprets just enough of the SWF action stream (ActionPush followed by
    ActionSetVariable/ActionSetMember) to recover name/value text pairs.
    """
    def __init__(self, constants, actions):
        # constants: string table from the ActionConstantPool record
        # actions: the full action list of the DoAction tag
        self.constants = constants
        self.actions = actions
    def get(self, *keys):
        """Get the text after some key.

        Returns a dict with one entry per requested key, or None if the
        action stream ends before every key has been seen.
        """
        values = {}
        stack = []
        for act in self.actions:
            if act.name == 'ActionPush':
                # Type 7 pushes a raw integer, type 8 a constant-pool index.
                # Types 5/6 are skipped entirely — presumably non-string
                # pushes (boolean/double); confirm against the SWF spec.
                if act.Type == 7:
                    idx = act.Integer
                elif act.Type == 8:
                    idx = act.Constant8
                elif act.Type in (5, 6):
                    continue
                else:
                    raise ValueError("Bad act type: " + repr(act))
                try:
                    val = self.constants[idx]
                except IndexError:
                    # Out-of-range index: push a placeholder so the
                    # name/value pairing on the stack stays aligned.
                    stack.append(None)
                else:
                    if val.startswith('titulo') and val.endswith('1'):
                        # hard group break!!!
                        values = {}
                        stack = []
                    stack.append(val)
            elif act.name in ('ActionSetVariable', 'ActionSetMember'):
                if len(stack) == 2:
                    # A complete (name, value) pair precedes this set op.
                    title, value = stack
                    if title in keys:
                        values[title] = value
                    if len(values) == len(keys):
                        return values
                    stack = []
                else:
                    # Malformed pair: discard and resynchronize.
                    stack = []
def _fix_date(date):
"""Fix and improve the date info."""
datestr = date.split()[0]
if datestr.isupper():
return None
if "-" in datestr:
datestr = "/".join(x.split("-")[0] for x in datestr.split("/"))
dt = datetime.datetime.strptime(datestr, "%d/%m/%y")
date = dt.date()
return date
def _fix_occup(occup):
"""Fix and improve the occupation info."""
occup = occup.strip()
if not occup:
return ""
occup = occup[0].upper() + occup[1:]
if occup[-1] != ".":
occup = occup + "."
# assure all the letters after a period is in uppercase
pos_from = 0
while True:
try:
pos = occup.index(".", pos_from)
except ValueError:
break
pos_from = pos + 1
pos += 2 # second letter after the point
if pos < len(occup):
occup = occup[:pos] + occup[pos].upper() + occup[pos + 1:]
return occup
def _fix_bio(bio):
"""Fix and improve the bio info."""
bio = bio.strip()
return bio
def _fix_name(name):
"""Fix and improve the name info."""
name = name.replace(""", '"')
return name
def scrap(fh, custom_order=None):
    """Get useful info from a program.

    Parses the SWF file open at *fh* and returns a list of Episode tuples.
    When *custom_order* (a list of episode names) is given, images are
    matched to episodes through that list instead of positionally, and
    episodes whose name is not listed are dropped.
    """
    swf = swfparser.SWFParser(fh)
    # get the images
    base = None
    images = []
    for tag in swf.tags:
        if tag.name == 'JPEGTables':
            base = tag.JPEGData
        elif tag.name == 'DefineBits':
            images.append((tag.CharacterID, tag.JPEGData))
        elif tag.name == 'DefineBitsJPEG2':
            images.append((tag.CharacterID, tag.ImageData))
    images = [base + x[1] for x in sorted(images, reverse=True)]
    # get the last DefineSprite
    defsprite = None
    for tag in swf.tags:
        if tag.name == 'DefineSprite':
            defsprite = tag
    # Previously this asserted on the loop variable `tag`, which was always
    # true whenever the file had any tags at all; check the real result.
    assert defsprite is not None, "DefineSprite not found"
    # get the actions
    doaction = defsprite.ControlTags[0]
    for act in doaction.Actions:
        if act.name == 'ActionConstantPool':
            break
    else:
        if len(images) < 3:
            # not enough images and no constant pool: a non-programs swf!
            return []
        raise ValueError("No ActionConstantPool found!")
    # do some magic to retrieve the texts
    cpe = _ConstantPoolExtractor(act.ConstantPool, doaction.Actions)
    i = 0
    all_vals = []
    while True:
        i += 1
        name = 'titulo%d1' % i
        occup = 'titulo%d2' % i
        bio = 'htmlText'
        date = 'titulo%d3' % i
        vals = cpe.get(name, occup, bio, date)
        if vals is None:
            break
        all_vals.append((vals[name], vals[occup], vals[bio], vals[date]))
    items = []
    for i, (name, occup, bio, date) in enumerate(all_vals):
        date = _fix_date(date)
        if date is None:
            # placeholder date: not a real episode entry
            continue
        occup = _fix_occup(occup)
        bio = _fix_bio(bio)
        name = _fix_name(name)
        # use the corresponding image, or through the custom order
        if custom_order is None:
            idx = i
        else:
            try:
                idx = custom_order.index(name)
            except ValueError:
                # name not requested by the caller; skip this episode
                continue
        image = images[idx]
        ep = Episode(name=name, occup=occup, bio=bio, image=image, date=date)
        items.append(ep)
    return items
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: scrapers_dqsv.py file.swf")
        exit()
    # No custom ordering when run stand-alone: images match positionally.
    custom_order = None
    with open(sys.argv[1], 'rb') as fh:
        episodes = scrap(fh, custom_order)
    # Dump each episode's image next to the script for manual inspection.
    for i, ep in enumerate(episodes):
        print("Saving img {} for {}".format(i, ep.name))
        with open("scraper-img-{}.jpeg".format(i), "wb") as fh:
            fh.write(ep.image)
| gpl-3.0 |
si618/pi-time | node_modules/grunt-nose/tasks/lib/nose/plugins/cover.py | 21 | 11133 | """If you have Ned Batchelder's coverage_ module installed, you may activate a
coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
environment variable. The coverage report will cover any python source module
imported after the start of the test run, excluding modules that match
testMatch. If you want to include those modules too, use the ``--cover-tests``
switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
restrict the coverage report to modules from a particular package or packages,
use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
variable.
.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
"""
import logging
import re
import sys
import StringIO
from nose.plugins.base import Plugin
from nose.util import src, tolist
log = logging.getLogger(__name__)
class Coverage(Plugin):
    """
    Activate a coverage report using Ned Batchelder's coverage module.
    """
    # Defaults; overwritten from command-line options in configure().
    coverTests = False
    coverPackages = None
    coverInstance = None
    coverErase = False
    coverMinPercentage = None
    # Plugin ordering score — presumably chosen so coverage starts before
    # test modules are imported; confirm against the nose plugin API.
    score = 200
    status = {}
    def options(self, parser, env):
        """
        Add options to command line.

        Each option's default can also come from the corresponding
        NOSE_COVER_* environment variable.
        """
        super(Coverage, self).options(parser, env)
        parser.add_option("--cover-package", action="append",
                          default=env.get('NOSE_COVER_PACKAGE'),
                          metavar="PACKAGE",
                          dest="cover_packages",
                          help="Restrict coverage output to selected packages "
                               "[NOSE_COVER_PACKAGE]")
        parser.add_option("--cover-erase", action="store_true",
                          default=env.get('NOSE_COVER_ERASE'),
                          dest="cover_erase",
                          help="Erase previously collected coverage "
                               "statistics before run")
        parser.add_option("--cover-tests", action="store_true",
                          dest="cover_tests",
                          default=env.get('NOSE_COVER_TESTS'),
                          help="Include test modules in coverage report "
                               "[NOSE_COVER_TESTS]")
        parser.add_option("--cover-min-percentage", action="store",
                          dest="cover_min_percentage",
                          default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
                          help="Minimum percentage of coverage for tests "
                               "to pass [NOSE_COVER_MIN_PERCENTAGE]")
        parser.add_option("--cover-inclusive", action="store_true",
                          dest="cover_inclusive",
                          default=env.get('NOSE_COVER_INCLUSIVE'),
                          help="Include all python files under working "
                               "directory in coverage report.  Useful for "
                               "discovering holes in test coverage if not all "
                               "files are imported by the test suite. "
                               "[NOSE_COVER_INCLUSIVE]")
        parser.add_option("--cover-html", action="store_true",
                          default=env.get('NOSE_COVER_HTML'),
                          dest='cover_html',
                          help="Produce HTML coverage information")
        parser.add_option('--cover-html-dir', action='store',
                          default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
                          dest='cover_html_dir',
                          metavar='DIR',
                          help='Produce HTML coverage information in dir')
        parser.add_option("--cover-branches", action="store_true",
                          default=env.get('NOSE_COVER_BRANCHES'),
                          dest="cover_branches",
                          help="Include branch coverage in coverage report "
                               "[NOSE_COVER_BRANCHES]")
        parser.add_option("--cover-xml", action="store_true",
                          default=env.get('NOSE_COVER_XML'),
                          dest="cover_xml",
                          help="Produce XML coverage information")
        parser.add_option("--cover-xml-file", action="store",
                          default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
                          dest="cover_xml_file",
                          metavar="FILE",
                          help="Produce XML coverage information in file")
    def configure(self, options, conf):
        """
        Configure plugin.

        Translates parsed command-line options into instance attributes
        and instantiates the coverage object if the plugin is enabled.
        """
        try:
            self.status.pop('active')
        except KeyError:
            pass
        super(Coverage, self).configure(options, conf)
        # Worker processes inherit configuration from the master.
        if conf.worker:
            return
        if self.enabled:
            try:
                import coverage
                if not hasattr(coverage, 'coverage'):
                    raise ImportError("Unable to import coverage module")
            except ImportError:
                log.error("Coverage not available: "
                          "unable to import coverage module")
                self.enabled = False
                return
        self.conf = conf
        self.coverErase = options.cover_erase
        self.coverTests = options.cover_tests
        self.coverPackages = []
        if options.cover_packages:
            if isinstance(options.cover_packages, (list, tuple)):
                cover_packages = options.cover_packages
            else:
                cover_packages = [options.cover_packages]
            # Each entry may itself be a comma/space separated list.
            for pkgs in [tolist(x) for x in cover_packages]:
                self.coverPackages.extend(pkgs)
        self.coverInclusive = options.cover_inclusive
        if self.coverPackages:
            log.info("Coverage report will include only packages: %s",
                     self.coverPackages)
        self.coverHtmlDir = None
        if options.cover_html:
            self.coverHtmlDir = options.cover_html_dir
            log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
        self.coverBranches = options.cover_branches
        self.coverXmlFile = None
        if options.cover_min_percentage:
            # Accept both "85" and "85%".
            self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
        if options.cover_xml:
            self.coverXmlFile = options.cover_xml_file
            log.debug('Will put XML coverage report in %s', self.coverXmlFile)
        if self.enabled:
            self.status['active'] = True
            self.coverInstance = coverage.coverage(auto_data=False,
                branch=self.coverBranches, data_suffix=None,
                source=self.coverPackages)
    def begin(self):
        """
        Begin recording coverage information.
        """
        log.debug("Coverage begin")
        # Remember what was already imported: those modules are excluded
        # from the report later (see wantModuleCoverage).
        self.skipModules = sys.modules.keys()[:]
        if self.coverErase:
            log.debug("Clearing previously collected coverage statistics")
            self.coverInstance.combine()
            self.coverInstance.erase()
        # Honor "#pragma: no cover" markers (case-insensitive).
        self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
        self.coverInstance.load()
        self.coverInstance.start()
    def report(self, stream):
        """
        Output code coverage report.

        Writes the textual report to *stream*, optionally produces HTML/XML
        reports, and exits the process if a minimum percentage was set and
        not reached.
        """
        log.debug("Coverage report")
        self.coverInstance.stop()
        self.coverInstance.combine()
        self.coverInstance.save()
        modules = [module
                   for name, module in sys.modules.items()
                   if self.wantModuleCoverage(name, module)]
        log.debug("Coverage report will cover modules: %s", modules)
        self.coverInstance.report(modules, file=stream)
        import coverage
        if self.coverHtmlDir:
            log.debug("Generating HTML coverage report")
            try:
                self.coverInstance.html_report(modules, self.coverHtmlDir)
            except coverage.misc.CoverageException, e:
                log.warning("Failed to generate HTML report: %s" % str(e))
        if self.coverXmlFile:
            log.debug("Generating XML coverage report")
            try:
                self.coverInstance.xml_report(modules, self.coverXmlFile)
            except coverage.misc.CoverageException, e:
                log.warning("Failed to generate XML report: %s" % str(e))
        # make sure we have minimum required coverage
        if self.coverMinPercentage:
            # Re-run the report into a buffer and scrape the TOTAL line.
            f = StringIO.StringIO()
            self.coverInstance.report(modules, file=f)
            multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
                              r'\s+(\d+)%\s+\d*\s{0,1}$')
            singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
                               r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
            m = re.search(multiPackageRe, f.getvalue())
            if m is None:
                m = re.search(singlePackageRe, f.getvalue())
            if m:
                percentage = int(m.groups()[0])
                if percentage < self.coverMinPercentage:
                    log.error('TOTAL Coverage did not reach minimum '
                              'required: %d%%' % self.coverMinPercentage)
                    sys.exit(1)
            else:
                log.error("No total percentage was found in coverage output, "
                          "something went wrong.")
    def wantModuleCoverage(self, name, module):
        """Return True if *module* should appear in the coverage report."""
        if not hasattr(module, '__file__'):
            log.debug("no coverage of %s: no __file__", name)
            return False
        module_file = src(module.__file__)
        if not module_file or not module_file.endswith('.py'):
            log.debug("no coverage of %s: not a python file", name)
            return False
        if self.coverPackages:
            # Match the package name as a prefix at a word boundary.
            for package in self.coverPackages:
                if (re.findall(r'^%s\b' % re.escape(package), name)
                        and (self.coverTests
                             or not self.conf.testMatch.search(name))):
                    log.debug("coverage for %s", name)
                    return True
        if name in self.skipModules:
            log.debug("no coverage for %s: loaded before coverage start",
                      name)
            return False
        if self.conf.testMatch.search(name) and not self.coverTests:
            log.debug("no coverage for %s: is a test", name)
            return False
        # accept any package that passed the previous tests, unless
        # coverPackages is on -- in that case, if we wanted this
        # module, we would have already returned True
        return not self.coverPackages
    def wantFile(self, file, package=None):
        """If inclusive coverage enabled, return true for all source files
        in wanted packages.

        Returns None (no opinion) when inclusive mode is off or the file
        does not qualify, deferring to other plugins.
        """
        if self.coverInclusive:
            if file.endswith(".py"):
                if package and self.coverPackages:
                    for want in self.coverPackages:
                        if package.startswith(want):
                            return True
                else:
                    return True
        return None
| gpl-3.0 |
browseinfo/odoo_saas3_nicolas | addons/delivery/delivery.py | 1 | 12164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class delivery_carrier(osv.osv):
    """A delivery method (carrier): a transport partner plus a delivery
    product, priced through one or more ``delivery.grid`` records.

    Two configuration modes coexist:

    * simple: ``normal_price`` and/or ``free_if_more_than``/``amount`` are
      compiled into a single generated grid by ``create_grid_lines``;
    * advanced: ``use_detailed_pricelist`` exposes the grids directly, and
      ``create_grid_lines`` leaves them untouched.
    """
    _name = "delivery.carrier"
    _description = "Carrier"
    def name_get(self, cr, uid, ids, context=None):
        """Standard ``name_get``; when ``context['order_id']`` is set, the
        delivery price computed for that sale order and the order currency
        are appended to each carrier's display name."""
        if not len(ids):
            return []
        if context is None:
            context = {}
        order_id = context.get('order_id',False)
        if not order_id:
            res = super(delivery_carrier, self).name_get(cr, uid, ids, context=context)
        else:
            order = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
            currency = order.pricelist_id.currency_id.name or ''
            # 'price' is the function field below, evaluated with the same
            # context (so against the same order).
            res = [(r['id'], r['name']+' ('+(str(r['price']))+' '+currency+')') for r in self.read(cr, uid, ids, ['name', 'price'], context)]
        return res
    def get_price(self, cr, uid, ids, field_name, arg=None, context=None):
        """Function-field getter for ``price``: the delivery price of each
        carrier for the sale order in ``context['order_id']``.

        Returns 0.0 when no grid matches the order's shipping address, and
        False when no order is in the context.
        """
        res={}
        if context is None:
            context = {}
        sale_obj=self.pool.get('sale.order')
        grid_obj=self.pool.get('delivery.grid')
        for carrier in self.browse(cr, uid, ids, context=context):
            order_id=context.get('order_id',False)
            price=False
            if order_id:
                order = sale_obj.browse(cr, uid, order_id, context=context)
                carrier_grid=self.grid_get(cr,uid,[carrier.id],order.partner_shipping_id.id,context)
                if carrier_grid:
                    price=grid_obj.get_price(cr, uid, carrier_grid, order, time.strftime('%Y-%m-%d'), context)
                else:
                    price = 0.0
            res[carrier.id]=price
        return res
    # ORM field declarations.
    _columns = {
        'name': fields.char('Delivery Method', size=64, required=True),
        'partner_id': fields.many2one('res.partner', 'Transport Company', required=True, help="The partner that is doing the delivery service."),
        'product_id': fields.many2one('product.product', 'Delivery Product', required=True),
        'grids_id': fields.one2many('delivery.grid', 'carrier_id', 'Delivery Grids'),
        'price' : fields.function(get_price, string='Price'),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery carrier without removing it."),
        'normal_price': fields.float('Normal Price', help="Keep empty if the pricing depends on the advanced pricing per destination"),
        'free_if_more_than': fields.boolean('Free If Order Total Amount Is More Than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping"),
        'amount': fields.float('Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency"),
        'use_detailed_pricelist': fields.boolean('Advanced Pricing per Destination', help="Check this box if you want to manage delivery prices that depends on the destination, the weight, the total of the order, etc."),
        'pricelist_ids': fields.one2many('delivery.grid', 'carrier_id', 'Advanced Pricing'),
    }
    _defaults = {
        'active': 1,
        'free_if_more_than': False,
    }
    def grid_get(self, cr, uid, ids, contact_id, context=None):
        """Return the id of the first grid (of the first carrier in ``ids``)
        matching ``contact_id``'s address, or False.

        A grid matches when each of its non-empty filters (countries,
        states, zip range) accepts the contact; empty filters match
        everything.
        """
        contact = self.pool.get('res.partner').browse(cr, uid, contact_id, context=context)
        for carrier in self.browse(cr, uid, ids, context=context):
            for grid in carrier.grids_id:
                get_id = lambda x: x.id
                country_ids = map(get_id, grid.country_ids)
                state_ids = map(get_id, grid.state_ids)
                if country_ids and not contact.country_id.id in country_ids:
                    continue
                if state_ids and not contact.state_id.id in state_ids:
                    continue
                # NOTE(review): zip bounds are compared as plain strings, so
                # the range test is lexicographic — presumably fine when all
                # zips share one fixed-width format; confirm for mixed data.
                if grid.zip_from and (contact.zip or '')< grid.zip_from:
                    continue
                if grid.zip_to and (contact.zip or '')> grid.zip_to:
                    continue
                return grid.id
        return False
    def create_grid_lines(self, cr, uid, ids, vals, context=None):
        """Synchronise the simple-mode fields (``normal_price``,
        ``free_if_more_than``/``amount``) into one generated grid with up
        to two lines.  Invoked from ``create`` and ``write``."""
        if context is None:
            context = {}
        grid_line_pool = self.pool.get('delivery.grid.line')
        grid_pool = self.pool.get('delivery.grid')
        for record in self.browse(cr, uid, ids, context=context):
            # if using advanced pricing per destination: do not change
            if record.use_detailed_pricelist:
                continue
            # not using advanced pricing per destination: override grid
            grid_id = grid_pool.search(cr, uid, [('carrier_id', '=', record.id)], context=context)
            if grid_id and not (record.normal_price or record.free_if_more_than):
                grid_pool.unlink(cr, uid, grid_id, context=context)
            # Check that float, else 0.0 is False
            if not (isinstance(record.normal_price,float) or record.free_if_more_than):
                continue
            if not grid_id:
                grid_data = {
                    'name': record.name,
                    'carrier_id': record.id,
                    'sequence': 10,
                }
                grid_id = [grid_pool.create(cr, uid, grid_data, context=context)]
            # Wipe previously generated lines before rebuilding them.
            lines = grid_line_pool.search(cr, uid, [('grid_id','in',grid_id)], context=context)
            if lines:
                grid_line_pool.unlink(cr, uid, lines, context=context)
            #create the grid lines
            if record.free_if_more_than:
                line_data = {
                    'grid_id': grid_id and grid_id[0],
                    'name': _('Free if more than %.2f') % record.amount,
                    'type': 'price',
                    'operator': '>=',
                    'max_value': record.amount,
                    'standard_price': 0.0,
                    'list_price': 0.0,
                }
                grid_line_pool.create(cr, uid, line_data, context=context)
            if isinstance(record.normal_price,float):
                line_data = {
                    'grid_id': grid_id and grid_id[0],
                    'name': _('Default price'),
                    'type': 'price',
                    'operator': '>=',
                    'max_value': 0.0,
                    'standard_price': record.normal_price,
                    'list_price': record.normal_price,
                }
                grid_line_pool.create(cr, uid, line_data, context=context)
        return True
    def write(self, cr, uid, ids, vals, context=None):
        """Standard write, followed by regeneration of the simple-mode grid."""
        if isinstance(ids, (int,long)):
            ids = [ids]
        res = super(delivery_carrier, self).write(cr, uid, ids, vals, context=context)
        self.create_grid_lines(cr, uid, ids, vals, context=context)
        return res
    def create(self, cr, uid, vals, context=None):
        """Standard create, followed by generation of the simple-mode grid."""
        res_id = super(delivery_carrier, self).create(cr, uid, vals, context=context)
        self.create_grid_lines(cr, uid, [res_id], vals, context=context)
        return res_id
class delivery_grid(osv.osv):
    """A delivery pricing grid: a destination filter (countries, states,
    zip range) plus an ordered list of pricing lines (``delivery.grid.line``)."""
    _name = "delivery.grid"
    _description = "Delivery Grid"
    _columns = {
        'name': fields.char('Grid Name', size=64, required=True),
        'sequence': fields.integer('Sequence', size=64, required=True, help="Gives the sequence order when displaying a list of delivery grid."),
        'carrier_id': fields.many2one('delivery.carrier', 'Carrier', required=True, ondelete='cascade'),
        'country_ids': fields.many2many('res.country', 'delivery_grid_country_rel', 'grid_id', 'country_id', 'Countries'),
        'state_ids': fields.many2many('res.country.state', 'delivery_grid_state_rel', 'grid_id', 'state_id', 'States'),
        'zip_from': fields.char('Start Zip', size=12),
        'zip_to': fields.char('To Zip', size=12),
        'line_ids': fields.one2many('delivery.grid.line', 'grid_id', 'Grid Line'),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the delivery grid without removing it."),
    }
    _defaults = {
        'active': lambda *a: 1,
        'sequence': lambda *a: 1,
    }
    _order = 'sequence'
    def get_price(self, cr, uid, id, order, dt, context=None):
        """Compute the delivery price of sale order ``order`` for grid ``id``.

        Aggregates the order lines' weight and volume (quantities converted
        to each product's reference UoM; delivery lines and lines without a
        product are skipped) and delegates to ``get_price_from_picking``.
        ``dt`` is unused but kept for API compatibility.
        """
        weight = 0
        volume = 0
        product_uom_obj = self.pool.get('product.uom')
        for line in order.order_line:
            if not line.product_id or line.is_delivery:
                continue
            q = product_uom_obj._compute_qty(cr, uid, line.product_uom.id, line.product_uom_qty, line.product_id.uom_id.id)
            weight += (line.product_id.weight or 0.0) * q
            volume += (line.product_id.volume or 0.0) * q
        total = order.amount_total or 0.0
        return self.get_price_from_picking(cr, uid, id, total, weight, volume, context=context)
    def get_price_from_picking(self, cr, uid, id, total, weight, volume, context=None):
        """Return the price of the first line of grid ``id`` whose condition
        matches.

        Each line compares one measure (price, weight, volume or
        weight*volume) against its ``max_value`` using its operator; a
        'variable' line multiplies ``list_price`` by the chosen factor.

        :raises osv.except_osv: when no line of the grid matches.
        """
        grid = self.browse(cr, uid, id, context=context)
        price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight}
        # Compare values directly instead of building an expression string
        # and eval()ing it: eval was a needless code-execution surface and
        # round-tripped max_value through str(), which can lose float
        # precision.  The selection field only allows these operators.
        comparators = {
            '==': lambda a, b: a == b,
            '<=': lambda a, b: a <= b,
            '>=': lambda a, b: a >= b,
        }
        for line in grid.line_ids:
            if comparators[line.operator](price_dict[line.type], line.max_value):
                if line.price_type == 'variable':
                    return line.list_price * price_dict[line.variable_factor]
                return line.list_price
        raise osv.except_osv(_('No price available!'), _('No line matched this product or order in the chosen delivery grid.'))
class delivery_grid_line(osv.osv):
    """One pricing rule of a delivery grid: when ``<type> <operator>
    <max_value>`` holds for the order, this line's price applies (see
    ``delivery.grid.get_price_from_picking``)."""
    _name = "delivery.grid.line"
    _description = "Delivery Grid Line"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'grid_id': fields.many2one('delivery.grid', 'Grid',required=True, ondelete='cascade'),
        # Measure of the order the condition applies to.
        'type': fields.selection([('weight','Weight'),('volume','Volume'),\
                                 ('wv','Weight * Volume'), ('price','Price')],\
                                 'Variable', required=True),
        'operator': fields.selection([('==','='),('<=','<='),('>=','>=')], 'Operator', required=True),
        'max_value': fields.float('Maximum Value', required=True),
        # 'variable' multiplies list_price by the chosen factor; 'fixed' uses it as-is.
        'price_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Price Type', required=True),
        'variable_factor': fields.selection([('weight','Weight'),('volume','Volume'),('wv','Weight * Volume'), ('price','Price')], 'Variable Factor', required=True),
        'list_price': fields.float('Sale Price', digits_compute= dp.get_precision('Product Price'), required=True),
        'standard_price': fields.float('Cost Price', digits_compute= dp.get_precision('Product Price'), required=True),
    }
    _defaults = {
        'type': lambda *args: 'weight',
        'operator': lambda *args: '<=',
        'price_type': lambda *args: 'fixed',
        'variable_factor': lambda *args: 'weight',
    }
    _order = 'list_price'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eeshangarg/oh-mainline | vendor/packages/south/south/logger.py | 129 | 1175 | import sys
import logging
from django.conf import settings
# Create a dummy handler to use for now.
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""
    def emit(self, record):
        # Deliberately a no-op: attached so the "south" logger always has
        # at least one handler.
        return
def get_logger():
    "Attach a file handler to the logger if there isn't one already."
    if not getattr(settings, "SOUTH_LOGGING_ON", False):
        return _logger
    logging_file = getattr(settings, "SOUTH_LOGGING_FILE", False)
    if not logging_file:
        raise IOError("SOUTH_LOGGING_ON is True. You also need a SOUTH_LOGGING_FILE setting.")
    # The logger always carries the NullHandler, so fewer than two handlers
    # means no file handler has been attached yet.
    if len(_logger.handlers) < 2:
        _logger.addHandler(logging.FileHandler(logging_file))
    _logger.setLevel(logging.DEBUG)
    return _logger
def close_logger():
    """Detach (and close) the logger's handlers, so we can remove the log
    file after a test.

    Iterates over a *copy* of ``_logger.handlers``: ``removeHandler``
    mutates that list, and removing entries while iterating the live list
    skipped every other handler.
    """
    for handler in list(_logger.handlers):
        _logger.removeHandler(handler)
        if isinstance(handler, logging.FileHandler):
            handler.close()
def init_logger():
    "Initialize the south logger"
    south_logger = logging.getLogger("south")
    # Attach the no-op handler so logging never warns about a handler-less
    # logger before get_logger() configures a real one.
    south_logger.addHandler(NullHandler())
    return south_logger
# Module-level singleton created at import time; get_logger() and
# close_logger() both operate on this instance.
_logger = init_logger()
| agpl-3.0 |
wackymaster/QTClock | Libraries/numpy/doc/constants.py | 172 | 8954 | """
=========
Constants
=========
Numpy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
# Registry of (constant name, docstring) pairs, filled by add_newdoc below.
constants = []
def add_newdoc(module, name, doc):
    """Record a (name, docstring) pair in ``constants``.

    ``module`` is accepted only to mirror the signature of
    ``numpy.add_newdocs`` and is otherwise ignored.
    """
    constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True], dtype=bool)
>>> np.isnan([np.NZERO])
array([False], dtype=bool)
>>> np.isinf([np.NZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True], dtype=bool)
>>> np.isnan([np.PZERO])
array([False], dtype=bool)
>>> np.isinf([np.PZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
    # Autogenerate the module docstring: render every registered constant
    # as a ``.. const::`` entry, converting underlined section headers
    # (e.g. "See Also" over a row of dashes) into Sphinx rubrics.
    constants_str = []
    constants.sort()
    for name, doc in constants:
        s = textwrap.dedent(doc).replace("\n", "\n ")
        # Replace sections by rubrics
        lines = s.split("\n")
        new_lines = []
        for line in lines:
            m = re.match(r'^(\s+)[-=]+\s*$', line)
            if m and new_lines:
                # The previous line was the section title; re-emit it as a rubric.
                prev = textwrap.dedent(new_lines.pop())
                new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
                new_lines.append('')
            else:
                new_lines.append(line)
        s = "\n".join(new_lines)
        # Done.
        constants_str.append(""".. const:: %s\n %s""" % (name, s))
    constants_str = "\n".join(constants_str)
    __doc__ = __doc__ % dict(constant_list=constants_str)
    # Clean the module namespace of all the temporaries used above.
    del constants_str, name, doc
    del line, lines, new_lines, m, s, prev
del constants, add_newdoc
| mit |
souravsingh/sympy | sympy/printing/tests/test_mathml.py | 21 | 19023 | from sympy import diff, Integral, Limit, sin, Symbol, Integer, Rational, cos, \
tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, E, I, oo, \
pi, GoldenRatio, EulerGamma, Sum, Eq, Ne, Ge, Lt, Float, Matrix
from sympy.printing.mathml import mathml, MathMLPrinter
from sympy.utilities.pytest import raises
# Shared fixtures: two symbols and one printer instance reused by every
# test below.
x = Symbol('x')
y = Symbol('y')
mp = MathMLPrinter()
def test_printmethod():
    """doprint emits Content MathML for a simple addition."""
    expected = '<apply><plus/><ci>x</ci><cn>1</cn></apply>'
    assert mp.doprint(1 + x) == expected
def test_mathml_core():
    """Basic Content MathML trees for Add, Pow and Mul expressions."""
    mml_1 = mp._print(1 + x)
    assert mml_1.nodeName == 'apply'
    nodes = mml_1.childNodes
    assert len(nodes) == 3
    assert nodes[0].nodeName == 'plus'
    assert nodes[0].hasChildNodes() is False
    assert nodes[0].nodeValue is None
    assert nodes[1].nodeName in ['cn', 'ci']
    # The two Add arguments may appear in either order, so the test
    # branches on which one comes first.
    if nodes[1].nodeName == 'cn':
        assert nodes[1].childNodes[0].nodeValue == '1'
        assert nodes[2].childNodes[0].nodeValue == 'x'
    else:
        assert nodes[1].childNodes[0].nodeValue == 'x'
        assert nodes[2].childNodes[0].nodeValue == '1'
    mml_2 = mp._print(x**2)
    assert mml_2.nodeName == 'apply'
    nodes = mml_2.childNodes
    assert nodes[1].childNodes[0].nodeValue == 'x'
    assert nodes[2].childNodes[0].nodeValue == '2'
    mml_3 = mp._print(2*x)
    assert mml_3.nodeName == 'apply'
    nodes = mml_3.childNodes
    assert nodes[0].nodeName == 'times'
    assert nodes[1].childNodes[0].nodeValue == '2'
    assert nodes[2].childNodes[0].nodeValue == 'x'
    mml = mp._print(Float(1.0, 2)*x)
    assert mml.nodeName == 'apply'
    nodes = mml.childNodes
    assert nodes[0].nodeName == 'times'
    assert nodes[1].childNodes[0].nodeValue == '1.0'
    assert nodes[2].childNodes[0].nodeValue == 'x'
def test_mathml_functions():
    """Function application and (partial) derivatives in Content MathML."""
    mml_1 = mp._print(sin(x))
    assert mml_1.nodeName == 'apply'
    assert mml_1.childNodes[0].nodeName == 'sin'
    assert mml_1.childNodes[1].nodeName == 'ci'
    mml_2 = mp._print(diff(sin(x), x, evaluate=False))
    assert mml_2.nodeName == 'apply'
    assert mml_2.childNodes[0].nodeName == 'diff'
    assert mml_2.childNodes[1].nodeName == 'bvar'
    assert mml_2.childNodes[1].childNodes[
        0].nodeName == 'ci'  # below bvar there's <ci>x</ci>
    # A multivariate expression differentiates with 'partialdiff' instead.
    mml_3 = mp._print(diff(cos(x*y), x, evaluate=False))
    assert mml_3.nodeName == 'apply'
    assert mml_3.childNodes[0].nodeName == 'partialdiff'
    assert mml_3.childNodes[1].nodeName == 'bvar'
    assert mml_3.childNodes[1].childNodes[
        0].nodeName == 'ci'  # below bvar there's <ci>x</ci>
def test_mathml_limits():
    # XXX No unevaluated limits
    lim_fun = sin(x)/x
    mml = mp._print(Limit(lim_fun, x, 0))
    for position, tag in enumerate(['limit', 'bvar', 'lowlimit']):
        assert mml.childNodes[position].nodeName == tag
    assert mml.childNodes[3].toxml() == mp._print(lim_fun).toxml()
def test_mathml_integrals():
    """A definite Integral prints as int/bvar/limits plus the integrand."""
    integrand = x
    mml = mp._print(Integral(integrand, (x, 0, 1)))
    for position, tag in enumerate(['int', 'bvar', 'lowlimit', 'uplimit']):
        assert mml.childNodes[position].nodeName == tag
    assert mml.childNodes[4].toxml() == mp._print(integrand).toxml()
def test_mathml_matrices():
    """Matrices print as nested matrixrow elements of cn entries."""
    def check(mml, expected_rows):
        for row, values in zip(mml.childNodes, expected_rows):
            assert row.nodeName == 'matrixrow'
            for cell, value in zip(row.childNodes, values):
                assert cell.nodeName == 'cn'
                assert cell.childNodes[0].nodeValue == value
    A = Matrix([1, 2, 3])
    B = Matrix([[0, 5, 4], [2, 3, 1], [9, 7, 9]])
    check(mp._print(A), [['1'], ['2'], ['3']])
    check(mp._print(B), [['0', '5', '4'], ['2', '3', '1'], ['9', '7', '9']])
def test_mathml_sums():
    """A Sum prints as sum/bvar/limits plus the summand."""
    summand = x
    mml = mp._print(Sum(summand, (x, 1, 10)))
    for position, tag in enumerate(['sum', 'bvar', 'lowlimit', 'uplimit']):
        assert mml.childNodes[position].nodeName == tag
    assert mml.childNodes[4].toxml() == mp._print(summand).toxml()
def test_mathml_tuples():
    """Python lists print as a MathML <list> of <cn> children."""
    single = mp._print([2])
    assert single.nodeName == 'list'
    assert [child.nodeName for child in single.childNodes] == ['cn']
    pair = mp._print([2, Integer(1)])
    assert pair.nodeName == 'list'
    assert [child.nodeName for child in pair.childNodes] == ['cn', 'cn']
def test_mathml_add():
    """Subtraction renders as plus with a minus-wrapped argument."""
    mml = mp._print(x**5 - x**4 + x)
    head, second = mml.childNodes[0], mml.childNodes[1]
    assert head.nodeName == 'plus'
    assert second.childNodes[0].nodeName == 'minus'
    assert second.childNodes[1].nodeName == 'apply'
def test_mathml_Rational():
    # An integer-valued rational should just return a number...
    whole = mp._print(Rational(1, 1))
    assert whole.nodeName == 'cn'
    # ...while a proper fraction becomes an <apply><divide/>... tree.
    fraction = mp._print(Rational(2, 5))
    assert fraction.childNodes[0].nodeName == 'divide'
def test_mathml_constants():
    """Well-known constants map to their Content MathML element names."""
    named = [(I, 'imaginaryi'), (E, 'exponentiale'), (oo, 'infinity'), (pi, 'pi')]
    for const, tag in named:
        assert mp._print(const).nodeName == tag
    assert mathml(GoldenRatio) == '<cn>&#966;</cn>'
    assert mathml(EulerGamma) == '<eulergamma/>'
def test_mathml_trig():
    """Trigonometric and hyperbolic functions use their MathML tags."""
    expected_tags = [
        (sin, 'sin'), (cos, 'cos'), (tan, 'tan'),
        (asin, 'arcsin'), (acos, 'arccos'), (atan, 'arctan'),
        (sinh, 'sinh'), (cosh, 'cosh'), (tanh, 'tanh'),
        (asinh, 'arcsinh'), (atanh, 'arctanh'), (acosh, 'arccosh'),
    ]
    for func, tag in expected_tags:
        assert mp._print(func(x)).childNodes[0].nodeName == tag
def test_mathml_relational():
    """Relational operators print as apply nodes with typed operands."""
    cases = [
        (Eq(x, 1), 'eq', [('ci', 'x'), ('cn', '1')]),
        (Ne(1, x), 'neq', [('cn', '1'), ('ci', 'x')]),
        (Ge(1, x), 'geq', [('cn', '1'), ('ci', 'x')]),
        (Lt(1, x), 'lt', [('cn', '1'), ('ci', 'x')]),
    ]
    for expr, op_tag, operands in cases:
        mml = mp._print(expr)
        assert mml.nodeName == 'apply'
        assert mml.childNodes[0].nodeName == op_tag
        for node, (tag, text) in zip(mml.childNodes[1:], operands):
            assert node.nodeName == tag
            assert node.childNodes[0].nodeValue == text
def test_symbol():
    """Symbols with ^/_ decorations become msub/msup/msubsup trees."""
    mml = mp._print(Symbol("x"))
    assert mml.nodeName == 'ci'
    assert mml.childNodes[0].nodeValue == 'x'

    def scripted(sym_name):
        # Print the symbol and return the single presentation child of <ci>.
        node = mp._print(Symbol(sym_name))
        assert node.nodeName == 'ci'
        return node.childNodes[0]

    def check_plain(sym_name, wrapper, values):
        # Base and scripts are all simple mml:mi leaves, in this order:
        # msub/msup -> [base, script]; msubsup -> [base, sub, sup].
        node = scripted(sym_name)
        assert node.nodeName == wrapper
        for child, value in zip(node.childNodes, values):
            assert child.nodeName == 'mml:mi'
            assert child.childNodes[0].nodeValue == value

    check_plain("x^2", 'mml:msup', ['x', '2'])
    check_plain("x__2", 'mml:msup', ['x', '2'])
    check_plain("x_2", 'mml:msub', ['x', '2'])
    check_plain("x^3_2", 'mml:msubsup', ['x', '2', '3'])
    check_plain("x__3_2", 'mml:msubsup', ['x', '2', '3'])

    def check_row(sym_name, wrapper):
        # Multi-part scripts are wrapped in an mml:mrow: "2", a spacing
        # operator, then "a".
        node = scripted(sym_name)
        assert node.nodeName == wrapper
        base, row = node.childNodes[0], node.childNodes[1]
        assert base.nodeName == 'mml:mi'
        assert base.childNodes[0].nodeValue == 'x'
        assert row.nodeName == 'mml:mrow'
        for child, (tag, text) in zip(
                row.childNodes,
                [('mml:mi', '2'), ('mml:mo', ' '), ('mml:mi', 'a')]):
            assert child.nodeName == tag
            assert child.childNodes[0].nodeValue == text

    check_row("x_2_a", 'mml:msub')
    check_row("x^2^a", 'mml:msup')
    check_row("x__2__a", 'mml:msup')
def test_mathml_greek():
    """Greek-letter symbol names must print as a <ci> holding the glyph."""
    mml = mp._print(Symbol('alpha'))
    assert mml.nodeName == 'ci'
    assert mml.childNodes[0].nodeValue == u'\N{GREEK SMALL LETTER ALPHA}'

    # (symbol name, expected glyph) pairs: the full lower-case alphabet,
    # including the final-sigma variant, followed by the capitals.
    letters = [
        ('alpha', 'α'), ('beta', 'β'), ('gamma', 'γ'), ('delta', 'δ'),
        ('epsilon', 'ε'), ('zeta', 'ζ'), ('eta', 'η'), ('theta', 'θ'),
        ('iota', 'ι'), ('kappa', 'κ'), ('lambda', 'λ'), ('mu', 'μ'),
        ('nu', 'ν'), ('xi', 'ξ'), ('omicron', 'ο'), ('pi', 'π'),
        ('rho', 'ρ'), ('varsigma', 'ς'), ('sigma', 'σ'), ('tau', 'τ'),
        ('upsilon', 'υ'), ('phi', 'φ'), ('chi', 'χ'), ('psi', 'ψ'),
        ('omega', 'ω'),
        ('Alpha', 'Α'), ('Beta', 'Β'), ('Gamma', 'Γ'), ('Delta', 'Δ'),
        ('Epsilon', 'Ε'), ('Zeta', 'Ζ'), ('Eta', 'Η'), ('Theta', 'Θ'),
        ('Iota', 'Ι'), ('Kappa', 'Κ'), ('Lambda', 'Λ'), ('Mu', 'Μ'),
        ('Nu', 'Ν'), ('Xi', 'Ξ'), ('Omicron', 'Ο'), ('Pi', 'Π'),
        ('Rho', 'Ρ'), ('Sigma', 'Σ'), ('Tau', 'Τ'), ('Upsilon', 'Υ'),
        ('Phi', 'Φ'), ('Chi', 'Χ'), ('Psi', 'Ψ'), ('Omega', 'Ω'),
    ]
    for name, glyph in letters:
        assert mp.doprint(Symbol(name)) == '<ci>%s</ci>' % glyph, \
            mp.doprint(Symbol(name))
def test_mathml_order():
    """The printer must honour the 'order' setting when laying out a sum."""
    expr = x**3 + x**2*y + 3*x*y**3 + y**4

    def check_power(node, base, exponent):
        # node wraps <power/><ci>base</ci><cn>exponent</cn>
        assert node.childNodes[0].nodeName == 'power'
        assert node.childNodes[1].childNodes[0].data == base
        assert node.childNodes[2].childNodes[0].data == exponent

    # Lexicographic order: x**3 leads, y**4 trails.
    lex_mml = MathMLPrinter({'order': 'lex'})._print(expr)
    check_power(lex_mml.childNodes[1], 'x', '3')
    check_power(lex_mml.childNodes[4], 'y', '4')

    # Reverse-lexicographic order: y**4 leads, x**3 trails.
    rev_mml = MathMLPrinter({'order': 'rev-lex'})._print(expr)
    check_power(rev_mml.childNodes[1], 'y', '4')
    check_power(rev_mml.childNodes[4], 'x', '3')
def test_settings():
    """Passing an unsupported printing method must raise TypeError."""
    def call_with_bad_method():
        return mathml(Symbol("x"), method="garbage")
    raises(TypeError, call_with_bad_method)
def test_toprettyxml_hooking():
    """Applying and restoring the printer's minidom patch must leave the
    standard library's toprettyxml behaviour untouched."""
    import xml.dom.minidom
    document = xml.dom.minidom.parseString(
        "<apply><plus/><ci>x</ci><cn>1</cn></apply>")
    before = document.toprettyxml()
    mp.apply_patch()
    mp.restore_patch()
    after = document.toprettyxml()
    assert before == after
| bsd-3-clause |
jeffreyliu3230/scrapi | scrapi/harvesters/opensiuc.py | 9 | 3307 | """
Harvester for the OpenSIUC API at Southern Illinois University for the SHARE project
More information available here:
https://github.com/CenterForOpenScience/SHARE/blob/master/providers/edu.siu.opensiuc.md
An example API call: http://opensiuc.lib.siu.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc&from=2014-10-09T00:00:00Z
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class OpenSIUCHarvester(OAIHarvester):
    """OAI-PMH harvester for OpenSIUC (Southern Illinois University Carbondale).

    Pure configuration: all harvesting behavior comes from the OAIHarvester
    base class; this subclass only supplies the endpoint and filters.
    """

    # Internal identifier for this provider within SHARE.
    short_name = 'opensiuc'
    # Human-readable provider name.
    long_name = 'OpenSIUC at the Southern Illinois University Carbondale'
    # Public landing page of the repository.
    url = 'http://opensiuc.lib.siu.edu/'
    # OAI-PMH endpoint the base class queries (verb/metadataPrefix added there).
    base_url = 'http://opensiuc.lib.siu.edu/do/oai/'

    # Extra OAI record fields the base class should carry through into
    # the normalized document's properties.
    property_list = [
        'type', 'source', 'format',
        'identifier', 'date', 'setSpec'
    ]

    # Whitelist of OAI setSpec values to harvest; records from any other
    # set are skipped. These correspond to OpenSIUC collection names.
    approved_sets = [
        'ad_pubs',
        'agecon_articles',
        'agecon_wp',
        'anat_pubs',
        'anthro_pubs',
        'arch_videos',
        'asfn_articles',
        'auto_pres',
        'ccj_articles',
        'cee_pubs',
        'chem_mdata',
        'chem_pubs',
        'cs_pubs',
        'cs_sp',
        'cwrl_fr',
        'dh_articles',
        'dh_pres',
        'dh_works',
        'dissertations',
        'ebl',
        'ece_articles',
        'ece_books',
        'ece_confs',
        'ece_tr',
        'econ_dp',
        'econ_pres',
        'epse_books',
        'epse_confs',
        'epse_pubs',
        'esh_2014',
        'fiaq_pubs',
        'fiaq_reports',
        'fin_pubs',
        'fin_wp',
        'for_articles',
        'geol_comp',
        'geol_pubs',
        'gers_pubs',
        'gmrc_gc',
        'gmrc_nm',
        'gs_rp',
        'hist_pubs',
        'histcw_pp',
        'igert_cache',
        'igert_reports',
        'ijshs_2014',
        'im_pubs',
        'jcwre',
        'kaleidoscope',
        'math_aids',
        'math_articles',
        'math_books',
        'math_diss',
        'math_grp',
        'math_misc',
        'math_theses',
        'meded_books',
        'meded_confs',
        'meded_pubs',
        'meep_articles',
        'micro_pres',
        'micro_pubs',
        'morris_articles',
        'morris_confs',
        'morris_surveys',
        'music_gradworks',
        'ojwed',
        'pb_pubs',
        'pb_reports',
        'phe_pres',
        'phe_pubs',
        'phys_pubs',
        'phys_vids',
        'pn_wp',
        'pnconfs_2010',
        'pnconfs_2011',
        'pnconfs_2012',
        'ppi_papers',
        'ppi_sipolls',
        'ppi_statepolls',
        'ps_confs',
        'ps_dr',
        'ps_pubs',
        'ps_wp',
        'psas_articles',
        'psych_diss',
        'psych_grp',
        'psych_pubs',
        'psych_theses',
        'reach_posters',
        'rehab_pubs',
        'safmusiccharts_faculty',
        'safmusiccharts_students',
        'safmusicpapers_faculty',
        'safmusicpapers_students',
        'srs_2009',
        'theses',
        'ucowrconfs_2003',
        'ucowrconfs_2004',
        'ucowrconfs_2005',
        'ucowrconfs_2006',
        'ucowrconfs_2007',
        'ucowrconfs_2008',
        'ucowrconfs_2009',
        'ugr_mcnair',
        'wed_diss',
        'wed_grp',
        'wed_theses',
        'wrd2011_keynote',
        'wrd2011_pres',
        'zool_data',
        'zool_diss',
        'zool_pubs'
    ]
| apache-2.0 |
ron8hu/spark | dev/run-tests-jenkins.py | 28 | 9192 | #!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
import json
import urllib2
import functools
import subprocess
from sparktestsupport import SPARK_HOME, ERROR_CODES
from sparktestsupport.shellutils import run_cmd
def print_err(msg):
    """Write *msg* to the standard error stream."""
    print(msg, file=sys.stderr)
def post_message_to_github(msg, ghprb_pull_id):
    """Post *msg* as a comment on Spark pull request *ghprb_pull_id*.

    Errors are reported to stderr but never raised, so a Github outage
    cannot fail the build itself. Requires GITHUB_OAUTH_KEY in the env.
    """
    print("Attempting to post to Github...")

    api_url = ("https://api.github.com/repos/apache/spark/issues/" +
               ghprb_pull_id + "/comments")
    payload = json.dumps({"body": msg})
    headers = {
        "Authorization": "token %s" % os.environ["GITHUB_OAUTH_KEY"],
        "Content-Type": "application/json"
    }
    request = urllib2.Request(api_url, headers=headers, data=payload)

    try:
        if urllib2.urlopen(request).getcode() == 201:
            print(" > Post successful.")
    except urllib2.HTTPError as http_e:
        # Github rejected the request; surface its diagnostics.
        print_err("Failed to post message to Github.")
        print_err(" > http_code: %s" % http_e.code)
        print_err(" > api_response: %s" % http_e.read())
        print_err(" > data: %s" % payload)
    except urllib2.URLError as url_e:
        # Could not reach Github at all (DNS, connection refused, ...).
        print_err("Failed to post message to Github.")
        print_err(" > urllib2_status: %s" % url_e.reason[1])
        print_err(" > data: %s" % payload)
def pr_message(build_display_name,
               build_url,
               ghprb_pull_id,
               short_commit_hash,
               commit_url,
               msg,
               post_msg=''):
    """Render the markdown status comment posted back to the pull request.

    *msg* is the verb phrase ("has started", "has finished"); *post_msg*,
    when non-empty, is appended before the closing period.
    """
    if post_msg:
        tail = ' ' + post_msg + '.'
    else:
        tail = '.'
    template = ('**[Test build %s %s](%stestReport)**'
                ' for PR %s at commit [`%s`](%s)%s')
    return template % (build_display_name, msg, build_url,
                       ghprb_pull_id, short_commit_hash, commit_url, tail)
def run_pr_checks(pr_tests, ghprb_actual_commit, sha1):
    """Run each dev/tests/<name>.sh pull-request check and collect its output.

    @return a list of per-check message strings to post back to Github
    """
    # Remember the PR head so every check starts from the same commit.
    head = run_cmd(['git', 'rev-parse', 'HEAD'], return_output=True).strip()

    results = []
    for check_name in pr_tests:
        script = os.path.join(SPARK_HOME, 'dev', 'tests', check_name + '.sh')
        output = run_cmd(['bash', script, ghprb_actual_commit, sha1],
                         return_output=True)
        results.append(output.rstrip())
        # A check may move HEAD around; restore the PR state for the next one.
        run_cmd(['git', 'checkout', '-f', head])
    return results
def run_tests(tests_timeout):
    """Run the full `dev/run-tests` suite under a hard timeout.

    @return a [result_code, result_note] pair: the suite's exit code and
    the human-readable line describing the outcome for the Github comment
    """
    run_tests_script = os.path.join(SPARK_HOME, 'dev', 'run-tests')
    test_result_code = subprocess.Popen(
        ['timeout', tests_timeout, run_tests_script]).wait()

    # Maps known non-zero exit codes to the phrase inserted into the note.
    failure_note_by_errcode = {
        # error to denote run-tests script failures
        1: 'executing the `dev/run-tests` script',
        ERROR_CODES["BLOCK_GENERAL"]: 'some tests',
        ERROR_CODES["BLOCK_RAT"]: 'RAT tests',
        ERROR_CODES["BLOCK_SCALA_STYLE"]: 'Scala style tests',
        ERROR_CODES["BLOCK_JAVA_STYLE"]: 'Java style tests',
        ERROR_CODES["BLOCK_PYTHON_STYLE"]: 'Python style tests',
        ERROR_CODES["BLOCK_R_STYLE"]: 'R style tests',
        ERROR_CODES["BLOCK_DOCUMENTATION"]: 'to generate documentation',
        ERROR_CODES["BLOCK_BUILD"]: 'to build',
        ERROR_CODES["BLOCK_BUILD_TESTS"]: 'build dependency tests',
        ERROR_CODES["BLOCK_MIMA"]: 'MiMa tests',
        ERROR_CODES["BLOCK_SPARK_UNIT_TESTS"]: 'Spark unit tests',
        ERROR_CODES["BLOCK_PYSPARK_UNIT_TESTS"]: 'PySpark unit tests',
        ERROR_CODES["BLOCK_PYSPARK_PIP_TESTS"]: 'PySpark pip packaging tests',
        ERROR_CODES["BLOCK_SPARKR_UNIT_TESTS"]: 'SparkR unit tests',
        ERROR_CODES["BLOCK_TIMEOUT"]: 'from timeout after a configured wait of \`%s\`' % (
            tests_timeout)
    }

    if test_result_code == 0:
        test_result_note = ' * This patch passes all tests.'
    else:
        failure = failure_note_by_errcode.get(
            test_result_code, "due to an unknown error code, %s" % test_result_code)
        test_result_note = ' * This patch **fails %s**.' % failure
    return [test_result_code, test_result_note]
def main():
    """Jenkins pull-request-builder entry point.

    Reads the PRB environment, posts a "started" comment to the Github PR,
    runs the PR checks and the full test suite, posts the combined result
    back to the PR, and exits with the test suite's exit code.
    """
    # Important Environment Variables
    # ---
    # $ghprbActualCommit
    #   This is the hash of the most recent commit in the PR.
    #   The merge-base of this and master is the commit from which the PR was branched.
    # $sha1
    #   If the patch merges cleanly, this is a reference to the merge commit hash
    #   (e.g. "origin/pr/2606/merge").
    #   If the patch does not merge cleanly, it is equal to $ghprbActualCommit.
    #   The merge-base of this and master in the case of a clean merge is the most recent commit
    #   against master.
    ghprb_pull_id = os.environ["ghprbPullId"]
    ghprb_actual_commit = os.environ["ghprbActualCommit"]
    ghprb_pull_title = os.environ["ghprbPullTitle"]
    sha1 = os.environ["sha1"]

    # Marks this build as a pull request build.
    os.environ["AMP_JENKINS_PRB"] = "true"
    # Switch to a Maven-based build if the PR title contains "test-maven":
    if "test-maven" in ghprb_pull_title:
        os.environ["AMPLAB_JENKINS_BUILD_TOOL"] = "maven"
    # Switch the Hadoop profile based on the PR title:
    if "test-hadoop2.6" in ghprb_pull_title:
        os.environ["AMPLAB_JENKINS_BUILD_PROFILE"] = "hadoop2.6"
    if "test-hadoop2.7" in ghprb_pull_title:
        os.environ["AMPLAB_JENKINS_BUILD_PROFILE"] = "hadoop2.7"

    build_display_name = os.environ["BUILD_DISPLAY_NAME"]
    build_url = os.environ["BUILD_URL"]

    commit_url = "https://github.com/apache/spark/commit/" + ghprb_actual_commit

    # GitHub doesn't auto-link short hashes when submitted via the API, unfortunately. :(
    short_commit_hash = ghprb_actual_commit[0:7]

    # format: http://linux.die.net/man/1/timeout
    # must be less than the timeout configured on Jenkins (currently 300m)
    tests_timeout = "250m"

    # Array to capture all test names to run on the pull request. These tests are represented
    # by their file equivalents in the dev/tests/ directory.
    #
    # To write a PR test:
    #   * the file must reside within the dev/tests directory
    #   * be an executable bash script
    #   * accept three arguments on the command line, the first being the Github PR long commit
    #     hash, the second the Github SHA1 hash, and the final the current PR hash
    #   * and, lastly, return string output to be included in the pr message output that will
    #     be posted to Github
    pr_tests = [
        "pr_merge_ability",
        "pr_public_classes"
    ]

    # Partially apply the static PR/build identifiers so only the status
    # message remains to be supplied when posting to Github.
    github_message = functools.partial(pr_message,
                                       build_display_name,
                                       build_url,
                                       ghprb_pull_id,
                                       short_commit_hash,
                                       commit_url)

    # post start message
    post_message_to_github(github_message('has started'), ghprb_pull_id)

    pr_check_results = run_pr_checks(pr_tests, ghprb_actual_commit, sha1)

    test_result_code, test_result_note = run_tests(tests_timeout)

    # post end message: overall result followed by one line per PR check
    result_message = github_message('has finished')
    result_message += '\n' + test_result_note + '\n'
    result_message += '\n'.join(pr_check_results)
    post_message_to_github(result_message, ghprb_pull_id)

    # Propagate the test suite's exit code so Jenkins marks the build accordingly.
    sys.exit(test_result_code)
# Script entry point: run only when executed directly (as Jenkins does).
if __name__ == "__main__":
    main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.