repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
infoxchange/lettuce | tests/integration/lib/Django-1.3/django/contrib/localflavor/us/models.py | 230 | 1294 | from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from django.contrib.localflavor.us.us_states import STATE_CHOICES
from django.contrib.localflavor.us.us_states import USPS_CHOICES
class USStateField(CharField):
    """Model field storing a U.S. state as its two-letter abbreviation."""

    description = _("U.S. state (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # Always constrain the column to the official state list, two chars,
        # overriding anything the caller passed.
        kwargs.update(choices=STATE_CHOICES, max_length=2)
        super(USStateField, self).__init__(*args, **kwargs)
class USPostalCodeField(CharField):
    """Model field storing a two-letter USPS postal abbreviation."""

    description = _("U.S. postal code (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # Force the USPS choice list and the two-character width,
        # regardless of caller-supplied kwargs.
        kwargs.update(choices=USPS_CHOICES, max_length=2)
        super(USPostalCodeField, self).__init__(*args, **kwargs)
class PhoneNumberField(CharField):
    """Model field for phone numbers, validated by USPhoneNumberField forms."""

    description = _("Phone number")

    def __init__(self, *args, **kwargs):
        # Fixed storage width of 20 characters.
        kwargs.update(max_length=20)
        super(PhoneNumberField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # Deferred import: the form class is only needed when a form
        # field is actually built.
        from django.contrib.localflavor.us.forms import USPhoneNumberField
        defaults = dict({'form_class': USPhoneNumberField}, **kwargs)
        return super(PhoneNumberField, self).formfield(**defaults)
| gpl-3.0 |
guerrerocarlos/odoo | openerp/addons/base/res/res_partner.py | 54 | 40829 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from lxml import etree
import math
import pytz
import urlparse
import openerp
from openerp import tools, api
from openerp.osv import osv, fields
from openerp.osv.expression import get_unaccent_wrapper
from openerp.tools.translate import _
ADDRESS_FORMAT_LAYOUTS = {
'%(city)s %(state_code)s\n%(zip)s': """
<div class="address_format">
<field name="city" placeholder="%(city)s" style="width: 50%%"/>
<field name="state_id" class="oe_no_button" placeholder="%(state)s" style="width: 47%%" options='{"no_open": true}'/>
<br/>
<field name="zip" placeholder="%(zip)s"/>
</div>
""",
'%(zip)s %(city)s': """
<div class="address_format">
<field name="zip" placeholder="%(zip)s" style="width: 40%%"/>
<field name="city" placeholder="%(city)s" style="width: 57%%"/>
<br/>
<field name="state_id" class="oe_no_button" placeholder="%(state)s" options='{"no_open": true}'/>
</div>
""",
'%(city)s\n%(state_name)s\n%(zip)s': """
<div class="address_format">
<field name="city" placeholder="%(city)s"/>
<field name="state_id" class="oe_no_button" placeholder="%(state)s" options='{"no_open": true}'/>
<field name="zip" placeholder="%(zip)s"/>
</div>
"""
}
class format_address(object):
    """Mixin that localizes address layout in partner form views."""

    @api.model
    def fields_view_get_address(self, arch):
        """Rewrite every ``<div class="address_format">`` node in ``arch``
        using the layout from ADDRESS_FORMAT_LAYOUTS whose key appears in the
        current company's country ``address_format``; returns the (possibly
        unchanged) arch string."""
        fmt = self.env.user.company_id.country_id.address_format or ''
        for k, v in ADDRESS_FORMAT_LAYOUTS.items():
            if k in fmt:
                doc = etree.fromstring(arch)
                for node in doc.xpath("//div[@class='address_format']"):
                    # Instantiate the layout with translated placeholder labels.
                    tree = etree.fromstring(v % {'city': _('City'), 'zip': _('ZIP'), 'state': _('State')})
                    for child in node.xpath("//field"):
                        # Carry each original field's computed 'modifiers'
                        # attribute over to the replacement field node.
                        if child.attrib.get('modifiers'):
                            for field in tree.xpath("//field[@name='%s']" % child.attrib.get('name')):
                                field.attrib['modifiers'] = child.attrib.get('modifiers')
                    node.getparent().replace(node, tree)
                arch = etree.tostring(doc)
                break
        return arch
@api.model
def _tz_get(self):
    """Selection list of all pytz timezones as (tz, tz) tuples, with the
    POSIX 'Etc/*' entries sorted to the end to avoid confusing users
    (see bug 1086728)."""
    def sort_key(name):
        # 'Etc/*' zones sort after everything else ('_' > uppercase letters).
        return '_' if name.startswith('Etc/') else name
    return [(name, name) for name in sorted(pytz.all_timezones, key=sort_key)]
class res_partner_category(osv.Model):
    """Hierarchical partner tags ('res.partner.category').

    Stored as a parent/child tree with a parent-store index; display names
    include the whole ancestor chain unless the context asks for the short
    form.
    """

    def name_get(self, cr, uid, ids, context=None):
        """ Return the categories' display name, including their direct
        parent by default.

        If ``context['partner_category_display']`` is ``'short'``, the short
        version of the category name (without the direct parent) is used.
        The default is the long version.
        """
        if not isinstance(ids, list):
            ids = [ids]
        if context is None:
            context = {}
        if context.get('partner_category_display') == 'short':
            return super(res_partner_category, self).name_get(cr, uid, ids, context=context)
        res = []
        for category in self.browse(cr, uid, ids, context=context):
            # Walk up the tree collecting each ancestor's name, then join
            # them root-first with ' / '.
            names = []
            current = category
            while current:
                names.append(current.name)
                current = current.parent_id
            res.append((category.id, ' / '.join(reversed(names))))
        return res

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        args = args or []
        if name:
            # Keep name_search symmetric with name_get: match only the last
            # segment of a "Parent / Child" chain.
            name = name.split(' / ')[-1]
            args = [('name', operator, name)] + args
        categories = self.search(args, limit=limit)
        return categories.name_get()

    @api.multi
    def _name_get_fnc(self, field_name, arg):
        # Function-field getter for 'complete_name': reuse name_get().
        return dict(self.name_get())

    _description = 'Partner Tags'
    _name = 'res.partner.category'
    _columns = {
        'name': fields.char('Category Name', required=True, translate=True),
        'parent_id': fields.many2one('res.partner.category', 'Parent Category', select=True, ondelete='cascade'),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Full Name'),
        'child_ids': fields.one2many('res.partner.category', 'parent_id', 'Child Categories'),
        'active': fields.boolean('Active', help="The active field allows you to hide the category without removing it."),
        'parent_left': fields.integer('Left parent', select=True),
        'parent_right': fields.integer('Right parent', select=True),
        'partner_ids': fields.many2many('res.partner', id1='category_id', id2='partner_id', string='Partners'),
    }
    _constraints = [
        (osv.osv._check_recursion, 'Error ! You can not create recursive categories.', ['parent_id'])
    ]
    _defaults = {
        'active': 1,
    }
    # parent_left/parent_right are maintained by the ORM for fast subtree reads.
    _parent_store = True
    _parent_order = 'name'
    _order = 'parent_left'
class res_partner_title(osv.osv):
    """Partner/contact titles (e.g. honorifics), split by domain:
    'partner' titles vs 'contact' titles."""
    _name = 'res.partner.title'
    _order = 'name'
    _columns = {
        'name': fields.char('Title', required=True, translate=True),
        'shortcut': fields.char('Abbreviation', translate=True),
        'domain': fields.selection([('partner', 'Partner'), ('contact', 'Contact')], 'Domain', required=True)
    }
    _defaults = {
        'domain': 'contact',
    }
@api.model
def _lang_get(self):
    """Selection list ``(code, name)`` built from every res.lang record."""
    languages = self.env['res.lang'].search([])
    return [(language.code, language.name) for language in languages]
# Address fields that are copied/synchronized from the parent partner when
# 'use_parent_address' is checked (see _address_fields and update_address).
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
class res_partner(osv.Model, format_address):
    """Partner model: companies and the people/contacts attached to them."""
    _description = 'Partner'
    _name = "res.partner"
def _address_display(self, cr, uid, ids, name, args, context=None):
    """Function-field getter for 'contact_address': the formatted full
    address of each partner (delegates to _display_address)."""
    res = {}
    for partner in self.browse(cr, uid, ids, context=context):
        res[partner.id] = self._display_address(cr, uid, partner, context=context)
    return res
@api.multi
def _get_tz_offset(self, name, args):
    """Function-field getter for 'tz_offset': the current UTC offset
    (strftime '%z', e.g. '+0200') of each partner's timezone, falling
    back to GMT when no timezone is set."""
    return dict(
        (p.id, datetime.datetime.now(pytz.timezone(p.tz or 'GMT')).strftime('%z'))
        for p in self)
@api.multi
def _get_image(self, name, args):
    # Multi-field getter: derive the resized variants (image_medium,
    # image_small) from the stored 'image'.
    return dict((p.id, tools.image_get_resized_images(p.image)) for p in self)

@api.one
def _set_image(self, name, value, args):
    # Inverse setter: any resized variant written by the client is stored
    # back as the full-size 'image'.
    return self.write({'image': tools.image_resize_image_big(value)})

@api.multi
def _has_image(self, name, args):
    # Function-field getter for 'has_image': True when an avatar is set.
    return dict((p.id, bool(p.image)) for p in self)
def _commercial_partner_compute(self, cr, uid, ids, name, args, context=None):
    """Compute each partner's commercial entity: the nearest ancestor
    (possibly the partner itself) flagged as a company, or the topmost
    parent when none is.  The commercial entity holds the master data for
    the fields listed by _commercial_fields()."""
    result = dict.fromkeys(ids, False)
    for partner in self.browse(cr, uid, ids, context=context):
        # Climb the hierarchy until a company or a root partner is reached.
        node = partner
        while not node.is_company and node.parent_id:
            node = node.parent_id
        result[partner.id] = node.id
    return result
def _display_name_compute(self, cr, uid, ids, name, args, context=None):
    """Compute 'display_name' as a plain name_get(), with the address/email
    display modifiers stripped from the context first."""
    ctx = dict(context or {})
    for key in ('show_address', 'show_address_only', 'show_email'):
        ctx.pop(key, None)
    return dict(self.name_get(cr, uid, ids, context=ctx))
# Indirections to avoid passing a copy of the overridable compute methods
# when declaring the function fields below.
_commercial_partner_id = lambda self, *args, **kwargs: self._commercial_partner_compute(*args, **kwargs)
_display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs)

# Store triggers: when the listed columns change on a partner, recompute the
# stored function field for that partner and all its descendants (including
# inactive ones, hence active_test=False), with priority 10.
_commercial_partner_store_triggers = {
    'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
                    ['parent_id', 'is_company'], 10)
}
_display_name_store_triggers = {
    'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
                    ['parent_id', 'is_company', 'name'], 10)
}
_order = "display_name"

# Old-API column declarations.
_columns = {
    'name': fields.char('Name', required=True, select=True),
    # Stored computed label (see name_get); kept up to date by the store
    # triggers declared above.
    'display_name': fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers, select=True),
    'date': fields.date('Date', select=1),
    'title': fields.many2one('res.partner.title', 'Title'),
    'parent_id': fields.many2one('res.partner', 'Related Company', select=True),
    'parent_name': fields.related('parent_id', 'name', type='char', readonly=True, string='Parent name'),
    'child_ids': fields.one2many('res.partner', 'parent_id', 'Contacts', domain=[('active','=',True)]), # force "active_test" domain to bypass _search() override
    'ref': fields.char('Contact Reference', select=1),
    'lang': fields.selection(_lang_get, 'Language',
        help="If the selected language is loaded in the system, all documents related to this contact will be printed in this language. If not, it will be English."),
    'tz': fields.selection(_tz_get, 'Timezone', size=64,
        help="The partner's timezone, used to output proper date and time values inside printed reports. "
             "It is important to set a value for this field. You should use the same timezone "
             "that is otherwise used to pick and render date and time values: your computer's timezone."),
    'tz_offset': fields.function(_get_tz_offset, type='char', size=5, string='Timezone offset', invisible=True),
    'user_id': fields.many2one('res.users', 'Salesperson', help='The internal user that is in charge of communicating with this contact if any.'),
    'vat': fields.char('TIN', help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by the some of the legal statements."),
    'bank_ids': fields.one2many('res.partner.bank', 'partner_id', 'Banks'),
    'website': fields.char('Website', help="Website of Partner or Company"),
    'comment': fields.text('Notes'),
    'category_id': fields.many2many('res.partner.category', id1='partner_id', id2='category_id', string='Tags'),
    'credit_limit': fields.float(string='Credit Limit'),
    'ean13': fields.char('EAN13', size=13),
    'active': fields.boolean('Active'),
    'customer': fields.boolean('Customer', help="Check this box if this contact is a customer."),
    'supplier': fields.boolean('Supplier', help="Check this box if this contact is a supplier. If it's not checked, purchase people will not see it when encoding a purchase order."),
    'employee': fields.boolean('Employee', help="Check this box if this contact is an Employee."),
    'function': fields.char('Job Position'),
    'type': fields.selection([('default', 'Default'), ('invoice', 'Invoice'),
                              ('delivery', 'Shipping'), ('contact', 'Contact'),
                              ('other', 'Other')], 'Address Type',
        help="Used to select automatically the right address according to the context in sales and purchases documents."),
    'street': fields.char('Street'),
    'street2': fields.char('Street2'),
    'zip': fields.char('Zip', size=24, change_default=True),
    'city': fields.char('City'),
    'state_id': fields.many2one("res.country.state", 'State', ondelete='restrict'),
    'country_id': fields.many2one('res.country', 'Country', ondelete='restrict'),
    'email': fields.char('Email'),
    'phone': fields.char('Phone'),
    'fax': fields.char('Fax'),
    'mobile': fields.char('Mobile'),
    'birthdate': fields.char('Birthdate'),
    'is_company': fields.boolean('Is a Company', help="Check if the contact is a company, otherwise it is a person"),
    'use_parent_address': fields.boolean('Use Company Address', help="Select this if you want to set company's address information for this contact"),
    # image: all image fields are base64 encoded and PIL-supported
    'image': fields.binary("Image",
        help="This field holds the image used as avatar for this contact, limited to 1024x1024px"),
    'image_medium': fields.function(_get_image, fnct_inv=_set_image,
        string="Medium-sized image", type="binary", multi="_get_image",
        store={
            'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
        },
        help="Medium-sized image of this contact. It is automatically "\
             "resized as a 128x128px image, with aspect ratio preserved. "\
             "Use this field in form views or some kanban views."),
    'image_small': fields.function(_get_image, fnct_inv=_set_image,
        string="Small-sized image", type="binary", multi="_get_image",
        store={
            'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
        },
        help="Small-sized image of this contact. It is automatically "\
             "resized as a 64x64px image, with aspect ratio preserved. "\
             "Use this field anywhere a small image is required."),
    'has_image': fields.function(_has_image, type="boolean"),
    'company_id': fields.many2one('res.company', 'Company', select=1),
    'color': fields.integer('Color Index'),
    'user_ids': fields.one2many('res.users', 'partner_id', 'Users'),
    'contact_address': fields.function(_address_display, type='char', string='Complete Address'),
    # technical field used for managing commercial fields
    'commercial_partner_id': fields.function(_commercial_partner_id, type='many2one', relation='res.partner', string='Commercial Entity', store=_commercial_partner_store_triggers)
}
@api.model
def _default_category(self):
    """Default 'category_id': the single category passed through the
    context, or no categories at all."""
    cid = self.env.context.get('category_id')
    return [cid] if cid else False
@api.model
def _get_default_image(self, is_company, colorize=False):
    """Return the default avatar as a base64-encoded, big-resized PNG:
    a company placeholder for companies, a colorized person avatar
    otherwise.

    NOTE(review): the ``colorize`` parameter is accepted but never read
    here — person avatars are always colorized.  Uses
    ``str.encode('base64')``, i.e. Python 2 only.
    """
    img_path = openerp.modules.get_module_resource(
        'base', 'static/src/img', 'company_image.png' if is_company else 'avatar.png')
    with open(img_path, 'rb') as f:
        image = f.read()

    # colorize user avatars
    if not is_company:
        image = tools.image_colorize(image)

    return tools.image_resize_image_big(image.encode('base64'))
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """View override: use the simplified partner form when 'force_email' is
    set in the context, and localize the address layout of form views
    (see format_address.fields_view_get_address)."""
    if (not view_id) and (view_type=='form') and context and context.get('force_email', False):
        view_id = self.pool['ir.model.data'].get_object_reference(cr, user, 'base', 'view_partner_simple_form')[1]
    res = super(res_partner,self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
    if view_type == 'form':
        res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context)
    return res
@api.model
def _default_company(self):
    """Default 'company_id': the current user's default company for partners."""
    return self.env['res.company']._company_default_get('res.partner')
_defaults = {
    'active': True,
    # Defaults drawn lazily from the creating user's environment.
    'lang': api.model(lambda self: self.env.lang),
    'tz': api.model(lambda self: self.env.context.get('tz', False)),
    'customer': True,
    'category_id': _default_category,
    'company_id': _default_company,
    'color': 0,
    'is_company': False,
    'type': 'contact', # type 'default' is wildcard and thus inappropriate
    'use_parent_address': False,
    'image': False,
}

_constraints = [
    (osv.osv._check_recursion, 'You cannot create recursive Partner hierarchies.', ['parent_id']),
]
@api.one
def copy(self, default=None):
    """Duplicate the partner, suffixing the copy's name with '(copy)'."""
    default = dict(default or {})
    default['name'] = _('%s (copy)') % self.name
    return super(res_partner, self).copy(default)
@api.multi
def onchange_type(self, is_company):
    """Onchange for 'is_company': reset the title (its domain differs
    between companies and contacts) and, for companies, clear
    use_parent_address."""
    values = {'title': False}
    if is_company:
        values['use_parent_address'] = False
    title_domain = 'partner' if is_company else 'contact'
    return {'value': values, 'domain': {'title': [('domain', '=', title_domain)]}}
def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
    """Onchange for parent_id / use_parent_address: warn when re-parenting
    an existing contact, and copy the parent's address values onto the
    contact when use_parent_address is set."""
    def value_or_id(val):
        """ return val or val.id if val is a browse record """
        # `long`/`basestring`: Python 2 scalar types.
        return val if isinstance(val, (bool, int, long, float, basestring)) else val.id
    result = {}
    if parent_id:
        if ids:
            partner = self.browse(cr, uid, ids[0], context=context)
            if partner.parent_id and partner.parent_id.id != parent_id:
                result['warning'] = {'title': _('Warning'),
                                     'message': _('Changing the company of a contact should only be done if it '
                                                  'was never correctly set. If an existing contact starts working for a new '
                                                  'company then a new contact should be created under that new '
                                                  'company. You can use the "Discard" button to abandon this change.')}
        if use_parent_address:
            # Copy the parent's address field-for-field (see ADDRESS_FIELDS).
            parent = self.browse(cr, uid, parent_id, context=context)
            address_fields = self._address_fields(cr, uid, context=context)
            result['value'] = dict((key, value_or_id(parent[key])) for key in address_fields)
    else:
        # Without a parent there is no address to inherit.
        result['value'] = {'use_parent_address': False}
    return result
@api.multi
def onchange_state(self, state_id):
    """Onchange for 'state_id': align the country with the chosen state."""
    if not state_id:
        return {}
    state = self.env['res.country.state'].browse(state_id)
    return {'value': {'country_id': state.country_id.id}}
def _check_ean_key(self, cr, uid, ids, context=None):
    """Validate each partner's EAN-13 barcode: 13 digits whose weighted
    checksum (odd positions x1, even positions x3) matches the final
    check digit.  Empty values are accepted."""
    for record in self.pool['res.partner'].read(cr, uid, ids, ['ean13',]):
        ean = record['ean13']
        if not ean:
            continue
        if len(ean) != 13:
            return False
        total = 0
        for position, digit in enumerate(ean[:12]):
            total += int(digit) if position % 2 == 0 else 3 * int(digit)
        # The check digit brings the weighted total up to a multiple of 10.
        if math.ceil(total / 10.0) * 10 - total != int(ean[12]):
            return False
    return True

# _constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean13'])]
def _update_fields_values(self, cr, uid, partner, fields, context=None):
    """ Returns dict of write() values for synchronizing ``fields`` """
    values = {}
    for fname in fields:
        ftype = self._fields[fname].type
        if ftype == 'one2many':
            raise AssertionError('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`')
        if ftype == 'many2one':
            # Relational value -> plain id (False when unset).
            record = partner[fname]
            values[fname] = record.id if record else False
        elif ftype == 'many2many':
            # Replace the whole relation with a (6, 0, ids) command.
            values[fname] = [(6, 0, [rec.id for rec in partner[fname] or []])]
        else:
            values[fname] = partner[fname]
    return values
def _address_fields(self, cr, uid, context=None):
    """ Returns the list of address fields that are synced from the parent
    when the `use_parent_address` flag is set.  Overridable hook; returns
    a fresh copy of ADDRESS_FIELDS. """
    return list(ADDRESS_FIELDS)
def update_address(self, cr, uid, ids, vals, context=None):
    """Write only the address-related subset of ``vals`` on ``ids``,
    bypassing this model's write() override (no re-synchronization).
    Returns None when ``vals`` contains no address fields."""
    address_fields = self._address_fields(cr, uid, context=context)
    addr_vals = {key: vals[key] for key in address_fields if key in vals}
    if addr_vals:
        return super(res_partner, self).write(cr, uid, ids, addr_vals, context)
def _commercial_fields(self, cr, uid, context=None):
    """ Returns the list of fields that are managed by the commercial entity
    to which a partner belongs. These fields are meant to be hidden on
    partners that aren't `commercial entities` themselves, and will be
    delegated to the parent `commercial entity`. The list is meant to be
    extended by inheriting classes. """
    return ['vat', 'credit_limit']
def _commercial_sync_from_company(self, cr, uid, partner, context=None):
    """ Handle sync of commercial fields when a new parent commercial entity is set,
    as if they were related fields """
    commercial_partner = partner.commercial_partner_id
    if not commercial_partner:
        # On child partner creation of a parent partner,
        # the commercial_partner_id is not yet computed
        commercial_partner_id = self._commercial_partner_compute(
            cr, uid, [partner.id], 'commercial_partner_id', [], context=context)[partner.id]
        commercial_partner = self.browse(cr, uid, commercial_partner_id, context=context)
    if commercial_partner != partner:
        # Pull the commercial field values down from the commercial entity.
        commercial_fields = self._commercial_fields(cr, uid, context=context)
        sync_vals = self._update_fields_values(cr, uid, commercial_partner,
                                               commercial_fields, context=context)
        partner.write(sync_vals)
def _commercial_sync_to_children(self, cr, uid, partner, context=None):
    """ Handle sync of commercial fields to descendants """
    commercial_fields = self._commercial_fields(cr, uid, context=context)
    commercial_partner = partner.commercial_partner_id
    if not commercial_partner:
        # On child partner creation of a parent partner,
        # the commercial_partner_id is not yet computed
        commercial_partner_id = self._commercial_partner_compute(
            cr, uid, [partner.id], 'commercial_partner_id', [], context=context)[partner.id]
        commercial_partner = self.browse(cr, uid, commercial_partner_id, context=context)
    sync_vals = self._update_fields_values(cr, uid, commercial_partner,
                                           commercial_fields, context=context)
    # Only non-company children belong to this commercial entity: recurse
    # into each of them first, then write the values on all of them at once.
    sync_children = [c for c in partner.child_ids if not c.is_company]
    for child in sync_children:
        self._commercial_sync_to_children(cr, uid, child, context=context)
    return self.write(cr, uid, [c.id for c in sync_children], sync_vals, context=context)
def _fields_sync(self, cr, uid, partner, update_values, context=None):
    """ Sync commercial fields and address fields from company and to children after create/update,
    just as if those were all modeled as fields.related to the parent

    :param partner: browse record the write/create was applied to
    :param update_values: the raw vals dict of that write/create, used to
        decide which syncs are needed
    """
    # 1. From UPSTREAM: sync from parent
    if update_values.get('parent_id') or update_values.get('use_parent_address'):
        # 1a. Commercial fields: sync if parent changed
        if update_values.get('parent_id'):
            self._commercial_sync_from_company(cr, uid, partner, context=context)
        # 1b. Address fields: sync if parent or use_parent changed *and* both are now set
        if partner.parent_id and partner.use_parent_address:
            onchange_vals = self.onchange_address(cr, uid, [partner.id],
                                                  use_parent_address=partner.use_parent_address,
                                                  parent_id=partner.parent_id.id,
                                                  context=context).get('value', {})
            partner.update_address(onchange_vals)

    # 2. To DOWNSTREAM: sync children
    if partner.child_ids:
        # 2a. Commercial Fields: sync if commercial entity
        if partner.commercial_partner_id == partner:
            commercial_fields = self._commercial_fields(cr, uid,
                                                        context=context)
            if any(field in update_values for field in commercial_fields):
                self._commercial_sync_to_children(cr, uid, partner,
                                                  context=context)
        # 2b. Address fields: sync if address changed
        address_fields = self._address_fields(cr, uid, context=context)
        if any(field in update_values for field in address_fields):
            domain_children = [('parent_id', '=', partner.id), ('use_parent_address', '=', True)]
            update_ids = self.search(cr, uid, domain_children, context=context)
            self.update_address(cr, uid, update_ids, update_values, context=context)
def _handle_first_contact_creation(self, cr, uid, partner, context=None):
    """ On creation of first contact for a company (or root) that has no address, assume contact address
    was meant to be company address """
    parent = partner.parent_id
    address_fields = self._address_fields(cr, uid, context=context)
    # Only for the very first child, when the contact has some address data
    # and the parent has none.
    if parent and (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
        any(partner[f] for f in address_fields) and not any(parent[f] for f in address_fields):
        addr_vals = self._update_fields_values(cr, uid, partner, address_fields, context=context)
        parent.update_address(addr_vals)
        if not parent.is_company:
            parent.write({'is_company': True})
def unlink(self, cr, uid, ids, context=None):
    """Delete partners, first detaching surviving children that inherited
    their address so they no longer point at a removed parent address."""
    orphan_domain = [('parent_id', 'in', ids),
                     ('id', 'not in', ids),
                     ('use_parent_address', '=', True)]
    orphan_contact_ids = self.search(cr, uid, orphan_domain, context=context)
    if orphan_contact_ids:
        # These contacts no longer have a parent address to inherit from.
        self.write(cr, uid, orphan_contact_ids, {'use_parent_address': False}, context=context)
    return super(res_partner, self).unlink(cr, uid, ids, context=context)
def _clean_website(self, website):
    """Normalize a website value: prefix it with 'http://' when no URL
    scheme is present, leaving already-qualified URLs untouched."""
    parts = urlparse.urlparse(website)
    if not parts.scheme:
        netloc, path = parts.netloc, parts.path
        if not netloc:
            # 'example.com' parses entirely into `path`; treat it as the host.
            netloc, path = path, ''
        website = urlparse.urlunparse(('http', netloc, path, parts.params, parts.query, parts.fragment))
    return website
@api.multi
def write(self, vals):
    """Write override: normalize the website URL, guard company changes,
    then run the parent/children synchronization (see _fields_sync)."""
    # res.partner must only allow to set the company_id of a partner if it
    # is the same as the company of all users that inherit from this partner
    # (this is to allow the code from res_users to write to the partner!) or
    # if setting the company_id to False (this is compatible with any user
    # company)
    if vals.get('website'):
        vals['website'] = self._clean_website(vals['website'])
    if vals.get('company_id'):
        company = self.env['res.company'].browse(vals['company_id'])
        for partner in self:
            if partner.user_ids:
                companies = set(user.company_id for user in partner.user_ids)
                if len(companies) > 1 or company not in companies:
                    raise osv.except_osv(_("Warning"),_("You can not change the company as the partner/user has multiple user linked with different companies."))
    result = super(res_partner, self).write(vals)
    # Propagate commercial/address values up from the parent and down to
    # dependent children for every written partner.
    for partner in self:
        self._fields_sync(partner, vals)
    return result
@api.model
def create(self, vals):
    """Create override: normalize the website URL, run the field
    synchronization, and handle the first-contact address promotion
    (see _handle_first_contact_creation)."""
    if vals.get('website'):
        vals['website'] = self._clean_website(vals['website'])
    partner = super(res_partner, self).create(vals)
    self._fields_sync(partner, vals)
    self._handle_first_contact_creation(partner)
    return partner
def open_commercial_entity(self, cr, uid, ids, context=None):
    """Window action opening the commercial entity of the first partner in
    ``ids`` in a new form view (backs the "Open Company" button)."""
    partner = self.browse(cr, uid, ids[0], context=context)
    action = {
        'type': 'ir.actions.act_window',
        'res_model': 'res.partner',
        'view_mode': 'form',
        'res_id': partner.commercial_partner_id.id,
        'target': 'new',
        'flags': {'form': {'action_buttons': True}},
    }
    return action
def open_parent(self, cr, uid, ids, context=None):
    """Window action opening the parent of the first partner in ``ids`` in
    a new form view (backs the "Open Parent" button)."""
    partner = self.browse(cr, uid, ids[0], context=context)
    action = {
        'type': 'ir.actions.act_window',
        'res_model': 'res.partner',
        'view_mode': 'form',
        'res_id': partner.parent_id.id,
        'target': 'new',
        'flags': {'form': {'action_buttons': True}},
    }
    return action
def name_get(self, cr, uid, ids, context=None):
    """Display name: "Company, Contact" for people attached to a company.
    Context keys 'show_address_only', 'show_address' and 'show_email'
    replace or enrich the name with address / email details."""
    if context is None:
        context = {}
    # `long`: Python 2 integer type.
    if isinstance(ids, (int, long)):
        ids = [ids]
    res = []
    for record in self.browse(cr, uid, ids, context=context):
        name = record.name
        if record.parent_id and not record.is_company:
            name = "%s, %s" % (record.parent_name, name)
        if context.get('show_address_only'):
            name = self._display_address(cr, uid, record, without_company=True, context=context)
        if context.get('show_address'):
            name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
        # Collapse blank lines left by empty address components; applied
        # twice to also handle runs of more than two newlines.
        name = name.replace('\n\n','\n')
        name = name.replace('\n\n','\n')
        if context.get('show_email') and record.email:
            name = "%s <%s>" % (name, record.email)
        res.append((record.id, name))
    return res
def _parse_partner_name(self, text, context=None):
    """ Supported syntax:
        - 'Raoul <raoul@grosbedon.fr>': will find name and email address
        - otherwise: default, everything is set as the name

    Returns a ``(name, email)`` tuple; ``email`` is '' when none is found.
    """
    # Spaces are turned into commas so multiple space-separated addresses
    # split into separate candidates; only the first email is kept.
    emails = tools.email_split(text.replace(' ',','))
    if emails:
        email = emails[0]
        # NOTE(review): assumes the split email occurs verbatim in `text`;
        # text.index() would raise ValueError otherwise — confirm
        # tools.email_split guarantees this.
        name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
    else:
        name, email = text, ''
    return name, email
def name_create(self, cr, uid, name, context=None):
    """ Override of orm's name_create method for partners. The purpose is
        to handle some basic formats to create partners using the
        name_create.
        If only an email address is received and that the regex cannot find
        a name, the name will have the email value.
        If 'force_email' key in context: must find the email address. """
    if context is None:
        context = {}
    name, email = self._parse_partner_name(name, context=context)
    if context.get('force_email') and not email:
        raise osv.except_osv(_('Warning'), _("Couldn't create contact without email address!"))
    if not name and email:
        # No separate name found: fall back to the email itself.
        name = email
    rec_id = self.create(cr, uid, {self._rec_name: name or email, 'email': email or False}, context=context)
    return self.name_get(cr, uid, [rec_id], context)[0]
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
    """ Override search() to always show inactive children when searching via ``child_of`` operator. The ORM will
    always call search() with a simple domain of the form [('parent_id', 'in', [ids])]. """
    # a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions
    if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \
            and args[0][2] != [False]:
        # Disable the active filter for this parent-children lookup.
        context = dict(context or {}, active_test=False)
    return super(res_partner, self)._search(cr, user, args, offset=offset, limit=limit, order=order, context=context,
                                            count=count, access_rights_uid=access_rights_uid)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
    """Search partners by email or display_name (which includes the parent
    company) with raw SQL for the common operators; other operators fall
    through to the default name_search."""
    if not args:
        args = []
    if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
        self.check_access_rights(cr, uid, 'read')
        # Build the security WHERE clause (ir.rules + domain) once.
        where_query = self._where_calc(cr, uid, args, context=context)
        self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
        from_clause, where_clause, where_clause_params = where_query.get_sql()
        where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '

        # search on the name of the contacts and of its company
        search_name = name
        if operator in ('ilike', 'like'):
            search_name = '%%%s%%' % name
        if operator in ('=ilike', '=like'):
            operator = operator[1:]

        # Wrap operands with unaccent() when the DB provides it, so
        # accented characters match their plain form.
        unaccent = get_unaccent_wrapper(cr)

        query = """SELECT id
                     FROM res_partner
                  {where} ({email} {operator} {percent}
                       OR {display_name} {operator} {percent})
                 ORDER BY {display_name}
                """.format(where=where_str, operator=operator,
                           email=unaccent('email'),
                           display_name=unaccent('display_name'),
                           percent=unaccent('%s'))

        where_clause_params += [search_name, search_name]
        if limit:
            query += ' limit %s'
            where_clause_params.append(limit)
        cr.execute(query, where_clause_params)
        # Python 2: map() returns a list here.
        ids = map(lambda x: x[0], cr.fetchall())

        if ids:
            return self.name_get(cr, uid, ids, context)
        else:
            return []
    return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
def find_or_create(self, cr, uid, email, context=None):
    """Return the id of a partner matching ``email``, creating one through
    name_create() when no match exists.

    :param str email: email-like string which should contain at least one
        email, e.g. ``"Raoul Grosbedon <r.g@grosbedon.fr>"``
    """
    assert email, 'an email is required for find_or_create to work'
    emails = tools.email_split(email)
    if emails:
        # Keep only the first extracted address.
        email = emails[0]
    matching_ids = self.search(cr, uid, [('email','ilike',email)], context=context)
    if matching_ids:
        return matching_ids[0]
    return self.name_create(cr, uid, email, context=context)[0]
def _email_send(self, cr, uid, ids, email_from, subject, body, on_error=None):
    """Send ``body`` from ``email_from`` to every partner in ``ids`` that
    has an email address; always returns True."""
    for partner in self.browse(cr, uid, ids):
        if not partner.email:
            continue
        tools.email_send(email_from, [partner.email], subject, body, on_error)
    return True
def email_send(self, cr, uid, ids, email_from, subject, body, on_error=''):
    """Queue the mailing as ir.cron jobs calling _email_send, batching the
    partner ids 16 per job; always returns True."""
    remaining = ids
    while remaining:
        batch, remaining = remaining[:16], remaining[16:]
        self.pool['ir.cron'].create(cr, uid, {
            'name': 'Send Partner Emails',
            'user_id': uid,
            'model': 'res.partner',
            'function': '_email_send',
            'args': repr([batch, email_from, subject, body, on_error])
        })
    return True
def address_get(self, cr, uid, ids, adr_pref=None, context=None):
    """ Find contacts/addresses of the right type(s) by doing a depth-first-search
    through descendants within company boundaries (stop at entities flagged ``is_company``)
    then continuing the search at the ancestors that are within the same company boundaries.
    Defaults to partners of type ``'default'`` when the exact type is not found, or to the
    provided partner itself if no type ``'default'`` is found either.

    :param adr_pref: iterable of address types to look up (e.g. ['invoice']);
        'default' is always implicitly added to the requested set
    :returns: dict mapping each requested address type to a partner id
    """
    adr_pref = set(adr_pref or [])
    if 'default' not in adr_pref:
        adr_pref.add('default')
    result = {}
    visited = set()
    # filter(None, ids) drops falsy ids (e.g. False from empty m2o fields)
    for partner in self.browse(cr, uid, filter(None, ids), context=context):
        current_partner = partner
        while current_partner:
            to_scan = [current_partner]
            # Scan descendants, DFS
            while to_scan:
                record = to_scan.pop(0)
                visited.add(record)
                # First match wins for each type; never overwrite a result.
                if record.type in adr_pref and not result.get(record.type):
                    result[record.type] = record.id
                if len(result) == len(adr_pref):
                    # Early exit: every requested type has been resolved.
                    return result
                # Prepend children so the search stays depth-first; stop at
                # company boundaries and avoid revisiting records.
                to_scan = [c for c in record.child_ids
                           if c not in visited
                           if not c.is_company] + to_scan
            # Continue scanning at ancestor if current_partner is not a commercial entity
            if current_partner.is_company or not current_partner.parent_id:
                break
            current_partner = current_partner.parent_id
        # default to type 'default' or the partner itself
        default = result.get('default', partner.id)
        for adr_type in adr_pref:
            result[adr_type] = result.get(adr_type) or default
    return result
def view_header_get(self, cr, uid, view_id, view_type, context):
    """When browsing partners from a category context, prefix the view
    header with the current category's name; otherwise defer to super."""
    header = super(res_partner, self).view_header_get(cr, uid, view_id, view_type, context)
    if header:
        return header
    category_id = context.get('category_id', False)
    if not category_id:
        return False
    category = self.pool['res.partner.category'].browse(cr, uid, category_id, context)
    return _('Partners: ') + category.name
@api.model
@api.returns('self')
def main_partner(self):
    ''' Return the main partner record (the XML-id ``base.main_partner``
    provided by the base module's data). '''
    return self.env.ref('base.main_partner')
def _display_address(self, cr, uid, address, without_company=False, context=None):
    '''
    Build and return the address formatted according to the standards of
    the country it belongs to.

    :param address: browse record of the res.partner to format
    :param without_company: when True, never prepend the company name
    :returns: the address formatted in a display that fit its country habits
              (or the default ones if no country is specified)
    :rtype: string
    '''
    # Use the country's own layout when available, a generic one otherwise.
    layout = address.country_id.address_format or \
        "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
    values = {
        'state_code': address.state_id.code or '',
        'state_name': address.state_id.name or '',
        'country_code': address.country_id.code or '',
        'country_name': address.country_id.name or '',
        'company_name': address.parent_name or '',
    }
    # Inject the plain address fields (street, city, zip, ...) afterwards so
    # they take precedence, exactly like the historical behaviour.
    for field_name in self._address_fields(cr, uid, context=context):
        values[field_name] = getattr(address, field_name) or ''
    if without_company:
        values['company_name'] = ''
    elif address.parent_id:
        layout = '%(company_name)s\n' + layout
    return layout % values
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
theceremony/pyramids-installation | python/projector-sync/GIFMode.py | 1 | 2166 | # -*- coding: utf-8 -*-
import os
import pygame
from GIFImage import GIFImage
from random import randint
imagePath = "img/pyramids gifs/disp/"
class GIFMode:
    """Playback mode that cycles through randomly chosen animated GIFs from
    ``imagePath``, preloading the next GIF while the current one plays.

    Call :meth:`set_surface` once, then :meth:`run` every frame.
    """

    def __init__(self):
        self.surface = None          # pygame surface the GIFs are drawn onto
        self.currentGIF = None       # GIFImage currently being played
        self.nextGIF = None          # preloaded GIFImage queued up next
        self.loadingNextGIF = False  # True while nextGIF is still loading
        self.files = os.listdir(imagePath)
        self.isActive = True
        self.playTime = 200          # number of frames each GIF is shown
        self.currentTime = 0         # frames elapsed for the current GIF

    def set_surface(self, surface):
        """Remember the surface to render onto."""
        self.surface = surface

    def play_next_GIF(self):
        """Promote the preloaded GIF to the current one and start it."""
        print('PLAY NEXT CALLED')
        self.currentGIF = self.nextGIF
        self.currentGIF.play()
        self.nextGIF = None

    def load_next_GIF(self):
        """Start loading a random GIF and scale it to the largest display mode."""
        print('LOAD NEXT CALLED')
        self.loadingNextGIF = True
        self.nextGIF = GIFImage(self.choose_next_GIF())
        # list_modes() is sorted largest first; scale to full-screen size.
        modes = pygame.display.list_modes()
        self.nextGIF.scale_image(modes[0])
        print(self.nextGIF.loaded)

    def choose_next_GIF(self):
        """Return the path of a randomly picked GIF file."""
        return (imagePath + self.files[randint(0, len(self.files) - 1)])

    def render_current(self):
        """Draw the current GIF (if any) and advance the play timer."""
        if self.currentGIF is not None:
            self.currentGIF.render(self.surface, (0, 0))
        # NOTE(review): the timer also advances while no GIF is rendered
        # (e.g. while the very first GIF is still loading) -- confirm that
        # this is intended.
        self.currentTime += 1

    def run(self):
        """Per-frame tick: manage loading/switching and render."""
        if self.isActive:
            # Check to see if next gif is loaded --------------------------
            if self.nextGIF is None:
                self.load_next_GIF()
            elif self.loadingNextGIF:
                if self.nextGIF.loaded:
                    # Keep it paused until it becomes the current GIF.
                    self.nextGIF.pause()
                    self.loadingNextGIF = False
            # -------------------------------------------------------------
            if self.nextGIF is not None and self.currentGIF is None:
                self.play_next_GIF()
            # -------------------------------------------------------------
            self.render_current()
            # -------------------------------------------------------------
            if self.currentTime >= self.playTime:
                # Time is up: drop the current GIF so the preloaded one
                # takes over on the next tick.
                self.currentGIF = None
                self.currentTime = 0
JIMhackKING/face_detect | scripts/encrypt.py | 1 | 2480 | # coding:utf-8
"""这个程序用于进行字符串混淆加密,完美支持英文和中文。
可以加密所有字符串,例如账号、密码或者是一些敏感的词汇,甚至是一个文件。
最重要的是可以多次加密同一段字符,加密加密后的内容。
此版本为 Python3 ,base64 需要用 bytes 类型,如果是 Python2 不需要编码和解码.
这个程序同样也可以加密一个文件"""
__author__ = "JIMhackKING"
# this 的 d 很好的做了代码混淆的字典,可是在导入时会打印一段内容
# from this import d
import base64
import json
# Self-inverse ROT13-style substitution table for ASCII letters: each letter
# maps to the letter 13 positions away, so applying the table twice yields
# the original character. The same table therefore serves both encoding and
# decoding.
d = {'A': 'N', 'C': 'P', 'B': 'O', 'E': 'R', 'D': 'Q', 'G': 'T', 'F': 'S', 'I': 'V',
     'H': 'U', 'K': 'X', 'J': 'W', 'M': 'Z', 'L': 'Y', 'O': 'B', 'N': 'A', 'Q': 'D',
     'P': 'C', 'S': 'F', 'R': 'E', 'U': 'H', 'T': 'G', 'W': 'J', 'V': 'I', 'Y': 'L',
     'X': 'K', 'Z': 'M', 'a': 'n', 'c': 'p', 'b': 'o', 'e': 'r', 'd': 'q', 'g': 't',
     'f': 's', 'i': 'v', 'h': 'u', 'k': 'x', 'j': 'w', 'm': 'z', 'l': 'y', 'o': 'b',
     'n': 'a', 'q': 'd', 'p': 'c', 's': 'f', 'r': 'e', 'u': 'h', 't': 'g', 'w': 'j',
     'v': 'i', 'y': 'l', 'x': 'k', 'z': 'm'}


def str_encode(string):
    """Obfuscate *string* and return the result as text.

    Three passes: (1) substitute letters through the self-inverse table ``d``
    (other characters pass through unchanged), (2) expand each character to
    its code point followed by the ",~!" separator, (3) base64-encode.
    """
    # Pass 1: character substitution.
    substituted = "".join(d.get(ch, ch) for ch in string)
    # Pass 2: code points joined by the ",~!" marker (trailing marker kept,
    # str_decode relies on it).
    codes = "".join("%d,~!" % ord(ch) for ch in substituted)
    # Pass 3: base64. b64encode needs bytes on Python 3 (the module's
    # docstring says it targets Python 3), so encode first and return str.
    return base64.b64encode(codes.encode("utf-8")).decode("ascii")


def str_decode(string):
    """Reverse :func:`str_encode` and return the original text."""
    # Undo pass 3: base64 decode back to the code-point string.
    codes = base64.b64decode(string).decode("utf-8")
    # Undo pass 2: the string ends with a trailing ",~!", so drop the final
    # empty token produced by split().
    chars = "".join(chr(int(token)) for token in codes.split(",~!")[:-1])
    # Undo pass 1: the substitution table is its own inverse.
    return "".join(d.get(ch, ch) for ch in chars)
if __name__ == '__main__':
    # Demo: encode then decode an API key pair and print the round-trip.
    API_key = ""
    Secret_key = "我"
    # For secrecy the real API Key and Secret Key are not given here in
    # clear text.
    s1 = str_encode(API_key)
    # print() calls instead of Python 2 print statements: the module
    # docstring states this version targets Python 3.
    print(str_decode(s1))
    s2 = str_encode(Secret_key)
    print(str_decode(s2))
| mit |
zenodo/zenodo | tests/unit/records/test_schemas_dcat.py | 1 | 1687 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2019 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test the DCAT serializer."""
from __future__ import absolute_import, print_function
from zenodo.modules.records.serializers import dcat_v1
def test_dcat_serializer(db, es, record_with_bucket):
    """Tests the DCAT XSLT-based serializer."""
    pid, record = record_with_bucket
    output = dcat_v1.serialize(pid, record)
    # Core bibliographic fields must survive serialization verbatim.
    for key in ('title', 'description', 'doi'):
        assert record[key] in output
    # Every creator's name components must appear in the output.
    for creator in record['creators']:
        assert creator['familyname'] in output
        assert creator['givennames'] in output
    # Every attached file key must be listed.
    for attachment in record['_files']:
        assert attachment['key'] in output
| gpl-2.0 |
JoKaWare/GViews | tools/grit/grit/clique.py | 34 | 17957 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Collections of messages and their translations, called cliques. Also
collections of cliques (uber-cliques).
'''
import re
import types
from grit import constants
from grit import exception
from grit import lazy_re
from grit import pseudo
from grit import pseudo_rtl
from grit import tclib
class UberClique(object):
    '''A factory (NOT a singleton factory) for making cliques. It has several
    methods for working with the cliques created using the factory.
    '''

    def __init__(self):
        # A map from message ID to list of cliques whose source messages have
        # that ID. This will contain all cliques created using this factory.
        # Different messages can have the same ID because they have the
        # same translateable portion and placeholder names, but occur in different
        # places in the resource tree.
        #
        # Each list of cliques is kept sorted by description, to achieve
        # stable results from the BestClique method, see below.
        self.cliques_ = {}

        # A map of clique IDs to list of languages to indicate translations where we
        # fell back to English.
        self.fallback_translations_ = {}

        # A map of clique IDs to list of languages to indicate missing translations.
        self.missing_translations_ = {}

    def _AddMissingTranslation(self, lang, clique, is_error):
        # Record that 'clique' lacked a translation for 'lang' in the
        # appropriate bookkeeping map: missing (hard error) vs. fallback.
        tl = self.fallback_translations_
        if is_error:
            tl = self.missing_translations_
        id = clique.GetId()
        if id not in tl:
            tl[id] = {}
        if lang not in tl[id]:
            tl[id][lang] = 1

    def HasMissingTranslations(self):
        # True when at least one clique is missing a required translation.
        return len(self.missing_translations_) > 0

    def MissingTranslationsReport(self):
        '''Returns a string suitable for printing to report missing
        and fallback translations to the user.
        '''
        def ReportTranslation(clique, langs):
            # Format one report line: message id, a short extract of the
            # source text, and the affected languages (at most 6 listed).
            text = clique.GetMessage().GetPresentableContent()
            # The text 'error' (usually 'Error:' but we are conservative)
            # can trigger some build environments (Visual Studio, we're
            # looking at you) to consider invocation of grit to have failed,
            # so we make sure never to output that word.
            extract = re.sub('(?i)error', 'REDACTED', text[0:40])[0:40]
            ellipsis = ''
            if len(text) > 40:
                ellipsis = '...'
            langs_extract = langs[0:6]
            describe_langs = ','.join(langs_extract)
            if len(langs) > 6:
                describe_langs += " and %d more" % (len(langs) - 6)
            return " %s \"%s%s\" %s" % (clique.GetId(), extract, ellipsis,
                                        describe_langs)
        lines = []
        if len(self.fallback_translations_):
            lines.append(
                "WARNING: Fell back to English for the following translations:")
            for (id, langs) in self.fallback_translations_.items():
                lines.append(ReportTranslation(self.cliques_[id][0], langs.keys()))
        if len(self.missing_translations_):
            lines.append("ERROR: The following translations are MISSING:")
            for (id, langs) in self.missing_translations_.items():
                lines.append(ReportTranslation(self.cliques_[id][0], langs.keys()))
        return '\n'.join(lines)

    def MakeClique(self, message, translateable=True):
        '''Create a new clique initialized with a message.

        Args:
          message: tclib.Message()
          translateable: True | False
        '''
        clique = MessageClique(self, message, translateable)

        # Enable others to find this clique by its message ID
        if message.GetId() in self.cliques_:
            presentable_text = clique.GetMessage().GetPresentableContent()
            if not message.HasAssignedId():
                # Sanity check: cliques sharing an auto-derived ID must have
                # identical presentable content.
                for c in self.cliques_[message.GetId()]:
                    assert c.GetMessage().GetPresentableContent() == presentable_text
            self.cliques_[message.GetId()].append(clique)
            # We need to keep each list of cliques sorted by description, to
            # achieve stable results from the BestClique method, see below.
            self.cliques_[message.GetId()].sort(
                key=lambda c:c.GetMessage().GetDescription())
        else:
            self.cliques_[message.GetId()] = [clique]

        return clique

    def FindCliqueAndAddTranslation(self, translation, language):
        '''Adds the specified translation to the clique with the source message
        it is a translation of.

        Args:
          translation: tclib.Translation()
          language: 'en' | 'fr' ...

        Return:
          True if the source message was found, otherwise false.
        '''
        if translation.GetId() in self.cliques_:
            for clique in self.cliques_[translation.GetId()]:
                clique.AddTranslation(translation, language)
            return True
        else:
            return False

    def BestClique(self, id):
        '''Returns the "best" clique from a list of cliques. All the cliques
        must have the same ID. The "best" clique is chosen in the following
        order of preference:
        - The first clique that has a non-ID-based description.
        - If no such clique found, the first clique with an ID-based description.
        - Otherwise the first clique.

        This method is stable in terms of always returning a clique with
        an identical description (on different runs of GRIT on the same
        data) because self.cliques_ is sorted by description.
        '''
        clique_list = self.cliques_[id]
        clique_with_id = None
        clique_default = None
        for clique in clique_list:
            if not clique_default:
                clique_default = clique

            description = clique.GetMessage().GetDescription()
            if description and len(description) > 0:
                if not description.startswith('ID:'):
                    # this is the preferred case so we exit right away
                    return clique
                elif not clique_with_id:
                    clique_with_id = clique
        if clique_with_id:
            return clique_with_id
        else:
            return clique_default

    def BestCliquePerId(self):
        '''Iterates over the list of all cliques and returns the best clique for
        each ID. This will be the first clique with a source message that has a
        non-empty description, or an arbitrary clique if none of them has a
        description.
        '''
        for id in self.cliques_:
            yield self.BestClique(id)

    def BestCliqueByOriginalText(self, text, meaning):
        '''Finds the "best" (as in BestClique()) clique that has original text
        'text' and meaning 'meaning'. Returns None if there is no such clique.
        '''
        # If needed, this can be optimized by maintaining a map of
        # fingerprints of original text+meaning to cliques.
        for c in self.BestCliquePerId():
            msg = c.GetMessage()
            if msg.GetRealContent() == text and msg.GetMeaning() == meaning:
                # NOTE(review): despite the method name and docstring, this
                # returns the tclib message object, not the clique 'c' --
                # confirm whether callers expect a message here.
                return msg
        return None

    def AllMessageIds(self):
        '''Returns a list of all defined message IDs.
        '''
        return self.cliques_.keys()

    def AllCliques(self):
        '''Iterates over all cliques. Note that this can return multiple cliques
        with the same ID.
        '''
        for cliques in self.cliques_.values():
            for c in cliques:
                yield c

    def GenerateXtbParserCallback(self, lang, debug=False):
        '''Creates a callback function as required by grit.xtb_reader.Parse().
        This callback will create Translation objects for each message from
        the XTB that exists in this uberclique, and add them as translations for
        the relevant cliques. The callback will add translations to the language
        specified by 'lang'

        Args:
          lang: 'fr'
          debug: True | False
        '''
        def Callback(id, structure):
            # Translations for IDs not present in this uberclique are ignored.
            if id not in self.cliques_:
                if debug: print "Ignoring translation #%s" % id
                return

            if debug: print "Adding translation #%s" % id

            # We fetch placeholder information from the original message (the XTB file
            # only contains placeholder names).
            original_msg = self.BestClique(id).GetMessage()

            translation = tclib.Translation(id=id)
            for is_ph,text in structure:
                if not is_ph:
                    translation.AppendText(text)
                else:
                    # Placeholder: match by presentation name against the
                    # original message's placeholders.
                    found_placeholder = False
                    for ph in original_msg.GetPlaceholders():
                        if ph.GetPresentation() == text:
                            translation.AppendPlaceholder(tclib.Placeholder(
                                ph.GetPresentation(), ph.GetOriginal(), ph.GetExample()))
                            found_placeholder = True
                            break
                    if not found_placeholder:
                        raise exception.MismatchingPlaceholders(
                            'Translation for message ID %s had <ph name="%s"/>, no match\n'
                            'in original message' % (id, text))
            self.FindCliqueAndAddTranslation(translation, lang)
        return Callback
class CustomType(object):
    '''A base class you should implement if you wish to specify a custom type
    for a message clique (i.e. custom validation and optional modification of
    translations).'''

    def Validate(self, message):
        '''Returns true if the message (a tclib.Message object) is valid,
        otherwise false.
        '''
        raise NotImplementedError()

    def ValidateAndModify(self, lang, translation):
        '''Returns true if the translation (a tclib.Translation object) is valid,
        otherwise false. The language is also passed in. This method may modify
        the translation that is passed in, if it so wishes.
        '''
        raise NotImplementedError()

    def ModifyTextPart(self, lang, text):
        '''If you call ModifyEachTextPart, it will turn around and call this method
        for each text part of the translation. You should return the modified
        version of the text, or just the original text to not change anything.
        '''
        raise NotImplementedError()

    def ModifyEachTextPart(self, lang, translation):
        '''Call this to easily modify one or more of the textual parts of a
        translation. It will call ModifyTextPart for each part of the
        translation, storing the result back in place.
        '''
        parts = translation.GetContent()
        for index, part in enumerate(parts):
            # Only plain textual parts are modified; placeholders pass through.
            if (isinstance(part, types.StringTypes)):
                parts[index] = self.ModifyTextPart(lang, part)
class OneOffCustomType(CustomType):
    '''A very simple custom type that performs the validation expressed by
    the input expression on all languages including the source language.
    The expression can access the variables 'lang', 'msg' and 'text()' where 'lang'
    is the language of 'msg', 'msg' is the message or translation being
    validated and 'text()' returns the real contents of 'msg' (for shorthand).
    '''
    def __init__(self, expression):
        # The expression is kept as source text and evaluated anew for each
        # message/translation passed to ValidateAndModify.
        self.expr = expression

    def Validate(self, message):
        # The source message is validated with the same expression as any
        # translation, using the clique's source language.
        return self.ValidateAndModify(MessageClique.source_language, message)

    def ValidateAndModify(self, lang, msg):
        def text():
            return msg.GetRealContent()
        # SECURITY NOTE: eval() executes the configured expression with access
        # to 'lang', 'text' and 'msg'. The expression must only ever come from
        # trusted build files, never from untrusted external input.
        return eval(self.expr, {},
                    {'lang' : lang,
                     'text' : text,
                     'msg' : msg,
                     })
class MessageClique(object):
    '''A message along with all of its translations. Also code to bring
    translations together with their original message.'''

    # change this to the language code of Messages you add to cliques_.
    # TODO(joi) Actually change this based on the <grit> node's source language
    source_language = 'en'

    # A constant translation we use when asked for a translation into the
    # special language constants.CONSTANT_LANGUAGE.
    CONSTANT_TRANSLATION = tclib.Translation(text='TTTTTT')

    # A pattern to match messages that are empty or whitespace only.
    WHITESPACE_MESSAGE = lazy_re.compile(u'^\s*$')

    def __init__(self, uber_clique, message, translateable=True, custom_type=None):
        '''Create a new clique initialized with just a message.

        Note that messages with a body comprised only of whitespace will implicitly
        be marked non-translatable.

        Args:
          uber_clique: Our uber-clique (collection of cliques)
          message: tclib.Message()
          translateable: True | False
          custom_type: instance of clique.CustomType interface
        '''
        # Our parent
        self.uber_clique = uber_clique
        # If not translateable, we only store the original message.
        self.translateable = translateable

        # We implicitly mark messages that have a whitespace-only body as
        # non-translateable.
        if MessageClique.WHITESPACE_MESSAGE.match(message.GetRealContent()):
            self.translateable = False

        # A mapping of language identifiers to tclib.BaseMessage and its
        # subclasses (i.e. tclib.Message and tclib.Translation).
        self.clique = { MessageClique.source_language : message }
        # A list of the "shortcut groups" this clique is
        # part of. Within any given shortcut group, no shortcut key (e.g. &J)
        # must appear more than once in each language for all cliques that
        # belong to the group.
        self.shortcut_groups = []

        # An instance of the CustomType interface, or None. If this is set, it will
        # be used to validate the original message and translations thereof, and
        # will also get a chance to modify translations of the message.
        self.SetCustomType(custom_type)

    def GetMessage(self):
        '''Retrieves the tclib.Message that is the source for this clique.'''
        return self.clique[MessageClique.source_language]

    def GetId(self):
        '''Retrieves the message ID of the messages in this clique.'''
        return self.GetMessage().GetId()

    def IsTranslateable(self):
        # True unless the clique was created non-translateable or the source
        # message body was whitespace-only (see __init__).
        return self.translateable

    def AddToShortcutGroup(self, group):
        # Register this clique as a member of the named shortcut group.
        self.shortcut_groups.append(group)

    def SetCustomType(self, custom_type):
        '''Makes this clique use custom_type for validating messages and
        translations, and optionally modifying translations.

        Raises exception.InvalidMessage when the source message fails the
        custom type's validation.
        '''
        self.custom_type = custom_type
        if custom_type and not custom_type.Validate(self.GetMessage()):
            raise exception.InvalidMessage(self.GetMessage().GetRealContent())

    def MessageForLanguage(self, lang, pseudo_if_no_match=True, fallback_to_english=False):
        '''Returns the message/translation for the specified language, providing
        a pseudotranslation if there is no available translation and a pseudo-
        translation is requested.

        The translation of any message whatsoever in the special language
        'x_constant' is the message "TTTTTT".

        Args:
          lang: 'en'
          pseudo_if_no_match: True
          fallback_to_english: False

        Return:
          tclib.BaseMessage
        '''
        if not self.translateable:
            return self.GetMessage()

        if lang == constants.CONSTANT_LANGUAGE:
            return self.CONSTANT_TRANSLATION

        for msglang in self.clique.keys():
            if lang == msglang:
                return self.clique[msglang]

        if lang == constants.FAKE_BIDI:
            return pseudo_rtl.PseudoRTLMessage(self.GetMessage())

        if fallback_to_english:
            self.uber_clique._AddMissingTranslation(lang, self, is_error=False)
            return self.GetMessage()

        # If we're not supposed to generate pseudotranslations, we add an error
        # report to a list of errors, then fail at a higher level, so that we
        # get a list of all messages that are missing translations.
        if not pseudo_if_no_match:
            self.uber_clique._AddMissingTranslation(lang, self, is_error=True)

        return pseudo.PseudoMessage(self.GetMessage())

    def AllMessagesThatMatch(self, lang_re, include_pseudo = True):
        '''Returns a map of all messages that match 'lang', including the pseudo
        translation if requested.

        Args:
          lang_re: re.compile('fr|en')
          include_pseudo: True

        Return:
          { 'en' : tclib.Message,
            'fr' : tclib.Translation,
            pseudo.PSEUDO_LANG : tclib.Translation }
        '''
        # NOTE(review): non-translateable cliques return a *list* with the
        # single source message rather than a map -- callers must handle both
        # shapes.
        if not self.translateable:
            return [self.GetMessage()]

        matches = {}
        for msglang in self.clique:
            if lang_re.match(msglang):
                matches[msglang] = self.clique[msglang]

        if include_pseudo:
            matches[pseudo.PSEUDO_LANG] = pseudo.PseudoMessage(self.GetMessage())

        return matches

    def AddTranslation(self, translation, language):
        '''Add a translation to this clique. The translation must have the same
        ID as the message that is the source for this clique.

        If this clique is not translateable, the function just returns.

        Args:
          translation: tclib.Translation()
          language: 'en'

        Throws:
          grit.exception.InvalidTranslation if the translation you're trying to add
          doesn't have the same message ID as the source message of this clique.
        '''
        if not self.translateable:
            return
        if translation.GetId() != self.GetId():
            raise exception.InvalidTranslation(
                'Msg ID %s, transl ID %s' % (self.GetId(), translation.GetId()))

        assert not language in self.clique

        # Because two messages can differ in the original content of their
        # placeholders yet share the same ID (because they are otherwise the
        # same), the translation we are getting may have different original
        # content for placeholders than our message, yet it is still the right
        # translation for our message (because it is for the same ID). We must
        # therefore fetch the original content of placeholders from our original
        # English message.
        #
        # See grit.clique_unittest.MessageCliqueUnittest.testSemiIdenticalCliques
        # for a concrete explanation of why this is necessary.
        original = self.MessageForLanguage(self.source_language, False)
        if len(original.GetPlaceholders()) != len(translation.GetPlaceholders()):
            print ("ERROR: '%s' translation of message id %s does not match" %
                   (language, translation.GetId()))
            assert False

        transl_msg = tclib.Translation(id=self.GetId(),
                                       text=translation.GetPresentableContent(),
                                       placeholders=original.GetPlaceholders())

        # Give the custom type (if any) a chance to validate and/or modify
        # the translation; failures are reported but do not abort.
        if self.custom_type and not self.custom_type.ValidateAndModify(language, transl_msg):
            print "WARNING: %s translation failed validation: %s" % (
                language, transl_msg.GetId())

        self.clique[language] = transl_msg
| bsd-3-clause |
glwu/python-for-android | python3-alpha/extra_modules/gdata/tlslite/X509.py | 48 | 4292 | """Class representing an X.509 certificate."""
from .utils.ASN1Parser import ASN1Parser
from .utils.cryptomath import *
from .utils.keyfactory import _createPublicRSAKey
class X509:
    """This class represents an X.509 certificate.

    @type bytes: L{array.array} of unsigned bytes
    @ivar bytes: The DER-encoded ASN.1 certificate

    @type publicKey: L{tlslite.utils.RSAKey.RSAKey}
    @ivar publicKey: The subject public key from the certificate.
    """

    def __init__(self):
        # Empty byte sequence until parse()/parseBinary() fills it in.
        self.bytes = createByteArraySequence([])
        self.publicKey = None

    def parse(self, s):
        """Parse a PEM-encoded X.509 certificate.

        @type s: str
        @param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded
        certificate wrapped with "-----BEGIN CERTIFICATE-----" and
        "-----END CERTIFICATE-----" tags).
        """
        start = s.find("-----BEGIN CERTIFICATE-----")
        end = s.find("-----END CERTIFICATE-----")
        if start == -1:
            raise SyntaxError("Missing PEM prefix")
        if end == -1:
            raise SyntaxError("Missing PEM postfix")
        # Keep only the base64 payload between the PEM markers.
        s = s[start+len("-----BEGIN CERTIFICATE-----") : end]

        bytes = base64ToBytes(s)
        self.parseBinary(bytes)
        return self

    def parseBinary(self, bytes):
        """Parse a DER-encoded X.509 certificate.

        @type bytes: str or L{array.array} of unsigned bytes
        @param bytes: A DER-encoded X.509 certificate.
        """
        if isinstance(bytes, type("")):
            bytes = stringToBytes(bytes)

        self.bytes = bytes
        p = ASN1Parser(bytes)

        #Get the tbsCertificate
        tbsCertificateP = p.getChild(0)

        #Is the optional version field present?
        #This determines which index the key is at.
        # (0xA0 is the context tag [0] of the EXPLICIT version field.)
        if tbsCertificateP.value[0]==0xA0:
            subjectPublicKeyInfoIndex = 6
        else:
            subjectPublicKeyInfoIndex = 5

        #Get the subjectPublicKeyInfo
        subjectPublicKeyInfoP = tbsCertificateP.getChild(\
            subjectPublicKeyInfoIndex)

        #Get the algorithm
        algorithmP = subjectPublicKeyInfoP.getChild(0)
        rsaOID = algorithmP.value
        # Only the rsaEncryption OID (1.2.840.113549.1.1.1) is supported.
        if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
            raise SyntaxError("Unrecognized AlgorithmIdentifier")

        #Get the subjectPublicKey
        subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)

        #Adjust for BIT STRING encapsulation
        # (the first octet of a BIT STRING is the unused-bits count,
        # which must be 0 here).
        if (subjectPublicKeyP.value[0] !=0):
            raise SyntaxError()
        subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])

        #Get the modulus and exponent
        modulusP = subjectPublicKeyP.getChild(0)
        publicExponentP = subjectPublicKeyP.getChild(1)

        #Decode them into numbers
        n = bytesToNumber(modulusP.value)
        e = bytesToNumber(publicExponentP.value)

        #Create a public key instance
        self.publicKey = _createPublicRSAKey(n, e)

    def getFingerprint(self):
        """Get the hex-encoded fingerprint of this certificate.

        @rtype: str
        @return: A hex-encoded fingerprint.
        """
        # NOTE(review): 'sha' is not imported in this module; it is presumably
        # supplied by the 'from .utils.cryptomath import *' star-import --
        # confirm. On Python 3 this would otherwise need hashlib.sha1.
        return sha.sha(self.bytes).hexdigest()

    def getCommonName(self):
        """Get the Subject's Common Name from the certificate.

        The cryptlib_py module must be installed in order to use this
        function.

        @rtype: str or None
        @return: The CN component of the certificate's subject DN, if
        present.
        """
        import cryptlib_py
        import array
        c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED)
        name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME
        try:
            try:
                # First call with None queries the attribute length, the
                # second call fills the pre-sized buffer.
                length = cryptlib_py.cryptGetAttributeString(c, name, None)
                returnVal = array.array('B', [0] * length)
                cryptlib_py.cryptGetAttributeString(c, name, returnVal)
                returnVal = returnVal.tostring()
            except cryptlib_py.CryptException as e:
                # NOTE(review): indexing an exception object (e[0]) is
                # Python 2 behaviour; Python 3 would need e.args[0]. Confirm.
                if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
                    returnVal = None
            return returnVal
        finally:
            # Always release the cryptlib certificate handle.
            cryptlib_py.cryptDestroyCert(c)

    def writeBytes(self):
        # Return the raw DER-encoded certificate bytes.
        return self.bytes
| apache-2.0 |
joferkington/numpy | numpy/lib/tests/test_ufunclike.py | 188 | 2024 | from __future__ import division, absolute_import, print_function
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal
)
class TestUfunclike(TestCase):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([True, False, False, False, False, False])
res = ufl.isposinf(a)
assert_equal(res, tgt)
res = ufl.isposinf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_isneginf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([False, True, False, False, False, False])
res = ufl.isneginf(a)
assert_equal(res, tgt)
res = ufl.isneginf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_fix(self):
a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
out = nx.zeros(a.shape, float)
tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])
res = ufl.fix(a)
assert_equal(res, tgt)
res = ufl.fix(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
assert_equal(ufl.fix(3.14), 3)
def test_fix_with_subclass(self):
class MyArray(nx.ndarray):
def __new__(cls, data, metadata=None):
res = nx.array(data, copy=True).view(cls)
res.metadata = metadata
return res
def __array_wrap__(self, obj, context=None):
obj.metadata = self.metadata
return obj
a = nx.array([1.1, -1.1])
m = MyArray(a, metadata='foo')
f = ufl.fix(m)
assert_array_equal(f, nx.array([1, -1]))
assert_(isinstance(f, MyArray))
assert_equal(f.metadata, 'foo')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
aospx-kitkat/platform_external_chromium_org | tools/telemetry/telemetry/core/platform/proc_util.py | 23 | 1470 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
try:
import resource # pylint: disable=F0401
except ImportError:
resource = None # Not available on all platforms
def _ConvertKbToByte(value):
return int(value.replace('kB','')) * 1024
def _GetProcFileDict(contents):
retval = {}
for line in contents.splitlines():
key, value = line.split(':')
retval[key.strip()] = value.strip()
return retval
def GetSystemCommitCharge(meminfo_contents):
  """Compute the system commit charge, in bytes, from /proc/meminfo text.

  Defined as MemTotal minus the reclaimable pools (MemFree, Buffers, Cached).
  """
  meminfo = _GetProcFileDict(meminfo_contents)
  charge = _ConvertKbToByte(meminfo['MemTotal'])
  for reclaimable in ('MemFree', 'Buffers', 'Cached'):
    charge -= _ConvertKbToByte(meminfo[reclaimable])
  return charge
def GetMemoryStats(status_contents, stats):
  """Extract memory statistics from /proc/<pid>/status text and the split
  fields of /proc/<pid>/stat; returns {} for zombies or missing input."""
  status = _GetProcFileDict(status_contents)
  if not status or not stats or 'Z' in status['State']:
    return {}
  # stat fields 22/23 are vsize (bytes) and rss (pages) respectively.
  page_size = resource.getpagesize()
  return {'VM': int(stats[22]),
          'VMPeak': _ConvertKbToByte(status['VmPeak']),
          'WorkingSetSize': int(stats[23]) * page_size,
          'WorkingSetSizePeak': _ConvertKbToByte(status['VmHWM'])}
def GetIOStats(io_contents):
  """Extract read/write operation and transfer counters from /proc/<pid>/io."""
  counters = _GetProcFileDict(io_contents)
  field_map = (('ReadOperationCount', 'syscr'),
               ('WriteOperationCount', 'syscw'),
               ('ReadTransferCount', 'rchar'),
               ('WriteTransferCount', 'wchar'))
  return dict((name, int(counters[field])) for name, field in field_map)
| bsd-3-clause |
hansey/youtube-dl | youtube_dl/extractor/xnxx.py | 112 | 1406 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class XNXXIE(InfoExtractor):
    """Extractor for xnxx.com videos: pulls the FLV URL, title and thumbnail
    out of the video page's HTML."""

    # Matches both video.xnxx.com and www.xnxx.com URLs; the numeric portion
    # after 'video' is captured as the video id.
    _VALID_URL = r'^https?://(?:video|www)\.xnxx\.com/video(?P<id>[0-9]+)/(.*)'
    _TEST = {
        'url': 'http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_',
        'md5': '0831677e2b4761795f68d417e0b7b445',
        'info_dict': {
            'id': '1135332',
            'ext': 'flv',
            'title': 'lida » Naked Funny Actress (5)',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page embeds a percent-encoded FLV URL in its flashvars.
        video_url = self._search_regex(r'flv_url=(.*?)&',
                                       webpage, 'video URL')
        video_url = compat_urllib_parse_unquote(video_url)

        video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XNXX.COM',
                                              webpage, 'title')

        # Thumbnail is optional: extraction proceeds without it.
        video_thumbnail = self._search_regex(r'url_bigthumb=(.*?)&',
                                             webpage, 'thumbnail', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'ext': 'flv',
            'thumbnail': video_thumbnail,
            'age_limit': 18,
        }
| unlicense |
microcom/odoo | addons/website/models/ir_ui_view.py | 14 | 9536 | # -*- coding: ascii -*-
import copy
import logging
from itertools import groupby
from lxml import etree, html
from openerp import SUPERUSER_ID, api, tools
from openerp.addons.website.models import website
from openerp.http import request
from openerp.osv import osv, fields
_logger = logging.getLogger(__name__)
class view(osv.osv):
    """Website-aware extension of ``ir.ui.view``.

    Adds per-website view resolution: a view ``key`` may exist once per
    website plus a generic fallback record with no ``website_id``. Also
    marks views as complete web pages and drives the website editor's
    "Customize" menu through :meth:`customize_template_get`.
    """
    _name = "ir.ui.view"
    _inherit = ["ir.ui.view", "website.seo.metadata"]
    _columns = {
        # True when the view is a complete, standalone web page template.
        'page': fields.boolean("Whether this view is a web page template (complete)"),
        # Shown in the website "Customize" menu as a toggleable inherited view.
        'customize_show': fields.boolean("Show As Optional Inherit"),
        # Website this view is specific to; empty means shared by all websites.
        'website_id': fields.many2one('website', ondelete='cascade', string="Website"),
    }
    _defaults = {
        'page': False,
        'customize_show': False,
    }
    def unlink(self, cr, uid, ids, context=None):
        """Delete the views, then invalidate caches so the ormcache on
        :meth:`get_view_id` cannot return ids of deleted views."""
        res = super(view, self).unlink(cr, uid, ids, context=context)
        self.clear_caches()
        return res
    def _sort_suitability_key(self):
        """Sort key ranking views by descending suitability.

        Most suitable first: a view whose ``website_id`` matches the
        context's website, then the generic view with no website set.
        Returns a tuple so ``sorted()`` orders mismatching-website views
        after matching ones, and higher website ids last.
        """
        context_website_id = self.env.context.get('website_id', 1)
        website_id = self.website_id.id or 0
        different_website = context_website_id != website_id
        return (different_website, website_id)
    def filter_duplicate(self):
        """Reduce the recordset to the single most suitable view per
        distinct ``key`` (see :meth:`_sort_suitability_key`).

        NOTE(review): itertools.groupby only groups *adjacent* records;
        callers (:meth:`_view_obj`) pass recordsets sharing one key, so
        this is safe there -- confirm before reusing on mixed-key sets.
        """
        filtered = self.browse([])
        for _, group in groupby(self, key=lambda r:r.key):
            filtered += sorted(group, key=lambda r:r._sort_suitability_key())[0]
        return filtered
    def _view_obj(self, cr, uid, view_id, context=None):
        """Resolve ``view_id`` into a browse record.

        ``view_id`` may be a string (tried first as a view ``key``,
        restricted to the context website when one is set, then as an
        XML id), a database id, or an already-browsed view record.
        """
        if isinstance(view_id, basestring):
            if 'website_id' in (context or {}):
                # Accept website-specific views and generic (no-website) ones;
                # ordering by website_id lets filter_duplicate pick the best.
                domain = [('key', '=', view_id), '|', ('website_id', '=', False), ('website_id', '=', context.get('website_id'))]
                rec_id = self.search(cr, uid, domain, order='website_id', context=context)
            else:
                rec_id = self.search(cr, uid, [('key', '=', view_id)], context=context)
            if rec_id:
                return self.browse(cr, uid, rec_id, context=context).filter_duplicate()
            else:
                # No view with that key: fall back to XML id resolution.
                return self.pool['ir.model.data'].xmlid_to_object(
                    cr, uid, view_id, raise_if_not_found=True, context=context)
        elif isinstance(view_id, (int, long)):
            return self.browse(cr, uid, view_id, context=context)
        # Otherwise assume the caller already passed a browse record.
        return view_id
    # Returns all views (called and inherited) related to a view.
    # Used by the translation mechanism, SEO and optional templates.
    def _views_get(self, cr, uid, view_id, options=True, bundles=False, context=None, root=True):
        """ For a given view ``view_id``, should return:
            * the view itself
            * all views inheriting from it, enabled or not
              - but not the optional children of a non-enabled child
            * all views called from it (via t-call)
        """
        try:
            view = self._view_obj(cr, uid, view_id, context=context)
        except ValueError:
            _logger.warning("Could not find view object with view_id '%s'" % (view_id))
            # Unresolvable view: log and return nothing rather than crash.
            return []
        # From the requested view, climb to the root of its inheritance chain.
        while root and view.inherit_id:
            view = view.inherit_id
        result = [view]
        node = etree.fromstring(view.arch)
        # Collect templates reached through t-call (and optionally asset bundles).
        xpath = "//t[@t-call]"
        if bundles:
            xpath += "| //t[@t-call-assets]"
        for child in node.xpath(xpath):
            try:
                called_view = self._view_obj(cr, uid, child.get('t-call', child.get('t-call-assets')), context=context)
            except ValueError:
                continue
            if called_view not in result:
                result += self._views_get(cr, uid, called_view, options=options, bundles=bundles, context=context)
        extensions = view.inherit_children_ids
        if not options:
            # only active children
            extensions = (v for v in view.inherit_children_ids if v.active)
        # Keep options in a deterministic order regardless of their applicability
        for extension in sorted(extensions, key=lambda v: v.id):
            for r in self._views_get(
                cr, uid, extension,
                # only return optional grandchildren if this child is enabled
                options=extension.active,
                context=context, root=False):
                if r not in result:
                    result.append(r)
        return result
    @tools.ormcache_context('uid', 'xml_id', keys=('website_id',))
    def get_view_id(self, cr, uid, xml_id, context=None):
        """Resolve ``xml_id`` to a view database id, preferring the view
        specific to the context website over the generic one.

        Cached per uid/xml_id/website_id (see the ormcache decorator);
        :meth:`unlink` clears this cache. Raises ValueError when a
        website is set and no matching view key exists.
        """
        if context and 'website_id' in context and not isinstance(xml_id, (int, long)):
            domain = [('key', '=', xml_id), '|', ('website_id', '=', context['website_id']), ('website_id', '=', False)]
            [view_id] = self.search(cr, uid, domain, order='website_id', limit=1, context=context) or [None]
            if not view_id:
                _logger.warning("Could not find view object with xml_id '%s'" % (xml_id))
                raise ValueError('View %r in website %r not found' % (xml_id, context['website_id']))
        else:
            view_id = super(view, self).get_view_id(cr, uid, xml_id, context=context)
        return view_id
    @api.cr_uid_ids_context
    def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
        """Render the view; when serving a website request, enrich the
        rendering context with website data (see _prepare_qcontext) and
        enable branding for editors/publishers."""
        if request and getattr(request, 'website_enabled', False):
            # Website pages are always rendered with the qweb engine.
            engine = 'ir.qweb'
            if isinstance(id_or_xml_id, list):
                id_or_xml_id = id_or_xml_id[0]
            qcontext = self._prepare_qcontext(cr, uid, context=context)
            # Caller-provided values take precedence over the defaults.
            if values:
                qcontext.update(values)
            # In edit mode ir.ui.view will tag nodes for inline editing.
            if not qcontext.get('translatable') and not qcontext.get('rendering_bundle'):
                if qcontext.get('editable'):
                    context = dict(context, inherit_branding=True)
                elif request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher'):
                    context = dict(context, inherit_branding_auto=True)
            view_obj = request.website.get_template(id_or_xml_id)
            if 'main_object' not in qcontext:
                qcontext['main_object'] = view_obj
            values = qcontext
        return super(view, self).render(cr, uid, id_or_xml_id, values=values, engine=engine, context=context)
    def _prepare_qcontext(self, cr, uid, context=None):
        """Build the default qweb rendering context for website pages:
        current website, helpers (url_for, slug), company, user, language
        data and the editable/translatable flags used by the editor."""
        if not context:
            context = {}
        company = self.pool['res.company'].browse(cr, SUPERUSER_ID, request.website.company_id.id, context=context)
        editable = request.website.is_publisher()
        # A publisher viewing a non-default language translates instead of editing.
        translatable = editable and context.get('lang') != request.website.default_lang_code
        editable = not translatable and editable
        qcontext = dict(
            context.copy(),
            website=request.website,
            url_for=website.url_for,
            slug=website.slug,
            res_company=company,
            user_id=self.pool.get("res.users").browse(cr, uid, uid),
            default_lang_code=request.website.default_lang_code,
            languages=request.website.get_languages(),
            translatable=translatable,
            editable=editable,
            menu_data=self.pool['ir.ui.menu'].load_menus_root(cr, uid, context=context) if request.website.is_user() else None,
        )
        return qcontext
    def customize_template_get(self, cr, uid, key, full=False, bundles=False, context=None):
        """ Get inherit view's informations of the template ``key``. By default, only
        returns ``customize_show`` templates (which can be active or not), if
        ``full=True`` returns inherit view's informations of the template ``key``.
        ``bundles=True`` returns also the asset bundles.

        Views the current user's groups do not allow are skipped. Each
        parent view contributes one 'header' entry followed by its
        customizable children.
        """
        imd = self.pool['ir.model.data']
        theme_view_id = imd.xmlid_to_res_id(cr, uid, 'website.theme')
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        user_groups = set(user.groups_id)
        # active_test=False: include currently-disabled optional views.
        views = self._views_get(
            cr, uid, key, bundles=bundles,
            context=dict(context or {}, active_test=False))
        done = set()
        result = []
        for v in views:
            if not user_groups.issuperset(v.groups_id):
                continue
            # Theme children are managed elsewhere and excluded here.
            if full or (v.customize_show and v.inherit_id.id != theme_view_id):
                if v.inherit_id not in done:
                    result.append({
                        'name': v.inherit_id.name,
                        'id': v.id,
                        'key': v.key,
                        'inherit_id': v.inherit_id.id,
                        'header': True,
                        'active': False
                    })
                    done.add(v.inherit_id)
                result.append({
                    'name': v.name,
                    'id': v.id,
                    'key': v.key,
                    'inherit_id': v.inherit_id.id,
                    'header': False,
                    'active': v.active,
                })
        return result
| agpl-3.0 |
tartavull/google-cloud-python | spanner/tests/system/test_system.py | 1 | 38920 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import math
import operator
import os
import struct
import threading
import time
import unittest
from google.cloud.proto.spanner.v1.type_pb2 import ARRAY
from google.cloud.proto.spanner.v1.type_pb2 import BOOL
from google.cloud.proto.spanner.v1.type_pb2 import BYTES
from google.cloud.proto.spanner.v1.type_pb2 import DATE
from google.cloud.proto.spanner.v1.type_pb2 import FLOAT64
from google.cloud.proto.spanner.v1.type_pb2 import INT64
from google.cloud.proto.spanner.v1.type_pb2 import STRING
from google.cloud.proto.spanner.v1.type_pb2 import TIMESTAMP
from google.cloud.proto.spanner.v1.type_pb2 import Type
from google.cloud._helpers import UTC
from google.cloud.exceptions import GrpcRendezvous
from google.cloud.spanner._helpers import TimestampWithNanoseconds
from google.cloud.spanner.client import Client
from google.cloud.spanner.keyset import KeyRange
from google.cloud.spanner.keyset import KeySet
from google.cloud.spanner.pool import BurstyPool
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
# Running under CircleCI (or setting the env var below) signals that the
# suite should create -- and later delete -- its own Spanner instance.
IS_CIRCLE = os.getenv('CIRCLECI') == 'true'
CREATE_INSTANCE = IS_CIRCLE or os.getenv(
    'GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE') is not None
if CREATE_INSTANCE:
    # Unique suffix avoids collisions between concurrent CI runs.
    INSTANCE_ID = 'google-cloud' + unique_resource_id('-')
else:
    # Reuse a long-lived instance; name overridable via the environment.
    INSTANCE_ID = os.environ.get('GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE',
                                 'google-cloud-python-systest')
DATABASE_ID = 'test_database'
# Populated by setUpModule() with the instances that already existed,
# so tests can tell them apart from the one this run may create.
EXISTING_INSTANCES = []
class Config(object):
    """Run-time configuration to be modified at set-up.
    This is a mutable stand-in to allow test set-up to modify
    global state.
    """
    # Shared spanner Client, created in setUpModule().
    CLIENT = None
    # InstanceConfig chosen from the back-end's available configs.
    INSTANCE_CONFIG = None
    # Instance used by all tests (created or reloaded in setUpModule()).
    INSTANCE = None
def _retry_on_unavailable(exc):
    """Predicate for RetryErrors: retry only gRPC 'UNAVAILABLE' errors."""
    from grpc import StatusCode
    status = exc.code()
    return status == StatusCode.UNAVAILABLE
def _has_all_ddl(database):
    """Return True once the database reports every fixture DDL statement."""
    expected_count = len(DDL_STATEMENTS)
    return len(database.ddl_statements) == expected_count
def _list_instances():
    """Materialize the client's instance iterator into a concrete list
    (wrapped in a retry by callers, so it must be re-invocable)."""
    return list(Config.CLIENT.list_instances())
def setUpModule():
    """Populate :class:`Config` for the whole module.

    Creates the shared client, picks an instance config, records the
    pre-existing instances into ``EXISTING_INSTANCES``, and either
    creates a fresh instance (CREATE_INSTANCE) or reloads the shared one.
    """
    Config.CLIENT = Client()
    # gRPC UNAVAILABLE errors are transient; retry the listing calls.
    retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable)
    configs = list(retry(Config.CLIENT.list_instance_configs)())
    # Defend against back-end returning configs for regions we aren't
    # actually allowed to use.
    configs = [config for config in configs if '-us-' in config.name]
    if len(configs) < 1:
        raise ValueError('List instance configs failed in module set up.')
    Config.INSTANCE_CONFIG = configs[0]
    config_name = configs[0].name
    instances = retry(_list_instances)()
    # Slice-assign so the module-level list object itself is updated.
    EXISTING_INSTANCES[:] = instances
    if CREATE_INSTANCE:
        Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, config_name)
        created_op = Config.INSTANCE.create()
        created_op.result(30) # block until completion
    else:
        Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
        Config.INSTANCE.reload()
def tearDownModule():
    # Only delete the instance if this run created it; shared
    # (pre-existing) instances are left untouched.
    if CREATE_INSTANCE:
        Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
    """System tests for instance-level admin operations (list / reload /
    create / update) against the shared Config.INSTANCE."""
    def setUp(self):
        # Instances created by a test are registered here for cleanup.
        self.instances_to_delete = []
    def tearDown(self):
        for instance in self.instances_to_delete:
            instance.delete()
    def test_list_instances(self):
        instances = list(Config.CLIENT.list_instances())
        # We have added one new instance in `setUpModule`.
        if CREATE_INSTANCE:
            self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
        for instance in instances:
            instance_existence = (instance in EXISTING_INSTANCES or
                                  instance == Config.INSTANCE)
            self.assertTrue(instance_existence)
    def test_reload_instance(self):
        # Use same arguments as Config.INSTANCE (created in `setUpModule`)
        # so we can use reload() on a fresh instance.
        instance = Config.CLIENT.instance(
            INSTANCE_ID, Config.INSTANCE_CONFIG.name)
        # Make sure metadata unset before reloading.
        instance.display_name = None
        instance.reload()
        self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
    @unittest.skipUnless(CREATE_INSTANCE, 'Skipping instance creation')
    def test_create_instance(self):
        ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
        instance = Config.CLIENT.instance(
            ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
        operation = instance.create()
        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)
        # We want to make sure the operation completes.
        operation.result(30) # raises on failure / timeout.
        # Create a new instance instance and make sure it is the same.
        instance_alt = Config.CLIENT.instance(
            ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
        instance_alt.reload()
        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)
    def test_update_instance(self):
        OLD_DISPLAY_NAME = Config.INSTANCE.display_name
        NEW_DISPLAY_NAME = 'Foo Bar Baz'
        Config.INSTANCE.display_name = NEW_DISPLAY_NAME
        operation = Config.INSTANCE.update()
        # We want to make sure the operation completes.
        operation.result(30) # raises on failure / timeout.
        # Create a new instance instance and reload it.
        instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
        self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
        instance_alt.reload()
        self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
        # Make sure to put the instance back the way it was for the
        # other test cases.
        Config.INSTANCE.display_name = OLD_DISPLAY_NAME
        Config.INSTANCE.update()
class _TestData(object):
    """Mixin with shared fixture data and row-comparison helpers for the
    database/session test cases below."""
    TABLE = 'contacts'
    COLUMNS = ('contact_id', 'first_name', 'last_name', 'email')
    ROW_DATA = (
        (1, u'Phred', u'Phlyntstone', u'phred@example.com'),
        (2, u'Bharney', u'Rhubble', u'bharney@example.com'),
        (3, u'Wylma', u'Phlyntstone', u'wylma@example.com'),
    )
    # KeySet matching every row in the table.
    ALL = KeySet(all_=True)
    SQL = 'SELECT * FROM contacts ORDER BY contact_id'
    def _assert_timestamp(self, value, nano_value):
        """Assert a naive expected datetime matches a UTC-aware
        nanosecond-precision value returned by the back-end."""
        self.assertIsInstance(value, datetime.datetime)
        self.assertIsNone(value.tzinfo)
        self.assertIs(nano_value.tzinfo, UTC)
        self.assertEqual(value.year, nano_value.year)
        self.assertEqual(value.month, nano_value.month)
        self.assertEqual(value.day, nano_value.day)
        self.assertEqual(value.hour, nano_value.hour)
        self.assertEqual(value.minute, nano_value.minute)
        self.assertEqual(value.second, nano_value.second)
        self.assertEqual(value.microsecond, nano_value.microsecond)
        if isinstance(value, TimestampWithNanoseconds):
            self.assertEqual(value.nanosecond, nano_value.nanosecond)
        else:
            # Plain datetimes only carry microseconds; scale up to compare.
            self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)
    def _check_row_data(self, row_data, expected=None):
        """Assert fetched rows equal ``expected`` (defaults to ROW_DATA),
        with special handling for nanosecond timestamps and NaN floats."""
        if expected is None:
            expected = self.ROW_DATA
        self.assertEqual(len(row_data), len(expected))
        # NOTE: the loop variable deliberately shadows ``expected``; zip()
        # has already consumed the outer sequence, so this is harmless.
        for found, expected in zip(row_data, expected):
            self.assertEqual(len(found), len(expected))
            for found_cell, expected_cell in zip(found, expected):
                if isinstance(found_cell, TimestampWithNanoseconds):
                    self._assert_timestamp(expected_cell, found_cell)
                elif isinstance(found_cell, float) and math.isnan(found_cell):
                    # NaN != NaN, so compare NaN-ness instead of equality.
                    self.assertTrue(math.isnan(expected_cell))
                else:
                    self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
    """System tests for database-level operations: create/list/update DDL
    plus batch writes, snapshot reads and run_in_transaction."""
    @classmethod
    def setUpClass(cls):
        # One shared database for the whole class; dropped in tearDownClass.
        pool = BurstyPool()
        cls._db = Config.INSTANCE.database(
            DATABASE_ID, ddl_statements=DDL_STATEMENTS, pool=pool)
        cls._db.create()
    @classmethod
    def tearDownClass(cls):
        cls._db.drop()
    def setUp(self):
        # Databases created by a test are registered here for cleanup.
        self.to_delete = []
    def tearDown(self):
        for doomed in self.to_delete:
            doomed.drop()
    def test_list_databases(self):
        # Since `Config.INSTANCE` is newly created in `setUpModule`, the
        # database created in `setUpClass` here will be the only one.
        databases = list(Config.INSTANCE.list_databases())
        self.assertEqual(databases, [self._db])
    def test_create_database(self):
        pool = BurstyPool()
        temp_db_id = 'temp-db' # test w/ hyphen
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        operation = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        operation.result(30) # raises on failure / timeout.
        # Both databases should now be listed, in name order.
        name_attr = operator.attrgetter('name')
        expected = sorted([temp_db, self._db], key=name_attr)
        databases = list(Config.INSTANCE.list_databases())
        found = sorted(databases, key=name_attr)
        self.assertEqual(found, expected)
    def test_update_database_ddl(self):
        pool = BurstyPool()
        temp_db_id = 'temp_db'
        # Created without DDL, then updated with the fixture statements.
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        create_op = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        create_op.result(90) # raises on failure / timeout.
        operation = temp_db.update_ddl(DDL_STATEMENTS)
        # We want to make sure the operation completes.
        operation.result(90) # raises on failure / timeout.
        temp_db.reload()
        self.assertEqual(len(temp_db.ddl_statements), len(DDL_STATEMENTS))
    def test_db_batch_insert_then_db_snapshot_read_and_db_read(self):
        # Wait until the database reports all of its DDL before writing.
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
            batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        # Reading at the batch's commit timestamp must see the new rows.
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_row_data(from_snap)
        from_db = list(self._db.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_row_data(from_db)
    def test_db_run_in_transaction_then_db_execute_sql(self):
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        def _unit_of_work(transaction, test):
            # Table was emptied above; the transaction must see it empty.
            rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
            test.assertEqual(rows, [])
            transaction.insert_or_update(
                test.TABLE, test.COLUMNS, test.ROW_DATA)
        self._db.run_in_transaction(_unit_of_work, test=self)
        rows = list(self._db.execute_sql(self.SQL))
        self._check_row_data(rows)
    def test_db_run_in_transaction_twice(self):
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        def _unit_of_work(transaction, test):
            transaction.insert_or_update(
                test.TABLE, test.COLUMNS, test.ROW_DATA)
        # insert_or_update makes the second run idempotent.
        self._db.run_in_transaction(_unit_of_work, test=self)
        self._db.run_in_transaction(_unit_of_work, test=self)
        rows = list(self._db.execute_sql(self.SQL))
        self._check_row_data(rows)
class TestSessionAPI(unittest.TestCase, _TestData):
ALL_TYPES_TABLE = 'all_types'
ALL_TYPES_COLUMNS = (
'list_goes_on',
'are_you_sure',
'raw_data',
'hwhen',
'approx_value',
'eye_d',
'description',
'exactly_hwhen',
)
COUNTERS_TABLE = 'counters'
COUNTERS_COLUMNS = (
'name',
'value',
)
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = TimestampWithNanoseconds(1995, 8, 31, nanosecond=987654321)
OTHER_NAN, = struct.unpack('<d', b'\x01\x00\x01\x00\x00\x00\xf8\xff')
BYTES_1 = b'Ymlu'
BYTES_2 = b'Ym9vdHM='
ALL_TYPES_ROWDATA = (
([], False, None, None, 0.0, None, None, None),
([1], True, BYTES_1, SOME_DATE, 0.0, 19, u'dog', SOME_TIME),
([5, 10], True, BYTES_1, None, 1.25, 99, u'cat', None),
([], False, BYTES_2, None, float('inf'), 107, u'frog', None),
([3, None, 9], False, None, None, float('-inf'), 207, None, None),
([], False, None, None, float('nan'), 1207, None, None),
([], False, None, None, OTHER_NAN, 2000, None, NANO_TIME),
)
@classmethod
def setUpClass(cls):
pool = BurstyPool()
cls._db = Config.INSTANCE.database(
DATABASE_ID, ddl_statements=DDL_STATEMENTS, pool=pool)
operation = cls._db.create()
operation.result(30) # raises on failure / timeout.
@classmethod
def tearDownClass(cls):
cls._db.drop()
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
def test_batch_insert_then_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
batch = session.batch()
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
batch.commit()
snapshot = session.snapshot(read_timestamp=batch.committed)
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows)
def test_batch_insert_then_read_all_datatypes(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.ALL_TYPES_TABLE, self.ALL)
batch.insert(
self.ALL_TYPES_TABLE,
self.ALL_TYPES_COLUMNS,
self.ALL_TYPES_ROWDATA)
snapshot = session.snapshot(read_timestamp=batch.committed)
rows = list(snapshot.read(
self.ALL_TYPES_TABLE, self.ALL_TYPES_COLUMNS, self.ALL))
self._check_row_data(rows, expected=self.ALL_TYPES_ROWDATA)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
snapshot = session.snapshot(read_timestamp=batch.committed)
rows = list(snapshot.execute_sql(self.SQL))
self._check_row_data(rows)
@RetryErrors(exception=GrpcRendezvous)
def test_transaction_read_and_insert_then_rollback(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
transaction = session.transaction()
transaction.begin()
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.rollback()
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
def _transaction_read_then_raise(self, transaction):
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(len(rows), 0)
transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
raise CustomException()
@RetryErrors(exception=GrpcRendezvous)
def test_transaction_read_and_insert_then_execption(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with self.assertRaises(CustomException):
session.run_in_transaction(self._transaction_read_then_raise)
# Transaction was rolled back.
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
@RetryErrors(exception=GrpcRendezvous)
def test_transaction_read_and_insert_or_update_then_commit(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
with session.transaction() as transaction:
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
transaction.insert_or_update(
self.TABLE, self.COLUMNS, self.ROW_DATA)
# Inserted rows can't be read until after commit.
rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
self.assertEqual(rows, [])
rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows)
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.insert_or_update(
self.COUNTERS_TABLE,
self.COUNTERS_COLUMNS,
[[pkey, INITIAL_VALUE]])
# We don't want to run the threads' transactions in the current
# session, which would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_session = self._db.session()
txn_sessions.append(txn_session)
txn_session.create()
self.to_delete.append(txn_session)
threads = [
threading.Thread(
target=txn_session.run_in_transaction,
args=(unit_of_work, pkey))
for txn_session in txn_sessions]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
keyset = KeySet(keys=[(pkey,)])
rows = list(session.read(
self.COUNTERS_TABLE, self.COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(
self.COUNTERS_TABLE, self.COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(
self.COUNTERS_TABLE,
self.COUNTERS_COLUMNS,
[[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
PKEY = 'read_w_concurrent_updates'
self._transaction_concurrency_helper(
self._read_w_concurrent_update, PKEY)
def _query_w_concurrent_update(self, transaction, pkey):
SQL = 'SELECT * FROM counters WHERE name = @name'
rows = list(transaction.execute_sql(
SQL,
params={'name': pkey},
param_types={'name': Type(code=STRING)},
))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(
self.COUNTERS_TABLE,
self.COUNTERS_COLUMNS,
[[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
PKEY = 'query_w_concurrent_updates'
self._transaction_concurrency_helper(
self._query_w_concurrent_update, PKEY)
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield [
index,
'First%09d' % (index,),
'Last%09d' % (max_index - index),
'test-%09d@example.com' % (index,),
]
def _set_up_table(self, row_count, db=None):
if db is None:
db = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(db.reload)()
session = db.session()
session.create()
self.to_delete.append(session)
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(
test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = session.run_in_transaction(_unit_of_work, test=self)
return session, committed
def test_snapshot_read_w_various_staleness(self):
from datetime import datetime
from google.cloud._helpers import UTC
ROW_COUNT = 400
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
before_reads = datetime.utcnow().replace(tzinfo=UTC)
# Test w/ read timestamp
read_tx = session.snapshot(read_timestamp=committed)
rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ min read timestamp
min_read_ts = session.snapshot(min_read_timestamp=committed)
rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
# Test w/ max staleness
max_staleness = session.snapshot(max_staleness=staleness)
rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ exact staleness
exact_staleness = session.snapshot(exact_staleness=staleness)
rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
# Test w/ strong
strong = session.snapshot()
rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(rows, all_data_rows)
def test_multiuse_snapshot_read_isolation_strong(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
strong = session.snapshot(multi_use=True)
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
read_ts = session.snapshot(read_timestamp=committed, multi_use=True)
before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
all_data_rows = list(self._row_data(ROW_COUNT))
time.sleep(1)
delta = datetime.timedelta(microseconds=1000)
exact = session.snapshot(exact_staleness=delta, multi_use=True)
before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(before, all_data_rows)
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_row_data(after, all_data_rows)
def test_read_w_manual_consume(self):
ROW_COUNT = 4000
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
streamed = snapshot.read(self.TABLE, self.COLUMNS, self.ALL)
retrieved = 0
while True:
try:
streamed.consume_next()
except StopIteration:
break
retrieved += len(streamed.rows)
streamed.rows[:] = ()
self.assertEqual(retrieved, ROW_COUNT)
self.assertEqual(streamed._current_row, [])
self.assertEqual(streamed._pending_chunk, None)
def test_read_w_index(self):
ROW_COUNT = 2000
# Indexed reads cannot return non-indexed columns
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
EXTRA_DDL = [
'CREATE INDEX contacts_by_last_name ON contacts(last_name)',
]
pool = BurstyPool()
temp_db = Config.INSTANCE.database(
'test_read_w_index', ddl_statements=DDL_STATEMENTS + EXTRA_DDL,
pool=pool)
operation = temp_db.create()
self.to_delete.append(_DatabaseDropper(temp_db))
# We want to make sure the operation completes.
operation.result(30) # raises on failure / timeout.
session, committed = self._set_up_table(ROW_COUNT, db=temp_db)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, MY_COLUMNS, self.ALL, index='contacts_by_last_name'))
expected = list(reversed(
[(row[0], row[2]) for row in self._row_data(ROW_COUNT)]))
self._check_row_data(rows, expected)
def test_read_w_single_key(self):
ROW_COUNT = 40
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
def test_read_w_multiple_keys(self):
ROW_COUNT = 40
indices = [0, 5, 17]
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, self.COLUMNS,
KeySet(keys=[(index,) for index in indices])))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
ROW_COUNT = 4000
LIMIT = 100
session, committed = self._set_up_table(ROW_COUNT)
snapshot = session.snapshot(read_timestamp=committed)
rows = list(snapshot.read(
self.TABLE, self.COLUMNS, self.ALL, limit=LIMIT))
all_data_rows = list(self._row_data(ROW_COUNT))
expected = all_data_rows[:LIMIT]
self._check_row_data(rows, expected)
    def test_read_w_ranges(self):
        """Exercise all four open/closed combinations of KeyRange bounds."""
        ROW_COUNT = 4000
        START = 1000
        END = 2000
        session, committed = self._set_up_table(ROW_COUNT)
        # multi_use=True: one snapshot serves all four reads below.
        snapshot = session.snapshot(read_timestamp=committed, multi_use=True)
        all_data_rows = list(self._row_data(ROW_COUNT))

        # [START, END] -- both bounds inclusive.
        closed_closed = KeyRange(start_closed=[START], end_closed=[END])
        keyset = KeySet(ranges=(closed_closed,))
        rows = list(snapshot.read(
            self.TABLE, self.COLUMNS, keyset))
        expected = all_data_rows[START:END+1]
        self._check_row_data(rows, expected)

        # [START, END) -- inclusive start, exclusive end.
        closed_open = KeyRange(start_closed=[START], end_open=[END])
        keyset = KeySet(ranges=(closed_open,))
        rows = list(snapshot.read(
            self.TABLE, self.COLUMNS, keyset))
        expected = all_data_rows[START:END]
        self._check_row_data(rows, expected)

        # (START, END) -- both bounds exclusive.
        open_open = KeyRange(start_open=[START], end_open=[END])
        keyset = KeySet(ranges=(open_open,))
        rows = list(snapshot.read(
            self.TABLE, self.COLUMNS, keyset))
        expected = all_data_rows[START+1:END]
        self._check_row_data(rows, expected)

        # (START, END] -- exclusive start, inclusive end.
        open_closed = KeyRange(start_open=[START], end_closed=[END])
        keyset = KeySet(ranges=(open_closed,))
        rows = list(snapshot.read(
            self.TABLE, self.COLUMNS, keyset))
        expected = all_data_rows[START+1:END+1]
        self._check_row_data(rows, expected)
    def test_execute_sql_w_manual_consume(self):
        """Drive a streamed SQL result set chunk-by-chunk via consume_next."""
        ROW_COUNT = 4000
        session, committed = self._set_up_table(ROW_COUNT)
        snapshot = session.snapshot(read_timestamp=committed)
        streamed = snapshot.execute_sql(self.SQL)
        retrieved = 0
        while True:
            try:
                streamed.consume_next()
            except StopIteration:
                break
            # Count, then discard, the rows delivered by this chunk so the
            # total can be verified without holding all rows in memory.
            retrieved += len(streamed.rows)
            streamed.rows[:] = ()
        self.assertEqual(retrieved, ROW_COUNT)
        # After exhaustion, no partial row or pending chunk may remain
        # buffered inside the streamed result set.
        self.assertEqual(streamed._current_row, [])
        self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(self, snapshot, sql, params, param_types, expected):
if 'ORDER' not in sql:
sql += ' ORDER BY eye_d'
rows = list(snapshot.execute_sql(
sql, params=params, param_types=param_types))
self._check_row_data(rows, expected=expected)
    def test_multiuse_snapshot_execute_sql_isolation_strong(self):
        """A strong multi-use snapshot keeps returning its original data
        even after the rows are deleted in a later transaction."""
        ROW_COUNT = 40
        SQL = 'SELECT * FROM {}'.format(self.TABLE)
        session, committed = self._set_up_table(ROW_COUNT)
        all_data_rows = list(self._row_data(ROW_COUNT))
        strong = session.snapshot(multi_use=True)
        before = list(strong.execute_sql(SQL))
        self._check_row_data(before, all_data_rows)
        # Delete everything outside the snapshot ...
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        # ... the snapshot must still read the rows as of its timestamp.
        after = list(strong.execute_sql(SQL))
        self._check_row_data(after, all_data_rows)
    def test_execute_sql_returning_array_of_struct(self):
        """SELECTing an ARRAY<STRUCT<...>> round-trips as nested lists."""
        SQL = (
            "SELECT ARRAY(SELECT AS STRUCT C1, C2 "
            "FROM (SELECT 'a' AS C1, 1 AS C2 "
            "UNION ALL SELECT 'b' AS C1, 2 AS C2) "
            "ORDER BY C1 ASC)"
        )
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        snapshot = session.snapshot()
        self._check_sql_results(
            snapshot,
            sql=SQL,
            params=None,
            param_types=None,
            # One row with one column: the array of (C1, C2) structs.
            expected=[
                [[['a', 1], ['b', 2]]],
            ])
    def test_execute_sql_w_query_param(self):
        """Bind each supported scalar (and an array) as a SQL parameter."""
        session = self._db.session()
        session.create()
        self.to_delete.append(session)

        with session.batch() as batch:
            batch.delete(self.ALL_TYPES_TABLE, self.ALL)
            batch.insert(
                self.ALL_TYPES_TABLE,
                self.ALL_TYPES_COLUMNS,
                self.ALL_TYPES_ROWDATA)

        snapshot = session.snapshot(
            read_timestamp=batch.committed, multi_use=True)

        # Cannot equality-test array values.  See below for a test w/
        # array of IDs.

        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE are_you_sure = @sure',
            params={'sure': True},
            param_types={'sure': Type(code=BOOL)},
            expected=[(19,), (99,)],
        )

        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE raw_data = @bytes_1',
            params={'bytes_1': self.BYTES_1},
            param_types={'bytes_1': Type(code=BYTES)},
            expected=[(19,), (99,)],
        )

        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE hwhen = @hwhen',
            params={'hwhen': self.SOME_DATE},
            param_types={'hwhen': Type(code=DATE)},
            expected=[(19,)],
        )

        self._check_sql_results(
            snapshot,
            sql=('SELECT eye_d FROM all_types WHERE approx_value >= @lower'
                 ' AND approx_value < @upper '),
            params={'lower': 0.0, 'upper': 1.0},
            param_types={
                'lower': Type(code=FLOAT64), 'upper': Type(code=FLOAT64)},
            expected=[(None,), (19,)],
        )

        # Find +inf  (NOTE: this comment and the next were previously
        # swapped relative to the parameters they annotate).
        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE approx_value = @pos_inf',
            params={'pos_inf': float('+inf')},
            param_types={'pos_inf': Type(code=FLOAT64)},
            expected=[(107,)],
        )

        # Find -inf
        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE approx_value = @neg_inf',
            params={'neg_inf': float('-inf')},
            param_types={'neg_inf': Type(code=FLOAT64)},
            expected=[(207,)],
        )

        self._check_sql_results(
            snapshot,
            sql='SELECT description FROM all_types WHERE eye_d = @my_id',
            params={'my_id': 19},
            param_types={'my_id': Type(code=INT64)},
            expected=[(u'dog',)],
        )

        # A NULL parameter value matches no rows (see expected=[]).
        self._check_sql_results(
            snapshot,
            sql='SELECT description FROM all_types WHERE eye_d = @my_id',
            params={'my_id': None},
            param_types={'my_id': Type(code=INT64)},
            expected=[],
        )

        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE description = @description',
            params={'description': u'dog'},
            param_types={'description': Type(code=STRING)},
            expected=[(19,)],
        )

        # NaNs cannot be searched for by equality.

        self._check_sql_results(
            snapshot,
            sql='SELECT eye_d FROM all_types WHERE exactly_hwhen = @hwhen',
            params={'hwhen': self.SOME_TIME},
            param_types={'hwhen': Type(code=TIMESTAMP)},
            expected=[(19,)],
        )

        array_type = Type(code=ARRAY, array_element_type=Type(code=INT64))
        self._check_sql_results(
            snapshot,
            sql=('SELECT description FROM all_types '
                 'WHERE eye_d in UNNEST(@my_list)'),
            params={'my_list': [19, 99]},
            param_types={'my_list': array_type},
            expected=[(u'dog',), (u'cat',)],
        )
class TestStreamingChunking(unittest.TestCase, _TestData):
    """Verify reassembly of chunked streaming results of various sizes.

    Requires fixture data created beforehand by
    ``tests/system/utils/populate_streaming.py``.
    """

    @classmethod
    def setUpClass(cls):
        from tests.system.utils.streaming_utils import INSTANCE_NAME
        from tests.system.utils.streaming_utils import DATABASE_NAME

        instance = Config.CLIENT.instance(INSTANCE_NAME)
        if not instance.exists():
            raise unittest.SkipTest(
                "Run 'tests/system/utils/populate_streaming.py' to enable.")

        database = instance.database(DATABASE_NAME)
        # BUGFIX: this previously re-checked ``instance.exists()``, so a
        # missing *database* was never detected and tests failed later
        # with a confusing error instead of being skipped.
        if not database.exists():
            raise unittest.SkipTest(
                "Run 'tests/system/utils/populate_streaming.py' to enable.")

        cls._db = database

    def _verify_one_column(self, table_desc):
        """Every row's ``chunk_me`` must equal the fixture's known value."""
        sql = 'SELECT chunk_me FROM {}'.format(table_desc.table)
        rows = list(self._db.execute_sql(sql))
        self.assertEqual(len(rows), table_desc.row_count)
        expected = table_desc.value()
        for row in rows:
            self.assertEqual(row[0], expected)

    def _verify_two_columns(self, table_desc):
        """Both ``chunk_me`` columns of every row must match the value."""
        sql = 'SELECT chunk_me, chunk_me_2 FROM {}'.format(table_desc.table)
        rows = list(self._db.execute_sql(sql))
        self.assertEqual(len(rows), table_desc.row_count)
        expected = table_desc.value()
        for row in rows:
            self.assertEqual(row[0], expected)
            self.assertEqual(row[1], expected)

    def test_four_kay(self):
        from tests.system.utils.streaming_utils import FOUR_KAY

        self._verify_one_column(FOUR_KAY)

    def test_forty_kay(self):
        # BUGFIX: previously imported (and verified) FOUR_KAY, silently
        # duplicating test_four_kay instead of exercising the 40K fixture.
        from tests.system.utils.streaming_utils import FORTY_KAY

        self._verify_one_column(FORTY_KAY)

    def test_four_hundred_kay(self):
        from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY

        self._verify_one_column(FOUR_HUNDRED_KAY)

    def test_four_meg(self):
        from tests.system.utils.streaming_utils import FOUR_MEG

        self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
    """Placeholder for any user-defined exception.

    NOTE(review): appears intended for tests asserting that user errors
    propagate unchanged -- confirm against the raising call sites.
    """
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
| apache-2.0 |
marcusramberg/dotfiles | bin/.venv-ansible-venv/lib/python2.6/site-packages/Crypto/Random/Fortuna/FortunaAccumulator.py | 105 | 6775 | # -*- coding: ascii -*-
#
# FortunaAccumulator.py : Fortuna's internal accumulator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
from binascii import b2a_hex
import time
import warnings
from Crypto.pct_warnings import ClockRewindWarning
import SHAd256
import FortunaGenerator
class FortunaPool(object):
    """Entropy pool backed by a double-SHA-256 (SHAd256) hash.

    Acts like a hash object, except that:

    - the number of bytes fed in so far is tracked in the ``length``
      attribute,
    - ``reset()`` reinitializes the pool in place,
    - data is added with ``append()`` rather than ``update()``.
    """

    digest_size = SHAd256.digest_size

    def __init__(self):
        self.reset()

    def reset(self):
        self._h = SHAd256.new()
        self.length = 0

    def append(self, data):
        self._h.update(data)
        self.length += len(data)

    def digest(self):
        return self._h.digest()

    def hexdigest(self):
        raw = b2a_hex(self.digest())
        # On Python 2 b2a_hex already yields str; Python 3 needs a decode.
        return raw if sys.version_info[0] == 2 else raw.decode()
def which_pools(r):
    """Return a list of pool indexes (in range(32)) that are to be included during reseed number r.

    According to _Practical Cryptography_, chapter 10.5.2 "Pools":

        "Pool P_i is included if 2**i is a divisor of r.  Thus P_0 is used
        every reseed, P_1 every other reseed, P_2 every fourth reseed, etc."

    :param r: 1-based reseed counter.
    :returns: list of pool indexes, always beginning with 0.
    """
    # This is a separate function so that it can be unit-tested.
    assert r >= 1
    retval = []
    mask = 0
    for i in range(32):
        # "Pool P_i is included if 2**i is a divisor of [reseed_count]",
        # i.e. the low i bits of r are all zero.
        if (r & mask) == 0:
            retval.append(i)
        else:
            break   # optimization. once this fails, it always fails
        # COMPAT FIX: was ``1L``, a syntax error on Python 3; plain ``1``
        # behaves identically under Python 2's int/long unification.
        mask = (mask << 1) | 1
    return retval
class FortunaAccumulator(object):
    """Fortuna entropy accumulator.

    Spreads incoming entropy events across 32 pools and reseeds an
    AES-based generator from them with exponentially decreasing
    frequency, per Ferguson & Schneier, _Practical Cryptography_, ch. 10.
    """

    # An estimate of how many bytes we must append to pool 0 before it will
    # contain 128 bits of entropy (with respect to an attack).  We reseed the
    # generator only after pool 0 contains `min_pool_size` bytes.  Note that
    # unlike with some other PRNGs, Fortuna's security does not rely on the
    # accuracy of this estimate---we can afford to be optimistic here.
    min_pool_size = 64  # size in bytes

    # If an attacker can predict some (but not all) of our entropy sources, the
    # `min_pool_size` check may not be sufficient to prevent a successful state
    # compromise extension attack.  To resist this attack, Fortuna spreads the
    # input across 32 pools, which are then consumed (to reseed the output
    # generator) with exponentially decreasing frequency.
    #
    # In order to prevent an attacker from gaining knowledge of all 32 pools
    # before we have a chance to fill them with enough information that the
    # attacker cannot predict, we impose a rate limit of 10 reseeds/second (one
    # per 100 ms).  This ensures that a hypothetical 33rd pool would only be
    # needed after a minimum of 13 years of sustained attack.
    reseed_interval = 0.100  # time in seconds

    def __init__(self):
        self.reseed_count = 0
        self.generator = FortunaGenerator.AESGenerator()
        self.last_reseed = None

        # Initialize 32 FortunaPool instances.
        # NB: This is _not_ equivalent to [FortunaPool()]*32, which would give
        # us 32 references to the _same_ FortunaPool instance (and cause the
        # assertion below to fail).
        self.pools = [FortunaPool() for i in range(32)]  # 32 pools
        assert(self.pools[0] is not self.pools[1])

    def _forget_last_reseed(self):
        # This is not part of the standard Fortuna definition, and using this
        # function frequently can weaken Fortuna's ability to resist a state
        # compromise extension attack, but we need this in order to properly
        # implement Crypto.Random.atfork().  Otherwise, forked child processes
        # might continue to use their parent's PRNG state for up to 100ms in
        # some cases. (e.g. CVE-2013-1445)
        self.last_reseed = None

    def random_data(self, bytes):
        """Return *bytes* bytes of pseudo-random data, reseeding first if
        pool 0 is full enough and the reseed rate limit allows.

        (The parameter name shadows the ``bytes`` builtin; kept for
        interface compatibility.)
        """
        current_time = time.time()
        if (self.last_reseed is not None and self.last_reseed > current_time): # Avoid float comparison to None to make Py3k happy
            warnings.warn("Clock rewind detected. Resetting last_reseed.", ClockRewindWarning)
            self.last_reseed = None
        if (self.pools[0].length >= self.min_pool_size and
            (self.last_reseed is None or
             current_time > self.last_reseed + self.reseed_interval)):
            self._reseed(current_time)
        # The following should fail if we haven't seeded the pool yet.
        return self.generator.pseudo_random_data(bytes)

    def _reseed(self, current_time=None):
        # Concatenate the digests of the pools selected for this reseed
        # round, resetting each pool as it is consumed.
        if current_time is None:
            current_time = time.time()
        seed = []
        self.reseed_count += 1
        self.last_reseed = current_time
        for i in which_pools(self.reseed_count):
            seed.append(self.pools[i].digest())
            self.pools[i].reset()

        seed = b("").join(seed)
        self.generator.reseed(seed)

    def add_random_event(self, source_number, pool_number, data):
        """Mix an entropy event into the given pool, framed as
        (source byte, length byte, payload)."""
        assert 1 <= len(data) <= 32
        assert 0 <= source_number <= 255
        assert 0 <= pool_number <= 31
        self.pools[pool_number].append(bchr(source_number))
        self.pools[pool_number].append(bchr(len(data)))
        self.pools[pool_number].append(data)
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
Nirvedh/CoarseCoherence | tests/configs/simple-atomic-mp.py | 69 | 2376 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from base_config import *
# Build a 4-core atomic-mode syscall-emulation system and expose its root.
nb_cores = 4
root = BaseSESystem(mem_mode='atomic', cpu_class=AtomicSimpleCPU,
                    num_cpus=nb_cores).create_root()
| bsd-3-clause |
h3llrais3r/SickRage | lib/babelfish/country.py | 37 | 3242 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from collections import namedtuple
from functools import partial
from pkg_resources import resource_stream # @UnresolvedImport
from .converters import ConverterManager
from . import basestr
#: Mapping of ISO-3166-1 alpha-2 code to English country name
COUNTRIES = {}

#: All countries, as :data:`IsoCountry` records, in file order
COUNTRY_MATRIX = []

#: The namedtuple used in the :data:`COUNTRY_MATRIX`
IsoCountry = namedtuple('IsoCountry', ['name', 'alpha2'])

# Populate both tables from the bundled ISO-3166-1 data file: the first
# line is a header; each following line is 'name;alpha2' in UTF-8.
f = resource_stream('babelfish', 'data/iso-3166-1.txt')
f.readline()
for l in f:
    iso_country = IsoCountry(*l.decode('utf-8').strip().split(';'))
    COUNTRIES[iso_country.alpha2] = iso_country.name
    COUNTRY_MATRIX.append(iso_country)
f.close()
class CountryConverterManager(ConverterManager):
    """:class:`~babelfish.converters.ConverterManager` for country converters"""
    entry_point = 'babelfish.country_converters'
    internal_converters = ['name = babelfish.converters.countryname:CountryNameConverter']

#: Shared converter registry used by :class:`Country` attribute lookups
country_converters = CountryConverterManager()
class CountryMeta(type):
    """The :class:`Country` metaclass

    Dynamically redirect :meth:`Country.frommycode` to
    :meth:`Country.fromcode` with the ``mycode`` `converter`
    """
    def __getattr__(cls, name):
        if not name.startswith('from'):
            return type.__getattribute__(cls, name)
        # e.g. ``fromalpha3b`` -> fromcode(..., converter='alpha3b')
        return partial(cls.fromcode, converter=name[4:])
class Country(CountryMeta(str('CountryBase'), (object,), {})):
    """A country on Earth

    A country is represented by a 2-letter code from the ISO-3166 standard

    :param string country: 2-letter ISO-3166 country code

    """
    def __init__(self, country):
        if country not in COUNTRIES:
            raise ValueError('%r is not a valid country' % country)

        #: ISO-3166 2-letter country code
        self.alpha2 = country

    @classmethod
    def fromcode(cls, code, converter):
        """Create a :class:`Country` by its `code` using `converter` to
        :meth:`~babelfish.converters.CountryReverseConverter.reverse` it

        :param string code: the code to reverse
        :param string converter: name of the :class:`~babelfish.converters.CountryReverseConverter` to use
        :return: the corresponding :class:`Country` instance
        :rtype: :class:`Country`

        """
        return cls(country_converters[converter].reverse(code))

    def __getstate__(self):
        # Pickle support: the alpha2 code fully determines the instance.
        return self.alpha2

    def __setstate__(self, state):
        self.alpha2 = state

    def __getattr__(self, name):
        # Unknown attributes are treated as converter names, e.g.
        # ``country.name`` converts via the registered 'name' converter.
        try:
            return country_converters[name].convert(self.alpha2)
        except KeyError:
            raise AttributeError(name)

    def __hash__(self):
        return hash(self.alpha2)

    def __eq__(self, other):
        # A Country also compares equal to the bare alpha2 string.
        if isinstance(other, basestr):
            return str(self) == other
        if not isinstance(other, Country):
            return False
        return self.alpha2 == other.alpha2

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return '<Country [%s]>' % self

    def __str__(self):
        return self.alpha2
| gpl-3.0 |
mikebenfield/scikit-learn | examples/linear_model/plot_sparse_logistic_regression_20newsgroups.py | 56 | 4172 | """
=====================================================
Multiclass sparse logisitic regression on newgroups20
=====================================================
Comparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression
to classify documents from the newgroups20 dataset. Multinomial logistic
regression yields more accurate results and is faster to train on the larger
scale dataset.
Here we use the l1 sparsity that trims the weights of not informative
features to zero. This is good if the goal is to extract the strongly
discriminative vocabulary of each class. If the goal is to get the best
predictive accuracy, it is better to use the non sparsity-inducing l2 penalty
instead.
A more traditional (and possibly better) way to predict on a sparse subset of
input features would be to use univariate feature selection followed by a
traditional (l2-penalised) logistic regression model.
"""
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
print(__doc__)

# Author: Arthur Mensch

# COMPAT FIX: time.clock() (used previously) was deprecated since Python
# 3.3 and removed in 3.8; use wall-clock time.time() for all timings.
t0 = time.time()

# We use SAGA solver
solver = 'saga'

# Turn down for faster run time
n_samples = 10000

# Memoized fetch_20newsgroups_vectorized for faster access
dataset = fetch_20newsgroups_vectorized('all')
X = dataset.data
y = dataset.target
X = X[:n_samples]
y = y[:n_samples]

X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    random_state=42,
                                                    stratify=y,
                                                    test_size=0.1)
train_samples, n_features = X_train.shape
n_classes = np.unique(y).shape[0]

print('Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i'
      % (train_samples, n_features, n_classes))

models = {'ovr': {'name': 'One versus Rest', 'iters': [1, 3]},
          'multinomial': {'name': 'Multinomial', 'iters': [1, 3, 7]}}

for model in models:
    # Add initial chance-level values for plotting purpose
    accuracies = [1 / n_classes]
    times = [0]
    densities = [1]

    model_params = models[model]

    # Small number of epochs for fast runtime
    for this_max_iter in model_params['iters']:
        print('[model=%s, solver=%s] Number of epochs: %s' %
              (model_params['name'], solver, this_max_iter))
        lr = LogisticRegression(solver=solver,
                                multi_class=model,
                                C=1,
                                penalty='l1',
                                fit_intercept=True,
                                max_iter=this_max_iter,
                                random_state=42,
                                )
        t1 = time.time()
        lr.fit(X_train, y_train)
        train_time = time.time() - t1

        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        density = np.mean(lr.coef_ != 0, axis=1) * 100
        accuracies.append(accuracy)
        densities.append(density)
        times.append(train_time)

    models[model]['times'] = times
    models[model]['densities'] = densities
    models[model]['accuracies'] = accuracies
    print('Test accuracy for model %s: %.4f' % (model, accuracies[-1]))
    print('%% non-zero coefficients for model %s, '
          'per class:\n %s' % (model, densities[-1]))
    print('Run time (%i epochs) for model %s:'
          '%.2f' % (model_params['iters'][-1], model, times[-1]))

fig = plt.figure()
ax = fig.add_subplot(111)

for model in models:
    name = models[model]['name']
    times = models[model]['times']
    accuracies = models[model]['accuracies']
    ax.plot(times, accuracies, marker='o',
            label='Model: %s' % name)

# Axis labels / legend only need to be set once, after all lines exist.
ax.set_xlabel('Train time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
fig.suptitle('Multinomial vs One-vs-Rest Logistic L1\n'
             'Dataset %s' % '20newsgroups')
fig.tight_layout()
fig.subplots_adjust(top=0.85)

run_time = time.time() - t0
print('Example run in %.3f s' % run_time)

plt.show()
| bsd-3-clause |
ejeschke/ginga | ginga/util/stages/flipswap.py | 3 | 4096 | # This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import trcalc
from ginga.gw import Widgets
from ginga.util import action
from .base import Stage
class FlipSwap(Stage):
    """Pipeline stage applying flip-X / flip-Y / swap-XY transforms."""

    _stagename = 'flip-swap'

    def __init__(self):
        super(FlipSwap, self).__init__()

        # Internal transform state; exposed via properties below so the
        # GUI checkboxes stay in sync when set programmatically.
        self._flip_x = False
        self._flip_y = False
        self._swap_xy = False
        self.viewer = None

    def build_gui(self, container):
        """Construct the "Flip / Swap" control panel inside *container*."""
        self.viewer = self.pipeline.get('viewer')

        # TRANSFORM OPTIONS
        fr = Widgets.Frame("Flip / Swap")

        captions = (('Transform:', 'label', 'hbox1', 'hbox'),
                    ('_sp1', 'spacer', 'hbox2', 'hbox'),
                    )
        w, b = Widgets.build_info(captions, orientation='vertical')

        # One checkbox per transform flag, all sharing the same callback.
        for wname, name, tf in (('flip_x', "Flip X", self._flip_x),
                                ('flip_y', "Flip Y", self._flip_y),
                                ('swap_xy', "Swap XY", self._swap_xy)):
            btn = Widgets.CheckBox(name)
            b[wname] = btn
            btn.set_state(tf)
            btn.add_callback('activated', self.set_transforms_cb)
            b.hbox1.add_widget(btn, stretch=0)
        b.flip_x.set_tooltip("Flip the image around the X axis")
        b.flip_y.set_tooltip("Flip the image around the Y axis")
        b.swap_xy.set_tooltip("Swap the X and Y axes in the image")

        b.copy_from_viewer = Widgets.Button("Copy from viewer")
        b.copy_from_viewer.set_tooltip("Copy flip/swap setting from viewer")
        b.copy_from_viewer.add_callback('activated', self.copy_from_viewer_cb)
        b.hbox2.add_widget(b.copy_from_viewer, stretch=0)
        b.hbox2.add_widget(Widgets.Label(''), stretch=1)
        self.w.update(b)

        fr.set_widget(w)
        container.set_widget(fr)

    @property
    def flip_x(self):
        return self._flip_x

    @flip_x.setter
    def flip_x(self, tf):
        # Keep the checkbox in sync when the flag is set programmatically.
        self._flip_x = tf
        if self.gui_up:
            self.w.flip_x.set_state(tf)

    @property
    def flip_y(self):
        return self._flip_y

    @flip_y.setter
    def flip_y(self, tf):
        self._flip_y = tf
        if self.gui_up:
            self.w.flip_y.set_state(tf)

    @property
    def swap_xy(self):
        return self._swap_xy

    @swap_xy.setter
    def swap_xy(self, tf):
        self._swap_xy = tf
        if self.gui_up:
            self.w.swap_xy.set_state(tf)

    def _get_state(self):
        """Snapshot the three flags as a dict (recorded in AttrAction
        old/new entries)."""
        return dict(flip_x=self._flip_x, flip_y=self._flip_y,
                    swap_xy=self._swap_xy)

    def set_transforms_cb(self, *args):
        """GUI callback: adopt checkbox states, record the state change
        and re-run the pipeline from this stage."""
        old = self._get_state()
        self._flip_x = self.w.flip_x.get_state()
        self._flip_y = self.w.flip_y.get_state()
        self._swap_xy = self.w.swap_xy.get_state()
        new = self._get_state()
        self.pipeline.push(action.AttrAction(self, old, new,
                                             descr="flip / swap"))
        self.pipeline.run_from(self)

    def copy_from_viewer_cb(self, widget):
        """GUI callback: adopt the viewer's current transform settings."""
        old = self._get_state()
        self.flip_x, self.flip_y, self.swap_xy = self.viewer.get_transforms()
        new = self._get_state()
        self.pipeline.push(action.AttrAction(self, old, new,
                                             descr="flip / swap"))
        self.pipeline.run_from(self)

    def run(self, prev_stage):
        """Apply the configured transforms to the incoming 2D array and
        forward the result; pass data through unchanged when bypassed."""
        data = self.pipeline.get_data(prev_stage)
        self.verify_2d(data)

        if self._bypass or data is None:
            self.pipeline.send(res_np=data)
            return

        res_np = trcalc.transform(data, flip_x=self.flip_x, flip_y=self.flip_y,
                                  swap_xy=self.swap_xy)

        self.pipeline.send(res_np=res_np)

    def export_as_dict(self):
        """Serialize this stage's flags on top of the base-class dict."""
        d = super(FlipSwap, self).export_as_dict()
        d.update(self._get_state())
        return d

    def import_from_dict(self, d):
        """Restore flags via the property setters so the GUI refreshes."""
        super(FlipSwap, self).import_from_dict(d)
        self.flip_x = d['flip_x']
        self.flip_y = d['flip_y']
        self.swap_xy = d['swap_xy']
| bsd-3-clause |
CityofPittsburgh/pittsburgh-purchasing-suite | purchasing_test/factories.py | 1 | 4495 | # -*- coding: utf-8 -*-
import datetime
import factory
from factory.alchemy import SQLAlchemyModelFactory
from purchasing.database import db
from purchasing.users.models import User, Role, Department
from purchasing.data.contracts import ContractBase, ContractProperty, ContractType
from purchasing.data.companies import Company
from purchasing.data.flows import Flow
from purchasing.data.stages import Stage
from purchasing.data.contract_stages import ContractStageActionItem, ContractStage
from purchasing.public.models import AcceptedEmailDomains
from purchasing.opportunities.models import (
Opportunity, RequiredBidDocument, OpportunityDocument, Category,
Vendor
)
from purchasing.jobs.job_base import JobStatus
class BaseFactory(SQLAlchemyModelFactory):
    """Abstract base binding every factory to the shared SQLAlchemy session."""
    class Meta:
        abstract = True
        sqlalchemy_session = db.session
# --- User-model factories ---------------------------------------------------

class RoleFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)
    name = factory.Sequence(lambda n: '{}'.format(n))

    class Meta:
        model = Role

class DepartmentFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)
    name = factory.Sequence(lambda n: 'department{}'.format(n))

    class Meta:
        model = Department

class UserFactory(BaseFactory):
    # id offset by 100 -- presumably to avoid colliding with ids created
    # by other fixtures; confirm against the test setup.
    id = factory.Sequence(lambda n: n + 100)
    email = factory.Sequence(lambda n: '{}@foo.com'.format(n))
    created_at = factory.Sequence(lambda n: datetime.datetime.now())
    first_name = factory.Sequence(lambda n: '{}'.format(n))
    last_name = factory.Sequence(lambda n: '{}'.format(n))
    department = factory.SubFactory(DepartmentFactory)
    active = factory.Sequence(lambda n: True)
    confirmed_at = factory.Sequence(lambda n: datetime.datetime.now())

    @factory.post_generation
    def roles(self, create, extracted, **kwargs):
        # Append any roles passed as UserFactory(roles=[...]) after build.
        if extracted:
            for role in extracted:
                self.roles.append(role)

    class Meta:
        model = User
# --- Conductor-data factories: flows, stages, companies, contracts ----------
# NOTE(review): the differing id offsets (n, n + 10, n + 100) presumably
# keep primary keys from colliding across related fixtures -- confirm.

class FlowFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)
    flow_name = factory.Sequence(lambda n: '{}'.format(n))
    stage_order = factory.Sequence(lambda n: n)

    class Meta:
        model = Flow

class StageFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)
    name = factory.Sequence(lambda n: '{}'.format(n))
    post_opportunities = factory.Sequence(lambda n: n)

    class Meta:
        model = Stage

class CompanyFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)

    class Meta:
        model = Company

class ContractTypeFactory(BaseFactory):
    id = factory.Sequence(lambda n: n + 100)

    class Meta:
        model = ContractType

class ContractStageFactory(BaseFactory):
    id = factory.Sequence(lambda n: 10 + n)
    stage = factory.SubFactory(StageFactory)
    flow = factory.SubFactory(FlowFactory)

    class Meta:
        model = ContractStage

class ContractBaseFactory(BaseFactory):
    id = factory.Sequence(lambda n: 100 + n)
    contract_type = factory.SubFactory(ContractTypeFactory)

    class Meta:
        model = ContractBase

class ContractPropertyFactory(BaseFactory):
    id = factory.Sequence(lambda n: n + 10)
    contract = factory.SubFactory(ContractBaseFactory)

    class Meta:
        model = ContractProperty

class ContractStageActionItemFactory(BaseFactory):
    id = factory.Sequence(lambda n: n + 10)
    contract_stage = factory.SubFactory(ContractStageFactory)

    class Meta:
        model = ContractStageActionItem
# --- Opportunities-module factories -----------------------------------------
# Classes declaring no fields rely entirely on their model's defaults.

class CategoryFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)
    category_friendly_name = 'i am friendly!'

    class Meta:
        model = Category

class OpportunityFactory(BaseFactory):
    id = factory.Sequence(lambda n: n + 100)
    department = factory.SubFactory(DepartmentFactory)
    contact = factory.SubFactory(UserFactory)
    created_by = factory.SubFactory(UserFactory)

    class Meta:
        model = Opportunity

class VendorFactory(BaseFactory):
    id = factory.Sequence(lambda n: n)
    email = factory.Sequence(lambda n: '{}@foo.com'.format(n))
    business_name = factory.Sequence(lambda n: '{}'.format(n))

    class Meta:
        model = Vendor

class RequiredBidDocumentFactory(BaseFactory):
    class Meta:
        model = RequiredBidDocument

class OpportunityDocumentFactory(BaseFactory):
    class Meta:
        model = OpportunityDocument

class JobStatusFactory(BaseFactory):
    class Meta:
        model = JobStatus

class AcceptedEmailDomainsFactory(BaseFactory):
    class Meta:
        model = AcceptedEmailDomains
| bsd-3-clause |
hehongliang/tensorflow | tensorflow/python/data/experimental/benchmarks/matching_files_benchmark.py | 1 | 3516 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for the experimental `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class MatchingFilesBenchmark(test.Benchmark):
  """Benchmark for the experimental `MatchingFilesDataset`."""

  def benchmarkNestedDirectories(self):
    """Time per-filename retrieval over a wide, deep directory tree."""
    tmp_dir = tempfile.mkdtemp()
    width = 500
    depth = 10
    for i in range(width):
      for j in range(depth):
        new_base = os.path.join(tmp_dir, str(i),
                                *[str(dir_name) for dir_name in range(j)])
        os.makedirs(new_base)
        # Only the deepest level gets files matching the *.txt / *.log
        # patterns below; shallower levels get non-matching files.
        child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
        for f in child_files:
          filename = os.path.join(new_base, f)
          open(filename, 'w').close()

    patterns = [
        os.path.join(tmp_dir, os.path.join(*['**'
                                             for _ in range(depth)]), suffix)
        for suffix in ['*.txt', '*.log']
    ]

    deltas = []
    iters = 3
    for _ in range(iters):
      with ops.Graph().as_default():
        dataset = matching_files.MatchingFilesDataset(patterns)
        next_element = dataset.make_one_shot_iterator().get_next()

        with session.Session() as sess:
          sub_deltas = []
          while True:
            try:
              # Time each individual filename fetch until exhaustion.
              start = time.time()
              sess.run(next_element)
              end = time.time()
              sub_deltas.append(end - start)
            except errors.OutOfRangeError:
              break
          deltas.append(sub_deltas)

    # Median across iterations, elementwise per matched filename.
    median_deltas = np.median(deltas, axis=0)

    print('Nested directory size (width*depth): %d*%d Median wall time: '
          '%fs (read first filename), %fs (read second filename), avg %fs'
          ' (read %d more filenames)' %
          (width, depth, median_deltas[0], median_deltas[1],
           np.average(median_deltas[2:]), len(median_deltas) - 2))
    self.report_benchmark(
        iters=iters,
        wall_time=np.sum(median_deltas),
        extras={
            'read first file:':
                median_deltas[0],
            'read second file:':
                median_deltas[1],
            'avg time for reading %d more filenames:' %
            (len(median_deltas) - 2):
                np.average(median_deltas[2:])
        },
        name='benchmark_matching_files_dataset_nesteddirectory(%d*%d)' %
        (width, depth))

    # NOTE(review): cleanup only runs when the benchmark completes; an
    # exception above leaks tmp_dir.
    shutil.rmtree(tmp_dir, ignore_errors=True)
# Entry point: run via the TensorFlow test/benchmark runner.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
zvolsky/krepo | languages/hi.py | 164 | 7445 | # coding: utf8
# web2py translation dictionary for Hindi (hi-in).
# Keys are the original English strings passed to T(...); values are the
# Hindi translations. Untranslated entries keep the English text verbatim.
# The special '!langcode!' / '!langname!' keys identify the language itself.
# NOTE: the values are runtime data consumed by web2py — do not edit casually.
{
'!langcode!': 'hi-in',
'!langname!': 'हिन्दी',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s पंक्तियाँ मिटाएँ',
'%s %%{row} updated': '%s पंक्तियाँ अद्यतन',
'%s selected': '%s चुना हुआ',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'प्रशासनिक इंटरफेस के लिए यहाँ क्लिक करें',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'अप आडमिन (appadmin) अक्षम है क्योंकि असुरक्षित चैनल',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'उपलब्ध डेटाबेस और तालिका',
'Buy this book': 'Buy this book',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'खाली नहीं हो सकता',
'Change Password': 'पासवर्ड बदलें',
'change password': 'change password',
'Check to delete': 'हटाने के लिए चुनें',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'वर्तमान अनुरोध',
'Current response': 'वर्तमान प्रतिक्रिया',
'Current session': 'वर्तमान सेशन',
'customize me!': 'मुझे अनुकूलित (कस्टमाइज़) करें!',
'data uploaded': 'डाटा अपलोड सम्पन्न ',
'Database': 'डेटाबेस',
'Database %s select': 'डेटाबेस %s चुनी हुई',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'मिटाना:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'design': 'रचना करें',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'हो गया!',
'Download': 'Download',
'Edit': 'Edit',
'Edit current record': 'वर्तमान रेकॉर्ड संपादित करें ',
'edit profile': 'edit profile',
'Edit Profile': 'प्रोफ़ाइल संपादित करें',
'Edit This App': 'Edit This App',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'csv फ़ाइल के रूप में निर्यात',
'FAQ': 'FAQ',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Groups': 'Groups',
'Hello from MyApp': 'Hello from MyApp',
'Hello World': 'Hello World',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'आयात / निर्यात',
'Index': 'Index',
'insert new': 'नया डालें',
'insert new %s': 'नया %s डालें',
'Internal State': 'आंतरिक स्थिति',
'Introduction': 'Introduction',
'Invalid Query': 'अमान्य प्रश्न',
'invalid request': 'अवैध अनुरोध',
'Key': 'Key',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'login',
'Login': 'लॉग इन',
'logout': 'logout',
'Logout': 'लॉग आउट',
'Lost Password': 'पासवर्ड खो गया',
'Main Menu': 'Main Menu',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Model',
'My Sites': 'My Sites',
'New Record': 'नया रेकॉर्ड',
'new record inserted': 'नया रेकॉर्ड डाला',
'next 100 rows': 'अगले 100 पंक्तियाँ',
'No databases in this application': 'इस अनुप्रयोग में कोई डेटाबेस नहीं हैं',
'Online examples': 'ऑनलाइन उदाहरण के लिए यहाँ क्लिक करें',
'or import from csv file': 'या csv फ़ाइल से आयात',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'पिछले 100 पंक्तियाँ',
'Python': 'Python',
'Query:': 'प्रश्न:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'Record',
'record does not exist': 'रिकॉर्ड मौजूद नहीं है',
'Record id': 'रिकॉर्ड पहचानकर्ता (आईडी)',
'Register': 'पंजीकृत (रजिस्टर) करना ',
'register': 'register',
'Rows in Table': 'तालिका में पंक्तियाँ ',
'Rows selected': 'चयनित (चुने गये) पंक्तियाँ ',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'स्थिति',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'सुनिश्चित हैं कि आप इस वस्तु को हटाना चाहते हैं?',
'Table': 'तालिका',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Twitter': 'Twitter',
'unable to parse csv file': 'csv फ़ाइल पार्स करने में असमर्थ',
'Update:': 'अद्यतन करना:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'Videos': 'Videos',
'View': 'View',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'वेब२पाइ (web2py) में आपका स्वागत है',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
rhinstaller/anaconda | pyanaconda/modules/timezone/timezone.py | 4 | 7427 | #
# Kickstart module for date and time settings.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pykickstart.errors import KickstartParseError
from pyanaconda.core.i18n import _
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.core.constants import TIME_SOURCE_SERVER, TIME_SOURCE_POOL
from pyanaconda.core.dbus import DBus
from pyanaconda.core.signal import Signal
from pyanaconda.modules.common.base import KickstartService
from pyanaconda.modules.common.constants.services import TIMEZONE
from pyanaconda.modules.common.structures.timezone import TimeSourceData
from pyanaconda.timezone import NTP_PACKAGE
from pyanaconda.modules.common.containers import TaskContainer
from pyanaconda.modules.common.structures.requirement import Requirement
from pyanaconda.modules.timezone.installation import ConfigureNTPTask, ConfigureTimezoneTask
from pyanaconda.modules.timezone.kickstart import TimezoneKickstartSpecification
from pyanaconda.modules.timezone.timezone_interface import TimezoneInterface
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
class TimezoneService(KickstartService):
    """The Timezone service.

    Holds the installation's timezone, hardware-clock convention (UTC or
    local time), NTP enablement and the configured time sources, and
    publishes them on D-Bus. Values can be set from kickstart
    (timezone / timesource commands) or at runtime; every setter emits
    the corresponding change signal.
    """

    def __init__(self):
        super().__init__()
        # Olson-format timezone name, e.g. "Europe/Prague".
        self.timezone_changed = Signal()
        self._timezone = "America/New_York"

        # True if the hardware clock is kept in UTC rather than local time.
        self.is_utc_changed = Signal()
        self._is_utc = False

        # Whether the NTP service should be enabled on the target system.
        self.ntp_enabled_changed = Signal()
        self._ntp_enabled = True

        # List of TimeSourceData (NTP servers and pools).
        self.time_sources_changed = Signal()
        self._time_sources = []

    def publish(self):
        """Publish the module on D-Bus."""
        TaskContainer.set_namespace(TIMEZONE.namespace)
        DBus.publish_object(TIMEZONE.object_path, TimezoneInterface(self))
        DBus.register_service(TIMEZONE.service_name)

    @property
    def kickstart_specification(self):
        """Return the kickstart specification."""
        return TimezoneKickstartSpecification

    def process_kickstart(self, data):
        """Process the kickstart data.

        :param data: parsed kickstart data
        :raise KickstartParseError: if a timesource entry specifies
            neither a server nor a pool
        """
        self.set_timezone(data.timezone.timezone)
        self.set_is_utc(data.timezone.isUtc)
        self.set_ntp_enabled(not data.timezone.nontp)

        sources = []

        # Servers from the legacy "timezone --ntpservers" option.
        for hostname in data.timezone.ntpservers:
            source = TimeSourceData()
            source.type = TIME_SOURCE_SERVER
            source.hostname = hostname
            source.options = ["iburst"]
            sources.append(source)

        # Sources from the "timesource" kickstart command.
        for source_data in data.timesource.dataList():
            if source_data.ntp_disable:
                self.set_ntp_enabled(False)
                continue

            source = TimeSourceData()
            source.options = ["iburst"]

            if source_data.ntp_server:
                source.type = TIME_SOURCE_SERVER
                source.hostname = source_data.ntp_server
            elif source_data.ntp_pool:
                source.type = TIME_SOURCE_POOL
                source.hostname = source_data.ntp_pool
            else:
                # FIX: the error was previously constructed but never
                # raised, so an invalid time source was silently ignored
                # and a half-initialized source was appended.
                raise KickstartParseError(
                    _("Invalid time source."),
                    lineno=source_data.lineno
                )

            if source_data.nts:
                source.options.append("nts")

            sources.append(source)

        self.set_time_sources(sources)

    def setup_kickstart(self, data):
        """Set up the kickstart data from the current configuration."""
        data.timezone.timezone = self.timezone
        data.timezone.isUtc = self.is_utc

        source_data_list = data.timesource.dataList()

        # NTP disabled is expressed as a single "timesource --ntp-disable".
        if not self.ntp_enabled:
            source_data = data.TimesourceData()
            source_data.ntp_disable = True
            source_data_list.append(source_data)
            return

        for source in self.time_sources:
            source_data = data.TimesourceData()

            if source.type == TIME_SOURCE_SERVER:
                source_data.ntp_server = source.hostname
            elif source.type == TIME_SOURCE_POOL:
                source_data.ntp_pool = source.hostname
            else:
                # Unknown source type: don't fail generation, just skip it.
                log.warning("Skipping %s.", source)
                continue

            if "nts" in source.options:
                source_data.nts = True

            source_data_list.append(source_data)

    @property
    def timezone(self):
        """Return the timezone."""
        return self._timezone

    def set_timezone(self, timezone):
        """Set the timezone.

        :param timezone: Olson-format timezone name
        """
        self._timezone = timezone
        self.timezone_changed.emit()
        log.debug("Timezone is set to %s.", timezone)

    @property
    def is_utc(self):
        """Is the hardware clock set to UTC?"""
        return self._is_utc

    def set_is_utc(self, is_utc):
        """Set if the hardware clock is set to UTC."""
        self._is_utc = is_utc
        self.is_utc_changed.emit()
        log.debug("UTC is set to %s.", is_utc)

    @property
    def ntp_enabled(self):
        """Enable automatic starting of NTP service."""
        return self._ntp_enabled

    def set_ntp_enabled(self, ntp_enabled):
        """Enable or disable automatic starting of NTP service."""
        self._ntp_enabled = ntp_enabled
        self.ntp_enabled_changed.emit()
        log.debug("NTP is set to %s.", ntp_enabled)

    @property
    def time_sources(self):
        """Return a list of time sources."""
        return self._time_sources

    def set_time_sources(self, servers):
        """Set time sources.

        :param servers: a list of TimeSourceData
        """
        # Copy defensively so later mutation of the caller's list
        # doesn't change our state.
        self._time_sources = list(servers)
        self.time_sources_changed.emit()
        log.debug("Time sources are set to: %s", servers)

    def collect_requirements(self):
        """Return installation requirements for this module.

        :return: a list of requirements
        """
        requirements = []

        # Add ntp service requirements.
        if self._ntp_enabled:
            requirements.append(
                Requirement.for_package(NTP_PACKAGE, reason="Needed to run NTP service.")
            )

        return requirements

    def install_with_tasks(self):
        """Return the installation tasks of this module.

        :return: list of installation tasks
        """
        return [
            ConfigureTimezoneTask(
                sysroot=conf.target.system_root,
                timezone=self.timezone,
                is_utc=self.is_utc
            ),
            ConfigureNTPTask(
                sysroot=conf.target.system_root,
                ntp_enabled=self.ntp_enabled,
                ntp_servers=self.time_sources
            )
        ]
| gpl-2.0 |
tdsimao/tt | django/contrib/comments/views/comments.py | 5 | 5271 | from django import http
from django.conf import settings
from utils import next_redirect, confirmation_view
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.html import escape
from django.views.decorators.http import require_POST
from django.contrib import comments
from django.contrib.comments import signals
from django.views.decorators.csrf import csrf_protect
class CommentPostBadRequest(http.HttpResponseBadRequest):
    """
    HTTP 400 response for an invalid comment post.

    With ``DEBUG`` enabled the body is rendered from the
    ``comments/400-debug.html`` template so the reason is visible while
    debugging; in production the stock opaque 400 page is returned.
    """
    def __init__(self, why):
        super(CommentPostBadRequest, self).__init__()
        # Outside of DEBUG, deliberately reveal nothing about the failure.
        if not settings.DEBUG:
            return
        self.content = render_to_string("comments/400-debug.html", {"why": why})
@csrf_protect
@require_POST
def post_comment(request, next=None, using=None):
    """
    Post a comment.

    HTTP POST is required. If ``POST['submit'] == "preview"`` or if there are
    errors a preview template, ``comments/preview.html``, will be rendered.

    ``next`` is the URL to redirect to after a successful post; ``using``
    selects the database alias used to look up the commented object.
    Returns a 400 (``CommentPostBadRequest``) for any malformed input.
    """
    # Fill out some initial data fields from an authenticated user, if present
    data = request.POST.copy()
    if request.user.is_authenticated():
        if not data.get('name', ''):
            data["name"] = request.user.get_full_name() or request.user.username
        if not data.get('email', ''):
            data["email"] = request.user.email

    # Look up the object we're trying to comment about
    ctype = data.get("content_type")
    object_pk = data.get("object_pk")
    if ctype is None or object_pk is None:
        return CommentPostBadRequest("Missing content_type or object_pk field.")
    try:
        # ctype is "app_label.model_name"; resolve it and fetch the target
        # object on the requested database alias.
        model = models.get_model(*ctype.split(".", 1))
        target = model._default_manager.using(using).get(pk=object_pk)
    except TypeError:
        # get_model got a malformed "app.model" string.
        return CommentPostBadRequest(
            "Invalid content_type value: %r" % escape(ctype))
    except AttributeError:
        # get_model returned None -> no manager attribute.
        return CommentPostBadRequest(
            "The given content-type %r does not resolve to a valid model." % \
                escape(ctype))
    except ObjectDoesNotExist:
        return CommentPostBadRequest(
            "No object matching content-type %r and object PK %r exists." % \
                (escape(ctype), escape(object_pk)))
    except (ValueError, ValidationError), e:
        # e.g. a non-numeric PK for an integer primary key.
        return CommentPostBadRequest(
            "Attempting go get content-type %r and object PK %r exists raised %s" % \
                (escape(ctype), escape(object_pk), e.__class__.__name__))

    # Do we want to preview the comment?
    preview = "preview" in data

    # Construct the comment form
    form = comments.get_form()(target, data=data)

    # Check security information
    if form.security_errors():
        return CommentPostBadRequest(
            "The comment form failed security verification: %s" % \
                escape(str(form.security_errors())))

    # If there are errors or if we requested a preview show the comment
    if form.errors or preview:
        template_list = [
            # These first two exist for purely historical reasons.
            # Django v1.0 and v1.1 allowed the underscore format for
            # preview templates, so we have to preserve that format.
            "comments/%s_%s_preview.html" % (model._meta.app_label, model._meta.module_name),
            "comments/%s_preview.html" % model._meta.app_label,
            # Now the usual directory based template heirarchy.
            "comments/%s/%s/preview.html" % (model._meta.app_label, model._meta.module_name),
            "comments/%s/preview.html" % model._meta.app_label,
            "comments/preview.html",
        ]
        return render_to_response(
            template_list, {
                "comment": form.data.get("comment", ""),
                "form": form,
                "next": data.get("next", next),
            },
            RequestContext(request, {})
        )

    # Otherwise create the comment
    comment = form.get_comment_object()
    comment.ip_address = request.META.get("REMOTE_ADDR", None)
    if request.user.is_authenticated():
        comment.user = request.user

    # Signal that the comment is about to be saved; any receiver returning
    # False vetoes the post.
    responses = signals.comment_will_be_posted.send(
        sender  = comment.__class__,
        comment = comment,
        request = request
    )

    for (receiver, response) in responses:
        if response == False:
            return CommentPostBadRequest(
                "comment_will_be_posted receiver %r killed the comment" % receiver.__name__)

    # Save the comment and signal that it was saved
    comment.save()
    signals.comment_was_posted.send(
        sender  = comment.__class__,
        comment = comment,
        request = request
    )

    return next_redirect(request, next, comment_done, c=comment._get_pk_val())
# Confirmation view shown after a successful post (target of the redirect
# at the end of post_comment); rendered from "comments/posted.html".
comment_done = confirmation_view(
    template = "comments/posted.html",
    doc = """Display a "comment was posted" success page."""
)
| gpl-2.0 |
MadcowD/libgdx | extensions/gdx-freetype/jni/freetype-2.6.2/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys
import string
if len( sys.argv ) == 1 :
for asc_line in sys.stdin.readlines():
mpw_line = string.replace(asc_line, "\\xA5", "\245")
mpw_line = string.replace(mpw_line, "\\xB6", "\266")
mpw_line = string.replace(mpw_line, "\\xC4", "\304")
mpw_line = string.replace(mpw_line, "\\xC5", "\305")
mpw_line = string.replace(mpw_line, "\\xFF", "\377")
mpw_line = string.replace(mpw_line, "\n", "\r")
mpw_line = string.replace(mpw_line, "\\n", "\n")
sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r" :
for mpw_line in sys.stdin.readlines():
asc_line = string.replace(mpw_line, "\n", "\\n")
asc_line = string.replace(asc_line, "\r", "\n")
asc_line = string.replace(asc_line, "\245", "\\xA5")
asc_line = string.replace(asc_line, "\266", "\\xB6")
asc_line = string.replace(asc_line, "\304", "\\xC4")
asc_line = string.replace(asc_line, "\305", "\\xC5")
asc_line = string.replace(asc_line, "\377", "\\xFF")
sys.stdout.write(asc_line)
| apache-2.0 |
steebchen/youtube-dl | youtube_dl/extractor/xfileshare.py | 7 | 7547 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
decode_packed_codes,
determine_ext,
ExtractorError,
int_or_none,
NO_DEFAULT,
urlencode_postdata,
)
class XFileShareIE(InfoExtractor):
    """Extractor for video hosters built on the XFileSharing Pro script.

    All supported sites share the same page structure, so one extractor
    (parameterized by the ``_SITES`` table) covers them all.
    """

    # (host regex, human-readable site name) pairs; both _VALID_URL and
    # IE_DESC are derived from this table.
    _SITES = (
        (r'daclips\.(?:in|com)', 'DaClips'),
        (r'filehoot\.com', 'FileHoot'),
        (r'gorillavid\.(?:in|com)', 'GorillaVid'),
        (r'movpod\.in', 'MovPod'),
        (r'powerwatch\.pw', 'PowerWatch'),
        (r'rapidvideo\.ws', 'Rapidvideo.ws'),
        (r'thevideobee\.to', 'TheVideoBee'),
        (r'vidto\.me', 'Vidto'),
        (r'streamin\.to', 'Streamin.To'),
        (r'xvidstage\.com', 'XVIDSTAGE'),
        (r'vidabc\.com', 'Vid ABC'),
        (r'vidbom\.com', 'VidBom'),
        (r'vidlo\.us', 'vidlo'),
        (r'rapidvideo\.(?:cool|org)', 'RapidVideo.TV'),
        (r'fastvideo\.me', 'FastVideo.me'),
    )

    IE_DESC = 'XFileShare based sites: %s' % ', '.join(list(zip(*_SITES))[1])
    _VALID_URL = (r'https?://(?P<host>(?:www\.)?(?:%s))/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
                  % '|'.join(site for site in list(zip(*_SITES))[0]))

    # Markers of a dead/removed video on the hoster's page.
    _FILE_NOT_FOUND_REGEXES = (
        r'>(?:404 - )?File Not Found<',
        r'>The file was removed by administrator<',
    )

    _TESTS = [{
        'url': 'http://gorillavid.in/06y9juieqpmi',
        'md5': '5ae4a3580620380619678ee4875893ba',
        'info_dict': {
            'id': '06y9juieqpmi',
            'ext': 'mp4',
            'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
            'thumbnail': r're:http://.*\.jpg',
        },
    }, {
        'url': 'http://gorillavid.in/embed-z08zf8le23c6-960x480.html',
        'only_matching': True,
    }, {
        'url': 'http://daclips.in/3rso4kdn6f9m',
        'md5': '1ad8fd39bb976eeb66004d3a4895f106',
        'info_dict': {
            'id': '3rso4kdn6f9m',
            'ext': 'mp4',
            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
            'thumbnail': r're:http://.*\.jpg',
        }
    }, {
        'url': 'http://movpod.in/0wguyyxi1yca',
        'only_matching': True,
    }, {
        'url': 'http://filehoot.com/3ivfabn7573c.html',
        'info_dict': {
            'id': '3ivfabn7573c',
            'ext': 'mp4',
            'title': 'youtube-dl test video \'äBaW_jenozKc.mp4.mp4',
            'thumbnail': r're:http://.*\.jpg',
        },
        'skip': 'Video removed',
    }, {
        'url': 'http://vidto.me/ku5glz52nqe1.html',
        'info_dict': {
            'id': 'ku5glz52nqe1',
            'ext': 'mp4',
            'title': 'test'
        }
    }, {
        'url': 'http://powerwatch.pw/duecjibvicbu',
        'info_dict': {
            'id': 'duecjibvicbu',
            'ext': 'mp4',
            'title': 'Big Buck Bunny trailer',
        },
    }, {
        'url': 'http://xvidstage.com/e0qcnl03co6z',
        'info_dict': {
            'id': 'e0qcnl03co6z',
            'ext': 'mp4',
            'title': 'Chucky Prank 2015.mp4',
        },
    }, {
        # removed by administrator
        'url': 'http://xvidstage.com/amfy7atlkx25',
        'only_matching': True,
    }, {
        'url': 'http://vidabc.com/i8ybqscrphfv',
        'info_dict': {
            'id': 'i8ybqscrphfv',
            'ext': 'mp4',
            'title': 're:Beauty and the Beast 2017',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rapidvideo.cool/b667kprndr8w',
        'only_matching': True,
    }, {
        'url': 'http://www.fastvideo.me/k8604r8nk8sn/FAST_FURIOUS_8_-_Trailer_italiano_ufficiale.mp4.html',
        'only_matching': True
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find embedded iframes pointing at any supported host, so other
        # extractors can delegate embedded players to this IE.
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:%s)/embed-[0-9a-zA-Z]+.*?)\1'
                % '|'.join(site for site in list(zip(*XFileShareIE._SITES))[0]),
                webpage)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # Normalize embed URLs to the plain video page.
        url = 'http://%s/%s' % (mobj.group('host'), video_id)
        webpage = self._download_webpage(url, video_id)

        if any(re.search(p, webpage) for p in self._FILE_NOT_FOUND_REGEXES):
            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

        # Some sites gate the video behind a form POST (optionally with a
        # countdown); re-submit the hidden form to reach the real page.
        fields = self._hidden_inputs(webpage)

        if fields['op'] == 'download1':
            countdown = int_or_none(self._search_regex(
                r'<span id="countdown_str">(?:[Ww]ait)?\s*<span id="cxc">(\d+)</span>\s*(?:seconds?)?</span>',
                webpage, 'countdown', default=None))
            if countdown:
                self._sleep(countdown, video_id)

            webpage = self._download_webpage(
                url, video_id, 'Downloading video page',
                data=urlencode_postdata(fields), headers={
                    'Referer': url,
                    'Content-type': 'application/x-www-form-urlencoded',
                })

        # Title: try several site-specific markup patterns, then OpenGraph,
        # finally fall back to the video id.
        title = (self._search_regex(
            (r'style="z-index: [0-9]+;">([^<]+)</span>',
             r'<td nowrap>([^<]+)</td>',
             r'h4-fine[^>]*>([^<]+)<',
             r'>Watch (.+) ',
             r'<h2 class="video-page-head">([^<]+)</h2>',
             r'<h2 style="[^"]*color:#403f3d[^"]*"[^>]*>([^<]+)<'),  # streamin.to
            webpage, 'title', default=None) or self._og_search_title(
            webpage, default=None) or video_id).strip()

        def extract_formats(default=NO_DEFAULT):
            # Collect candidate media URLs from the known player setups.
            urls = []
            for regex in (
                    r'(?:file|src)\s*:\s*(["\'])(?P<url>http(?:(?!\1).)+\.(?:m3u8|mp4|flv)(?:(?!\1).)*)\1',
                    r'file_link\s*=\s*(["\'])(?P<url>http(?:(?!\1).)+)\1',
                    r'addVariable\((\\?["\'])file\1\s*,\s*(\\?["\'])(?P<url>http(?:(?!\2).)+)\2\)',
                    r'<embed[^>]+src=(["\'])(?P<url>http(?:(?!\1).)+\.(?:m3u8|mp4|flv)(?:(?!\1).)*)\1'):
                for mobj in re.finditer(regex, webpage):
                    video_url = mobj.group('url')
                    if video_url not in urls:
                        urls.append(video_url)
            formats = []
            for video_url in urls:
                if determine_ext(video_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        video_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                else:
                    formats.append({
                        'url': video_url,
                        'format_id': 'sd',
                    })
            if not formats and default is not NO_DEFAULT:
                return default
            self._sort_formats(formats)
            return formats

        formats = extract_formats(default=None)

        if not formats:
            # Fallback: the player config is hidden in P.A.C.K.E.R.-obfuscated
            # JavaScript; unpack it and retry (this time failing hard).
            webpage = decode_packed_codes(self._search_regex(
                r"(}\('(.+)',(\d+),(\d+),'[^']*\b(?:file|embed)\b[^']*'\.split\('\|'\))",
                webpage, 'packed code'))
            formats = extract_formats()

        thumbnail = self._search_regex(
            r'image\s*:\s*["\'](http[^"\']+)["\'],', webpage, 'thumbnail', default=None)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| unlicense |
LefterisJP/refuclib | scripts/gen_rf_xmacro_def.py | 2 | 15193 | #the maximum number of arguments for a function
maxArgs = 36
#stuff to import to be able to open files in the parent directory
import os.path
import sys
print("Generating rf_xmacro_def.h ...");
f = open(os.path.dirname(sys.argv[0]) + "/../include/Preprocessor/rf_xmacro_def.h","w");
#put some required definitions at the top
f.write("/**\n** @file rf_xmacro_def.h\n** @author Lefteris\n** @date 13/02/2012\n**\n\
** This file contains macros to easily define functions with default arguments\n\
** in source files using the C preprocessor. It is automatically generated\n\
** by the python script gen_rf_xmacro_def.py\n");
f.write("*/");
f.write("\n#include \"rf_xmacro_sub.h\"//to be able to do subtraction with the preprocessor\n")
f.write("#include \"rf_xmacro_gt.h\"//to be able to do greater than comparisons with the preprocessor\n")
f.write("#include \"rf_xmacro_lt.h\"//to be able to do less than comparisons with the preprocessor\n\n")
f.write("#include \"rf_xmacro_eq.h\"//to be able to do equality comparisons with the preprocessor\n\n")
f.write("#include \"rf_xmacro_argcount.h\"//to be able to count the number of arguments\n\n")
f.write("#include \"rf_xmacro_utils.h\"//to use util macros for token pasting");
#Write the macros used to pick the correct macro for function definition
f.write("\n\n\n///These macros here are used in order to pickup the correct function macro definition\n")
f.write("#define i_RP_DEF_CHECK2(A__,B__) RP_GT(B__,A__)\n")
f.write("#define i_RP_DEF_CHECK1(A__,B__) RP_LT(B__,A__)\n")
f.write("#define i_RP_DEF_MACRO_CHECK1(maxArgsN__,defArgsN__,thisArgN__) i_RP_DEF_CHECK1(RP_SUB(maxArgsN__,defArgsN__),thisArgN__)\n");
f.write("#define i_RP_DEF_MACRO_CHECK2(maxArgsN__,defArgsN__,thisArgN__) i_RP_DEF_CHECK2(RP_SUB(maxArgsN__,defArgsN__),thisArgN__)\n");
f.write("#define i_RP_PICK_FUNC_DEF(funcmacro__,eqresult__,cmpresult__) i_RP_PASTE3(funcmacro__,eqresult__,cmpresult__)\n\n");
#Write the i_REVERSE macros to reverse the arguments list
i = maxArgs
f.write("\n///These macros are used to reverse a list of arguments. They are used to obtain the appropriate default arguments\n")
while(i > 0):
f.write("#define i_REVERSE"+str(i)+"(");
j = 1;
#put the parameters of the macro
while(j<=i):
f.write("def"+str(j)+",");
j+=1;
f.write("...) ");
#now comes the macro definition
j=i;
while(j>0):
f.write("def"+str(j)+",");
j-=1;
f.write("\n");
i-=1;
#final macro
f.write("#define i_REVERSE0(...) \n");
#keep a commented example of how things were previously using REVERSE MACRO for the RP_DEFAULT_ARGS_X Macro
f.write("\n///The following is commented because I used to have it like that to reverse the arguments list. Keeping an example in case I want to remember how to do it in the future\n");
f.write("/*\n#define i_FIRST2_IMP(_1_,_2_,...) ,_1_,_2_\n");
f.write("#define i_FIRST2(...) i_FIRST2_IMP(__VA_ARGS__)\n");
f.write("#define i_RP_DEFAULT_ARGS_2(defArgsN__,...) i_FIRST2(i_RP_PASTE2(i_REVERSE,defArgsN__)(__VA_ARGS__))\n*/\n");
#Now Write the macros that take in the N default arguments from the default arguments list
i = maxArgs;
f.write("\n//!These macros are used to get the appropriate number of default arguments\n\n\n")
while(i>0):
f.write("//! Macros to get the appropriate number of arguments for "+str(i)+"\n");
#1)The i_AFTER_FIRSTXX MAcro
f.write("#define i_AFTER_FIRST"+str(i)+"(argsN__,");
j=1;
#put the parameters of the macro
while(j<=i):
f.write("p"+str(j)+"__,");
j+=1;
f.write("...) ");
#now comes the macro definition
f.write("i_FIRST##argsN__(__VA_ARGS__) \n");
#2)The i_AFTER_FIRSTXX_NOCOMMA MAcro
f.write("#define i_AFTER_FIRST"+str(i)+"_NOCOMMA(argsN__,");
j=1;
#put the parameters of the macro
while(j<=i):
f.write("p"+str(j)+"__,");
j+=1;
f.write("...) ");
#now comes the macro definition
f.write("i_FIRST##argsN__##_NOCOMMA(__VA_ARGS__) \n");
#3)The i_FIRSTXX Macro
f.write("#define i_FIRST"+str(i)+"(");
#put the parameters of the macro
j=1
while(j<=i):
f.write("p"+str(j)+"__,");
j+=1;
f.write("...) ");
#and now the definition
j=1;
while(j<=i):
f.write(",p"+str(j)+"__");
j+=1;
f.write("\n");
#4)The i_FIRSTXX_NOCOMMA Macro
f.write("#define i_FIRST"+str(i)+"_NOCOMMA(");
j=1
while(j<=i):
f.write("p"+str(j)+"__,");
j+=1;
f.write("...) ");
#and now the definition
j=1;
while(j<i):
f.write("p"+str(j)+"__,");
j+=1;
f.write("p"+str(j)+"__ \n");
#5)The i_LASTXX Macro
f.write("#define i_LAST"+str(i)+"(defArgsN__,...) ");
f.write("i_LAST"+str(i)+"_IMP(i_AFTER_FIRST,RP_SUB(defArgsN__,"+str(i)+"))("+str(i)+",__VA_ARGS__)\n");
#6)THE i_LASTXX_IMP Macro
f.write("#define i_LAST"+str(i)+"_IMP(id__,subres__) ");
f.write("i_RP_PASTE2(id__,subres__) \n");
#7)The i_LASTXX_NOCOMMA Macro
f.write("#define i_LAST"+str(i)+"_NOCOMMA(defArgsN__,...) ");
f.write("i_LAST"+str(i)+"_NOCCOMA_IMP(i_AFTER_FIRST,RP_SUB(defArgsN__,"+str(i)+"))("+str(i)+",__VA_ARGS__)\n");
#8)THE i_LASTXX_NOCCOMA_IMP Macro
f.write("#define i_LAST"+str(i)+"_NOCCOMA_IMP(id__,subres__) ");
f.write("i_RP_PASTE3(id__,subres__,_NOCOMMA) \n");
#9)The i_RP_DEFAULT_ARGS_X Macro when last call == 0
f.write("#define i_RP_DEFAULT_ARGS_"+str(i)+"_LAST0(defArgsN__,...) ");
f.write("i_LAST"+str(i)+"(defArgsN__,__VA_ARGS__)\n");
#was: f.write("i_LAST"+str(i)+"(i_RP_PASTE2(i_REVERSE,defArgsN__)(__VA_ARGS__))\n");
#10) The i_RP_DEFAULT_ARGS_X Macro when last call == 1
f.write("#define i_RP_DEFAULT_ARGS_"+str(i)+"_LAST1(defArgsN__,...) ");
f.write("i_LAST"+str(i)+"_NOCOMMA(defArgsN__,__VA_ARGS__)\n\n");
i-=1
#final case macros (i == 0)
f.write("//! Macros to get the appropriate number of arguments for 0\n");
#1)The i_AFTER_FIRSTXX_IMP MAcro
f.write("#define i_AFTER_FIRST0_IMP(argsN__,...) ,__VA_ARGS__ \n");
#2)The i_AFTER_FIRSTXX_NOCOMMA_IMP MAcro
f.write("#define i_AFTER_FIRST0_NOCOMMA_IMP(argsN__,...) __VA_ARGS__ \n");
#3)The i_AFTER_FIRSTXX Macro
f.write("#define i_AFTER_FIRST0(...) ");
f.write("i_AFTER_FIRST0_IMP(__VA_ARGS__)\n");
#4)The i_AFTER_FIRSTXX_NOCOMMA Macro
f.write("#define i_AFTER_FIRST0_NOCOMMA(...) ");
f.write("i_AFTER_FIRST0_NOCOMMA_IMP(__VA_ARGS__)\n");
#5)The i_LASTXX Macro
f.write("#define i_LAST0(defArgsN__,...) \n");
#6)THE i_LASTXX_IMP Macro
f.write("#define i_LAST0_IMP(id__,subres__) \n");
#7)The i_LASTXX_NOCOMMA Macro
f.write("#define i_LAST0_NOCOMMA(defArgsN__,...) \n");
#8)THE i_LASTXX_NOCCOMA_IMP Macro
f.write("#define i_LAST0_NOCCOMA_IMP(id__,subres__) \n");
#9)The i_RP_DEFAULT_ARGS_X Macro when last call == 0
f.write("#define i_RP_DEFAULT_ARGS_0_LAST0(defArgsN__,...) ");
f.write("i_LAST0(defArgsN__,__VA_ARGS__)\n");
#10) The i_RP_DEFAULT_ARGS_X Macro when last call == 1
f.write("#define i_RP_DEFAULT_ARGS_0_LAST1(defArgsN__,...) ");
f.write("i_LAST0_NOCOMMA(defArgsN__,__VA_ARGS__)\n\n");
i-=1
#Add the macro that gives us the appropriate default argument macro
f.write("//! This macro gets the appropriate default arguments macro\n");
f.write("#define i_RP_GET_DEFAULT_ARG(args__,maxArgsN__) i_RP_GET_DEFAULT_ARG_IMP(i_RP_DEFAULT_ARGS_,args__,_LAST,RP_EQ(args__,maxArgsN__))\n");
f.write("#define i_RP_GET_DEFAULT_ARG_IMP(macronarf__,args__,identifier__,last__) i_RP_PASTE4(macronarf__,args__,identifier__,last__)\n\n");
###############################################################################################################
#Now Write the big bulk of the macros. Namely those which are used to define functions with default arguments
####################################################################################################################
i = maxArgs;
f.write("\n//! These macros are used when you want to define a function in a source file with default arguments and want to avoid lots of typing\n");
while(i>0):
f.write("//! Function definition macros for "+str(i)+" arguments functions. No comments here. All comments are in the generating python file.\n");
#0)write the macro from which everything is called DEFINE [RF_DEFINE_DFUNCXX]
f.write("#define RF_DEFINE_DFUNC"+str(i)+"(retType__,funcNarf__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("...)\\\n");
#now call the picking macro CALL [i_RP_DEFINE_FUNC]
f.write("i_RP_DEFINE_FUNC"+str(i-1)+"(funcNarf__,retType__,"+str(i)+",RF_NARG(__VA_ARGS__),");
#put the rest of the parameters of the macro
j=1;
while(j<i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("__VA_ARGS__)\\\n");
#Finally add the definition for the full function. THIS IS WHERE THE MACRO WILL END UP. THIS IS WHAT PRECEDES USER CODE. Because of this the user can open brackets and write the function's body
f.write("retType__ funcNarf__##"+str(i)+"(");
#put the rest of the parameters of the macro
j=1;
while(j< i):
f.write("arg__"+str(j)+"Type arg__"+str(j)+"Name,");
j+=1;
f.write("arg__"+str(j)+"Type arg__"+str(j)+"Name)\n\n\n");
#1)write the general definition macro which picks up the rest DEFINE [i_RP_DEFINE_FUNC]
f.write("#define i_RP_DEFINE_FUNC"+str(i)+"(funcNarf__,retType__,maxArgsN__,defArgsN__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("...)\\\n");
#now call the macro which picks the appropriate macro definition. CALL [i_RP_PICK_FUNC_DEF]
f.write("i_RP_PICK_FUNC_DEF(i_RP_DEFINE_FUNC"+str(i)+"_IMP,i_RP_DEF_MACRO_CHECK1(maxArgsN__,defArgsN__,"+str(i)+")");
f.write(",i_RP_DEF_MACRO_CHECK2(maxArgsN__,defArgsN__,"+str(i)+"))(funcNarf__,retType__,maxArgsN__,defArgsN__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("__VA_ARGS__)\n\n");
#1) Definition 1 , if compulsory number of arguments is equal to current DEFINE [i_RP_DEFINE_FUNCXX_IMP00]
f.write("#define i_RP_DEFINE_FUNC"+str(i)+"_IMP00(funcNarf__,retType__,maxArgsN__,defArgsN__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("...)\\\n");
#now define the function signature. Notice it is defined as inline
f.write("inline retType__ funcNarf__##"+str(i)+"(");
j=1;
while(j<i):
f.write("arg__"+str(j)+"Type arg__"+str(j)+"Name,");
j+=1;
f.write("arg__"+str(j)+"Type arg__"+str(j)+"Name)\\\n");
f.write("{\\\n");
#now call the maxArgs function from here
f.write("\t return funcNarf__##maxArgsN__(");
#the parameters of the function
j=1;
while(j<i):
f.write("arg__"+str(j)+"Name,");
j+=1;
f.write("arg__"+str(j)+"Name ");
#now use the macro to get the appropiate default arguments
f.write("i_RP_GET_DEFAULT_ARG(RP_SUB(maxArgsN__,"+str(i)+"),maxArgsN__)(defArgsN__,__VA_ARGS__));\\\n");
f.write("}\n\n");
#2) Definition 2, if compulsory number of arguments is less than current
f.write("#define i_RP_DEFINE_FUNC"+str(i)+"_IMP01(funcNarf__,retType__,maxArgsN__,defArgsN__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("...)\\\n");
#now define the function signature. Notice it is defined as inline
f.write("inline retType__ funcNarf__##"+str(i)+"(");
j=1;
while(j<i):
f.write("arg__"+str(j)+"Type arg__"+str(j)+"Name,");
j+=1;
f.write("arg__"+str(j)+"Type arg__"+str(j)+"Name)\\\n");
f.write("{\\\n");
#now call the maxArgs function from here
f.write("\t return funcNarf__##maxArgsN__(");
#the parameters of the function
j=1;
while(j<i):
f.write("arg__"+str(j)+"Name,");
j+=1;
f.write("arg__"+str(j)+"Name ");
#now use the macro to get the appropiate default arguments
f.write("i_RP_GET_DEFAULT_ARG(RP_SUB(maxArgsN__,"+str(i)+"),maxArgsN__)(defArgsN__,__VA_ARGS__));\\\n");
f.write("}\\\n");
#also call the definition macro for lower arguments number
if(i!=1):
f.write("i_RP_DEFINE_FUNC"+str(i-1)+"(funcNarf__,retType__,maxArgsN__,defArgsN__,");
j=1;
while(j<i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("__VA_ARGS__)\n");
else:
f.write("i_RP_DEFINE_FUNC"+str(i-1)+"(funcNarf__,retType__,maxArgsN__,defArgsN__,__VA_ARGS__)\n");
#3) Definitions if none of the first two are true
f.write("#define i_RP_DEFINE_FUNC"+str(i)+"_IMP10(funcNarf__,retType__,maxArgsN__,defArgsN__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("...) \n");
f.write("#define i_RP_DEFINE_FUNC"+str(i)+"_IMP11(funcNarf__,retType__,maxArgsN__,defArgsN__,");
#put the rest of the parameters of the macro
j=1;
while(j<=i):
f.write("arg__"+str(j)+"Type,arg__"+str(j)+"Name,");
j+=1;
f.write("...) \n");
f.write("\n\n\n");
#end of one loop
i-=1;
#Final case FUNC0
f.write("//! Function definition macros for 0 arguments functions\n");
#0)write the general definition which picks up the rest
f.write("#define i_RP_DEFINE_FUNC0(funcNarf__,retType__,maxArgsN__,defArgsN__,...)\\\n");
#now call the macro which picks the appropriate macro definition
f.write("i_RP_PICK_FUNC_DEF(i_RP_DEFINE_FUNC0_IMP,i_RP_DEF_MACRO_CHECK1(maxArgsN__,defArgsN__,0)");
f.write(",i_RP_DEF_MACRO_CHECK2(maxArgsN__,defArgsN__,0))(funcNarf__,retType__,maxArgsN__,defArgsN__,");
f.write("__VA_ARGS__)\n\n");
#1) Definition 1 , if compulsory number of arguments is equal to current (only one possible)
f.write("#define i_RP_DEFINE_FUNC0_IMP00(funcNarf__,retType__,maxArgsN__,defArgsN__,...)\\\n");
#now define the function signature
f.write("retType__ funcNarf__##0()\\\n");
f.write("{\\\n");
#now call the maxArgs function from here with all the default values since this can only have default values (0 argument case)
f.write("\t funcNarf__##maxArgsN__(__VA_ARGS__);\\\n");
f.write("}\n");
#2) Definition 2, if compulsory number of arguments is less than current (illegal to happen)
f.write("#define i_RP_DEFINE_FUNC0_IMP01(funcNarf__,retType__,maxArgsN__,defArgsN__,...) \n");
#3) Definitions if none of the first two are true
f.write("#define i_RP_DEFINE_FUNC0_IMP10(funcNarf__,retType__,maxArgsN__,defArgsN__,...) \n");
f.write("#define i_RP_DEFINE_FUNC0_IMP11(funcNarf__,retType__,maxArgsN__,defArgsN__,...) \n");
#at the end close the file
print("rf_xmacro_def.h has been generated!");
f.close();
| bsd-3-clause |
chouseknecht/ansible | test/units/module_utils/network/meraki/test_meraki.py | 21 | 5062 | # -*- coding: utf-8 -*-
# Copyright 2019 Kevin Breit <kevin.breit@kevinbreit.net>
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import pytest
from units.compat import unittest, mock
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec, HTTPError, RateLimitException
from ansible.module_utils.six import PY2, PY3
from ansible.module_utils._text import to_native, to_bytes
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
testcase_data = {
"params": {'orgs': ['orgs.json'],
}
}
def load_fixture(name):
    """Return the parsed JSON fixture *name*, caching the result per path.

    Fixtures live under ``fixtures/`` next to this test module; repeated
    loads of the same fixture return the cached parsed object.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    # Use a context manager so the fixture file is closed promptly instead
    # of waiting for garbage collection (the original leaked the handle).
    with open(path) as fixture_file:
        data = json.loads(fixture_file.read())
    fixture_data[path] = data
    return data
@pytest.fixture(scope="module")
def module():
    """Module-scoped fixture: a MerakiModule built on a real AnsibleModule.

    Uses a dummy auth key so request-layer tests can run without credentials.
    """
    argument_spec = meraki_argument_spec()
    set_module_args({'auth_key': 'abc123',
                     })
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    return MerakiModule(module)
def mocked_fetch_url(*args, **kwargs):
    """Stand-in for fetch_url() returning canned 404/429 replies.

    ``args[1]`` is the request URL; only the two sentinel URLs below are
    recognised (any other URL leaves ``info`` unbound, as in the original).
    """
    print(args)
    url = args[1]
    if url == 'https://api.meraki.com/api/v0/404':
        info = {
            'status': 404,
            'msg': '404 - Page is missing',
            'url': 'https://api.meraki.com/api/v0/404',
            'body': '404',
        }
    elif url == 'https://api.meraki.com/api/v0/429':
        info = {
            'status': 429,
            'msg': '429 - Rate limit hit',
            'url': 'https://api.meraki.com/api/v0/429',
            'body': '429',
        }
    return (None, info)
def mocked_fetch_url_rate_success(module, *args, **kwargs):
    """Fake fetch_url() that rate-limits until module.retry_count reaches 5.

    Returns a ``(response, info)`` pair: a success payload once
    ``module.retry_count == 5``, otherwise a simulated 429 with ``response``
    set to None.
    """
    if module.retry_count == 5:
        info = {'status': 200,
                'url': 'https://api.meraki.com/api/organization',
                }
        resp = {'body': 'Succeeded'}
    else:
        info = {'status': 429,
                'msg': '429 - Rate limit hit',
                'url': 'https://api.meraki.com/api/v0/429',
                }
        info['body'] = '429'
        # Bug fix: resp was left unbound on this branch, so the function
        # raised UnboundLocalError instead of returning the simulated 429.
        resp = None
    return (resp, info)
def mocked_fail_json(*args, **kwargs):
    """No-op stand-in for MerakiModule.fail_json during request tests."""
    return None
def mocked_sleep(*args, **kwargs):
    """No-op replacement for time.sleep so retry tests run instantly."""
    return None
def test_fetch_url_404(module, mocker):
    """A 404 reply must raise HTTPError and record the status on the module."""
    url = '404'
    # Replace the network layer and fail_json so request() runs inline.
    mocker.patch('ansible.module_utils.network.meraki.meraki.fetch_url', side_effect=mocked_fetch_url)
    mocker.patch('ansible.module_utils.network.meraki.meraki.MerakiModule.fail_json', side_effect=mocked_fail_json)
    with pytest.raises(HTTPError):
        data = module.request(url, method='GET')
    assert module.status == 404
def test_fetch_url_429(module, mocker):
    """A persistent 429 must raise RateLimitException after retries."""
    url = '429'
    mocker.patch('ansible.module_utils.network.meraki.meraki.fetch_url', side_effect=mocked_fetch_url)
    mocker.patch('ansible.module_utils.network.meraki.meraki.MerakiModule.fail_json', side_effect=mocked_fail_json)
    # Skip the real back-off sleeps so the retry loop completes instantly.
    mocker.patch('time.sleep', return_value=None)
    with pytest.raises(RateLimitException):
        data = module.request(url, method='GET')
    assert module.status == 429
def test_fetch_url_429_success(module, mocker):
    """Rate-limited request that eventually succeeds.

    NOTE(review): this test installs the mocks but never calls
    module.request(), and the status assertion is commented out below,
    so it currently verifies nothing -- confirm intent.
    """
    url = '429'
    mocker.patch('ansible.module_utils.network.meraki.meraki.fetch_url', side_effect=mocked_fetch_url_rate_success)
    mocker.patch('ansible.module_utils.network.meraki.meraki.MerakiModule.fail_json', side_effect=mocked_fail_json)
    mocker.patch('time.sleep', return_value=None)
    # assert module.status == 200
def test_define_protocol_https(module):
    """define_protocol() must select 'https' when use_https is enabled."""
    module.params['use_https'] = True
    module.define_protocol()
    assert module.params['protocol'] == 'https'
def test_define_protocol_http(module):
    """define_protocol() must fall back to 'http' when use_https is off."""
    module.params['use_https'] = False
    module.define_protocol()
    assert module.params['protocol'] == 'http'
def test_is_org_valid_org_name(module):
    """Lookup by organization name should match exactly one organization."""
    orgs = load_fixture('orgs.json')
    assert module.is_org_valid(orgs, org_name="My organization") == 1
def test_is_org_valid_org_id(module):
    """Lookup by numeric organization id should match exactly one organization."""
    orgs = load_fixture('orgs.json')
    assert module.is_org_valid(orgs, org_id=2930418) == 1
| gpl-3.0 |
eHealthAfrica/kivy | kivy/input/providers/linuxwacom.py | 51 | 14837 | '''
Native support of Wacom tablet from linuxwacom driver
=====================================================
To configure LinuxWacom, add this to your configuration::
[input]
pen = linuxwacom,/dev/input/event2,mode=pen
finger = linuxwacom,/dev/input/event3,mode=touch
.. note::
You must have read access to the input event.
You can use a custom range for the X, Y and pressure values.
On some drivers, the range reported is invalid.
To fix that, you can add these options to the argument line:
* invert_x : 1 to invert X axis
* invert_y : 1 to invert Y axis
* min_position_x : X minimum
* max_position_x : X maximum
* min_position_y : Y minimum
* max_position_y : Y maximum
* min_pressure : pressure minimum
* max_pressure : pressure maximum
'''
__all__ = ('LinuxWacomMotionEventProvider', 'LinuxWacomMotionEvent')
import os
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
class LinuxWacomMotionEvent(MotionEvent):
    """Motion event produced by the linuxwacom provider."""

    def depack(self, args):
        """Unpack provider args: x/y position, optional shape and pressure."""
        self.is_touch = True
        self.sx = args['x']
        self.sy = args['y']
        self.profile = ['pos']
        if 'size_w' in args and 'size_h' in args:
            rect = ShapeRect()
            rect.width = args['size_w']
            rect.height = args['size_h']
            self.shape = rect
            self.profile.append('shape')
        if 'pressure' in args:
            self.pressure = args['pressure']
            self.profile.append('pressure')
        super(LinuxWacomMotionEvent, self).depack(args)

    def __str__(self):
        return '<LinuxWacomMotionEvent id=%d pos=(%f, %f) device=%s>' % (
            self.id, self.sx, self.sy, self.device)
if 'KIVY_DOC' in os.environ:
# documentation hack
LinuxWacomMotionEventProvider = None
else:
import threading
import collections
import struct
import fcntl
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.logger import Logger
#
# This part is taken from linux-source-2.6.32/include/linux/input.h
#
# Event types
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_SW = 0x05
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
EV_CNT = (EV_MAX + 1)
KEY_MAX = 0x2ff
# Synchronization events
SYN_REPORT = 0
SYN_CONFIG = 1
SYN_MT_REPORT = 2
# Misc events
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
MSC_CNT = (MSC_MAX + 1)
ABS_X = 0x00
ABS_Y = 0x01
ABS_PRESSURE = 0x18
ABS_MISC = 0x28 # if 0, it's touch up
ABS_MT_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse
ABS_MT_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)
ABS_MT_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse
ABS_MT_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)
ABS_MT_ORIENTATION = 0x34 # Ellipse orientation
ABS_MT_POSITION_X = 0x35 # Center X ellipse position
ABS_MT_POSITION_Y = 0x36 # Center Y ellipse position
ABS_MT_TOOL_TYPE = 0x37 # Type of touching device
ABS_MT_BLOB_ID = 0x38 # Group a set of packets as a blob
ABS_MT_TRACKING_ID = 0x39 # Unique ID of initiated contact
ABS_MT_PRESSURE = 0x3a # Pressure on contact area
# some ioctl base (with 0 value)
EVIOCGNAME = 2147501318
EVIOCGBIT = 2147501344
EVIOCGABS = 2149074240
# sizeof(struct input_event)
struct_input_event_sz = struct.calcsize('LLHHi')
struct_input_absinfo_sz = struct.calcsize('iiiiii')
sz_l = struct.calcsize('Q')
    class LinuxWacomMotionEventProvider(MotionEventProvider):
        """Reads raw evdev packets from a linuxwacom device node in a daemon
        thread and turns them into LinuxWacomMotionEvent begin/update/end
        events, drained by update() on the main thread."""

        # Recognised key=value options from the provider config line
        # (see the module docstring for their meaning).
        options = ('min_position_x', 'max_position_x',
                   'min_position_y', 'max_position_y',
                   'min_pressure', 'max_pressure',
                   'invert_x', 'invert_y')

        def __init__(self, device, args):
            """Parse the provider argument string: "<event file>[,key=value...]"."""
            super(LinuxWacomMotionEventProvider, self).__init__(device, args)
            self.input_fn = None
            self.default_ranges = dict()
            self.mode = 'touch'
            # split arguments
            args = args.split(',')
            if not args:
                Logger.error('LinuxWacom: No filename given in config')
                Logger.error('LinuxWacom: Use /dev/input/event0 for example')
                return None
            # read filename
            self.input_fn = args[0]
            Logger.info('LinuxWacom: Read event from <%s>' % self.input_fn)
            # read parameters
            for arg in args[1:]:
                if arg == '':
                    continue
                arg = arg.split('=')
                # ensure it's a key = value
                if len(arg) != 2:
                    err = 'LinuxWacom: Bad parameter' \
                          '%s: Not in key=value format.' % arg
                    Logger.error(err)
                    continue
                # ensure the key exist
                key, value = arg
                if key == 'mode':
                    self.mode = value
                    continue
                if key not in LinuxWacomMotionEventProvider.options:
                    Logger.error('LinuxWacom: unknown %s option' % key)
                    continue
                # ensure the value
                try:
                    self.default_ranges[key] = int(value)
                except ValueError:
                    err = 'LinuxWacom: value %s invalid for %s' % (key, value)
                    Logger.error(err)
                    continue
                # all good!
                msg = 'LinuxWacom: Set custom %s to %d' % (key, int(value))
                Logger.info(msg)
            Logger.info('LinuxWacom: mode is <%s>' % self.mode)

        def start(self):
            """Spawn the daemon reader thread feeding self.queue."""
            if self.input_fn is None:
                return
            self.uid = 0
            self.queue = collections.deque()
            self.thread = threading.Thread(
                target=self._thread_run,
                kwargs=dict(
                    queue=self.queue,
                    input_fn=self.input_fn,
                    device=self.device,
                    default_ranges=self.default_ranges))
            self.thread.daemon = True
            self.thread.start()

        def _thread_run(self, **kwargs):
            """Daemon loop: query the device ranges via ioctl, then decode
            evdev input_event packets and push motion events on the queue."""
            input_fn = kwargs.get('input_fn')
            queue = kwargs.get('queue')
            device = kwargs.get('device')
            drs = kwargs.get('default_ranges').get
            touches = {}
            touches_sent = []
            l_points = {}
            # prepare some vars to get limit of some component
            range_min_position_x = 0
            range_max_position_x = 2048
            range_min_position_y = 0
            range_max_position_y = 2048
            range_min_pressure = 0
            range_max_pressure = 255
            invert_x = int(bool(drs('invert_x', 0)))
            invert_y = int(bool(drs('invert_y', 0)))
            reset_touch = False

            def process(points):
                # Diff `points` against the known touches and emit
                # begin/update/end events accordingly.
                actives = list(points.keys())
                for args in points.values():
                    tid = args['id']
                    try:
                        touch = touches[tid]
                    except KeyError:
                        touch = LinuxWacomMotionEvent(device, tid, args)
                        touches[touch.id] = touch
                    # skip events whose position did not change
                    if touch.sx == args['x'] \
                            and touch.sy == args['y'] \
                            and tid in touches_sent:
                        continue
                    touch.move(args)
                    if tid not in touches_sent:
                        queue.append(('begin', touch))
                        touches_sent.append(tid)
                    queue.append(('update', touch))
                for tid in list(touches.keys())[:]:
                    if tid not in actives:
                        touch = touches[tid]
                        if tid in touches_sent:
                            touch.update_time_end()
                            queue.append(('end', touch))
                            touches_sent.remove(tid)
                        del touches[tid]

            def normalize(value, vmin, vmax):
                # Map a raw device value into the 0..1 range.
                return (value - vmin) / float(vmax - vmin)

            # open the input
            try:
                fd = open(input_fn, 'rb')
            except IOError:
                Logger.exception('Unable to open %s' % input_fn)
                return
            # get the controller name (EVIOCGNAME)
            device_name = fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
                                      " " * 256).split('\x00')[0]
            Logger.info('LinuxWacom: using <%s>' % device_name)
            # get abs infos
            bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
            bit, = struct.unpack('Q', bit)
            for x in range(EV_MAX):
                # preserve this, we may want other things than EV_ABS
                if x != EV_ABS:
                    continue
                # EV_ABS available for this device ?
                if (bit & (1 << x)) == 0:
                    continue
                # ask abs info keys to the devices
                sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
                                   ' ' * sz_l)
                sbit, = struct.unpack('Q', sbit)
                for y in range(KEY_MAX):
                    if (sbit & (1 << y)) == 0:
                        continue
                    absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
                                          (struct_input_absinfo_sz << 16),
                                          ' ' * struct_input_absinfo_sz)
                    abs_value, abs_min, abs_max, abs_fuzz, \
                        abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
                    # override the hardware-reported range with any
                    # user-configured value (see module docstring)
                    if y == ABS_X:
                        range_min_position_x = drs('min_position_x', abs_min)
                        range_max_position_x = drs('max_position_x', abs_max)
                        Logger.info('LinuxWacom: ' +
                                    '<%s> range position X is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_Y:
                        range_min_position_y = drs('min_position_y', abs_min)
                        range_max_position_y = drs('max_position_y', abs_max)
                        Logger.info('LinuxWacom: ' +
                                    '<%s> range position Y is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_PRESSURE:
                        range_min_pressure = drs('min_pressure', abs_min)
                        range_max_pressure = drs('max_pressure', abs_max)
                        Logger.info('LinuxWacom: ' +
                                    '<%s> range pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))
            # read until the end
            changed = False
            touch_id = 0
            touch_x = 0
            touch_y = 0
            touch_pressure = 0
            while fd:
                data = fd.read(struct_input_event_sz)
                if len(data) < struct_input_event_sz:
                    break
                # extract each event
                # NOTE(review): integer division under Python 2; '/' would
                # yield a float (TypeError for range) on Python 3.
                for i in range(len(data) / struct_input_event_sz):
                    ev = data[i * struct_input_event_sz:]
                    # extract timeval + event infos
                    tv_sec, tv_usec, ev_type, ev_code, ev_value = \
                        struct.unpack('LLHHi', ev[:struct_input_event_sz])
                    if ev_type == EV_SYN and ev_code == SYN_REPORT:
                        # end of one packet: fold accumulated state into the
                        # point dict for the current touch id
                        if touch_id in l_points:
                            p = l_points[touch_id]
                        else:
                            p = dict()
                            l_points[touch_id] = p
                        p['id'] = touch_id
                        if reset_touch is False:
                            p['x'] = touch_x
                            p['y'] = touch_y
                            p['pressure'] = touch_pressure
                        # in pen mode a zero pressure means "pen lifted"
                        if self.mode == 'pen' \
                                and touch_pressure == 0 \
                                and not reset_touch:
                            del l_points[touch_id]
                        if changed:
                            if not 'x' in p:
                                reset_touch = False
                                continue
                            process(l_points)
                            changed = False
                        if reset_touch:
                            l_points.clear()
                            reset_touch = False
                            process(l_points)
                    elif ev_type == EV_MSC and ev_code == MSC_SERIAL:
                        touch_id = ev_value
                    elif ev_type == EV_ABS and ev_code == ABS_X:
                        val = normalize(ev_value,
                                        range_min_position_x,
                                        range_max_position_x)
                        if invert_x:
                            val = 1. - val
                        touch_x = val
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_Y:
                        # Y axis is flipped once here; invert_y flips it back
                        val = 1. - normalize(ev_value,
                                             range_min_position_y,
                                             range_max_position_y)
                        if invert_y:
                            val = 1. - val
                        touch_y = val
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
                        touch_pressure = normalize(ev_value,
                                                   range_min_pressure,
                                                   range_max_pressure)
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_MISC:
                        # ABS_MISC == 0 signals touch up (see constant above)
                        if ev_value == 0:
                            reset_touch = True

        def update(self, dispatch_fn):
            """Drain queued events to the dispatcher (main-thread side)."""
            # dispatch all event from threads
            try:
                while True:
                    event_type, touch = self.queue.popleft()
                    dispatch_fn(event_type, touch)
            except:
                # IndexError from popleft() ends the drain; note this bare
                # except also silently hides errors raised by dispatch_fn
                pass
MotionEventFactory.register('linuxwacom', LinuxWacomMotionEventProvider)
| mit |
jelugbo/tundex | lms/djangoapps/courseware/migrations/0003_done_grade_cache.py | 194 | 8745 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add StudentModule.max_grade and StudentModule.done,
    and replace the (module_id, module_type, student) unique constraint with
    (module_id, student)."""

    def forwards(self, orm):
        """Apply: drop the old unique index, add the two columns, create the
        new (module_id, student) unique constraint."""
        # NOTE (vshnayder): This constraint has the wrong field order, so it doesn't actually
        # do anything in sqlite. Migration 0004 actually removes this index for sqlite.
        # Removing unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
        db.delete_unique('courseware_studentmodule', ['module_id', 'module_type', 'student_id'])

        # Adding field 'StudentModule.max_grade'
        db.add_column('courseware_studentmodule', 'max_grade', self.gf('django.db.models.fields.FloatField')(null=True, blank=True), keep_default=False)

        # Adding field 'StudentModule.done'
        db.add_column('courseware_studentmodule', 'done', self.gf('django.db.models.fields.CharField')(default='na', max_length=8, db_index=True), keep_default=False)

        # Adding unique constraint on 'StudentModule', fields ['module_id', 'student']
        db.create_unique('courseware_studentmodule', ['module_id', 'student_id'])

    def backwards(self, orm):
        """Revert: drop the added columns and restore the original
        (module_id, module_type, student) unique constraint."""
        # Removing unique constraint on 'StudentModule', fields ['module_id', 'student']
        db.delete_unique('courseware_studentmodule', ['module_id', 'student_id'])

        # Deleting field 'StudentModule.max_grade'
        db.delete_column('courseware_studentmodule', 'max_grade')

        # Deleting field 'StudentModule.done'
        db.delete_column('courseware_studentmodule', 'done')

        # Adding unique constraint on 'StudentModule', fields ['module_id', 'module_type', 'student']
        db.create_unique('courseware_studentmodule', ['module_id', 'module_type', 'student_id'])

    # Frozen ORM snapshot used by South at migration time -- do not edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'courseware.studentmodule': {
            'Meta': {'unique_together': "(('student', 'module_id'),)", 'object_name': 'StudentModule'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
            'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'module_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
            'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['courseware']
| agpl-3.0 |
dexterx17/nodoSocket | clients/Python-2.7.6/Mac/Modules/dlg/dlgscan.py | 34 | 4003 | # Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
LONG = "Dialogs"
SHORT = "dlg"
OBJECT = "DialogPtr"
def main():
    """Scan Dialogs.h, generate dlggen.py and the Dialogs.py constants file,
    then sanity-check both generated outputs. (Python 2 build tooling.)"""
    input = LONG + ".h"
    output = SHORT + "gen.py"
    defsoutput = TOOLBOXDIR + LONG + ".py"
    scanner = MyScanner(input, output, defsoutput)
    scanner.scan()
    scanner.close()
    print "=== Testing definitions output code ==="
    # Execute the generated constants in a scratch namespace to catch
    # syntax errors in the emitted file.
    execfile(defsoutput, {}, {})
    print "=== Done scanning and generating, now importing the generated code... ==="
    exec "import " + SHORT + "support"
    print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
    """Scanner specialised for the Dialog Manager (Dialogs.h)."""

    def destination(self, type, name, arglist):
        """Classify a routine: a first argument of DialogPtr/DialogRef passed
        in makes it a method on the dialog object; otherwise a function."""
        classname = "Function"
        listname = "functions"
        if arglist:
            t, n, m = arglist[0]
            if t in ("DialogPtr", "DialogRef") and m == "InMode":
                classname = "Method"
                listname = "methods"
        return classname, listname

    def makeblacklistnames(self):
        """Routine names that must not be wrapped."""
        return [
            'InitDialogs',
            'ErrorSound',
            # Dialogs are disposed when the object is deleted
            'CloseDialog',
            'DisposDialog',
            'DisposeDialog',
            'UpdtDialog',
            'CouldAlert',
            'FreeAlert',
            'CouldDialog',
            'FreeDialog',
            'GetStdFilterProc',
            'GetDialogParent',
##          # Can't find these in the CW Pro 3 libraries
            'SetDialogMovableModal',
            'GetDialogControlNotificationProc',
            'SetGrafPortOfDialog', # Funny, and probably not useful
            # Can't find these:
            'CloseStandardSheet',
            'RunStandardAlert',
            ]

    def makeblacklisttypes(self):
        """Argument/return types the generator cannot (yet) marshal."""
        return [
            "AlertStdAlertParamPtr", # Too much work, for now
            "AlertStdAlertParamRec", # ditto
            "AlertStdAlertParamRec_ptr", # ditto
            "AlertStdCFStringAlertParamPtr", # ditto
            "AlertStdCFStringAlertParamRec",
            "AlertStdCFStringAlertParamRec_ptr",
            "QTModelessCallbackProcPtr",
            ]

    def makerepairinstructions(self):
        """Pattern rewrites applied to scanned argument lists before
        code generation."""
        return [
            ([("Str255", "*", "InMode")],
             [("*", "*", "OutMode")]),

            ([("void_ptr", "*", "InMode"), ("long", "*", "InMode")],
             [("InBuffer", "*", "*")]),

            ([("void", "*", "OutMode"), ("long", "*", "InMode"),
              ("long", "*", "OutMode")],
             [("VarVarOutBuffer", "*", "InOutMode")]),

            # GetDialogItem return handle is optional
            ([("Handle", "item", "OutMode")],
             [("OptHandle", "item", "OutMode")]),

            # NewDialog ETC.
            ([("void", "*", "OutMode")],
             [("NullStorage", "*", "InMode")]),

            ([("DialogPtr", "*", "OutMode")],
             [("ExistingDialogPtr", "*", "*")]),
            ([("DialogRef", "*", "OutMode")],
             [("ExistingDialogPtr", "*", "*")]),
            ([("WindowPtr", "*", "OutMode")],
             [("ExistingWindowPtr", "*", "*")]),
            ([("WindowPtr", "*", "ReturnMode")],
             [("ExistingWindowPtr", "*", "*")]),

            # StdFilterProc
            ([('EventRecord', 'event', 'OutMode'),
              ('DialogItemIndex', 'itemHit', 'OutMode')],
             [('EventRecord', 'event', 'InOutMode'),
              ('DialogItemIndex', 'itemHit', 'InOutMode')])
            ]

    def writeinitialdefs(self):
        """Emitted at the top of the generated Dialogs.py constants file."""
        self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
if __name__ == "__main__":
main()
| mit |
edilio/django-rest-swagger | rest_framework_swagger/views.py | 1 | 2830 | import json
from django.views.generic import View
from django.utils.safestring import mark_safe
from django.shortcuts import render_to_response, RequestContext
from django.core.exceptions import PermissionDenied
from rest_framework.views import Response
from rest_framework_swagger.urlparser import UrlParser
from rest_framework_swagger.apidocview import APIDocView
from rest_framework.renderers import JSONRenderer
from rest_framework_swagger.docgenerator import DocumentationGenerator
from rest_framework_swagger import SWAGGER_SETTINGS
class SwaggerUIView(View):
    """Serve the Swagger UI index page, gated by the configured permissions."""

    def get(self, request, *args, **kwargs):
        """Render the documentation UI, or refuse access outright."""
        if not self.has_permission(request):
            raise PermissionDenied()
        context = {
            'swagger_settings': {
                'discovery_url': "%sapi-docs/" % request.build_absolute_uri(),
                'api_key': SWAGGER_SETTINGS.get('api_key', ''),
                'token_type': SWAGGER_SETTINGS.get('token_type'),
                'enabled_methods': mark_safe(
                    json.dumps(SWAGGER_SETTINGS.get('enabled_methods'))),
            }
        }
        return render_to_response("rest_framework_swagger/index.html",
                                  RequestContext(request, context))

    def has_permission(self, request):
        """Apply the optional is_superuser / is_authenticated settings gates."""
        user = request.user
        if SWAGGER_SETTINGS.get('is_superuser') and not user.is_superuser:
            return False
        if SWAGGER_SETTINGS.get('is_authenticated') and not user.is_authenticated():
            return False
        return True
class SwaggerResourcesView(APIDocView):
    """Top-level Swagger resource listing (the api-docs index document)."""

    renderer_classes = (JSONRenderer,)

    def get(self, request):
        """Return the resource declaration pointing at each top-level API."""
        apis = [{'path': "/%s" % path} for path in self.get_resources()]
        return Response({
            'apiVersion': SWAGGER_SETTINGS.get('api_version', ''),
            'swaggerVersion': '1.2',
            'basePath': self.host.rstrip('/'),
            'apis': apis,
        })

    def get_resources(self):
        """Collect the top-level API prefixes from the URL configuration."""
        parser = UrlParser()
        all_apis = parser.get_apis(
            exclude_namespaces=SWAGGER_SETTINGS.get('exclude_namespaces'))
        return parser.get_top_level_apis(all_apis)
class SwaggerApiView(APIDocView):
    """Detailed Swagger declaration for a single resource path."""

    renderer_classes = (JSONRenderer,)

    def get(self, request, path):
        """Describe the operations and models exposed under *path*."""
        matching_apis = self.get_api_for_resource(path)
        generator = DocumentationGenerator()
        return Response({
            'apis': generator.generate(matching_apis),
            'models': generator.get_models(matching_apis),
            'basePath': self.api_full_uri.rstrip('/'),
        })

    def get_api_for_resource(self, filter_path):
        """Return the parsed API endpoints that live under *filter_path*."""
        return UrlParser().get_apis(filter_path=filter_path)
| bsd-2-clause |
no2key/dragonflow | dragonflow/tests/unit/test_openflow_app.py | 1 | 13380 | # Copyright (c) 2015 OpenStack Foundation.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools

import mock
from neutron.common import constants
from neutron import context as neutron_context
from neutron.tests import base as tests_base
# TODO(gsagie) use this until Ryu is in the requierments.txt
try:
from dragonflow.controller import l3_openflow_app as of_app
except Exception:
of_app = None
def ryu_enabled(func):
    """Decorator that turns a test into a no-op when Ryu is unavailable.

    The l3_openflow_app import above is optional; when it failed, of_app is
    None and the wrapped test silently returns without running.
    """
    # functools.wraps preserves the wrapped test's __name__/__doc__ so
    # unittest reporting and test discovery show the real test name
    # instead of 'func_wrapper'.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        if of_app is None:
            return
        return func(*args, **kwargs)
    return func_wrapper
# Fixture constants: a single tenant owning two subnets (A and B), each with
# one router interface port, joined by one router on one datapath.
_FAKE_TENANT_ID_1 = 'abcd123'
_SEGMENTATION_ID_A = 1024
_SEGMENTATION_ID_B = 1025

# Subnet A: 10.0.1.0/24, router port 10.0.1.5 on local OF port number 1.
_SUBNET_A_ID = 'subneta'
_SUBNET_A_NET = '10.0.1.0'
_SUBNET_A_MASK = '24'
_SUBNET_A_CIDR = _SUBNET_A_NET + '/' + _SUBNET_A_MASK
_PORT_SUBNET_A_IP = '10.0.1.5'
_PORT_SUBNET_A_MAC = "00:00:00:00:00:01"
_PORT_SUBNET_A_ID = '1234'
_PORT_SUBNET_A_LOCAL_NUM = 1

# Subnet B: 10.0.2.0/24, router port 10.0.2.5 on local OF port number 2.
_SUBNET_B_ID = 'subnetb'
_SUBNET_B_NET = '10.0.2.0'
_SUBNET_B_MASK = '24'
_SUBNET_B_CIDR = _SUBNET_B_NET + '/' + _SUBNET_B_MASK
_PORT_SUBNET_B_IP = '10.0.2.5'
_PORT_SUBNET_B_MAC = "00:00:00:00:00:02"
_PORT_SUBNET_B_ID = '5678'
_PORT_SUBNET_B_LOCAL_NUM = 2

# Router and datapath identifiers shared across the tests.
_ROUTER_ID = 'routerA'
_DP_1_ID = 123
class TestOpenflowApp(tests_base.BaseTestCase):
    """Unit tests for the L3ReactiveApp OpenFlow application.

    All flow-programming methods of the app are replaced by mocks in
    setUp(); the tests then feed neutron-style router/port data and Ryu
    datapath events into the app (in various orders) and inspect the
    recorded mock calls.
    """

    def setUp(self):
        super(TestOpenflowApp, self).setUp()
        # Ryu is optional; without it of_app is None and the fixture (and
        # every @ryu_enabled test) becomes a no-op.
        if of_app is None:
            return
        self.admin_ctx = mock.patch.object(neutron_context,
                                           "get_admin_context").start()
        self.l3_app = of_app.L3ReactiveApp(None, idle_timeout=0,
                                           hard_timeout=0)
        self._mock_bootstrap_flows_creation()

    @ryu_enabled
    def test_app_created_and_mock_enabled(self):
        self.assertEqual(self.admin_ctx.call_count, 1)

    @ryu_enabled
    def test_create_simple_env_router_first(self):
        self.l3_app.subnet_added_binding_cast = mock.Mock()
        self.l3_app.bootstrap_network_classifiers = mock.Mock()
        self._create_env(router_first=True)
        self._assert_environment_creation()

    @ryu_enabled
    def test_create_simple_env_ports_first(self):
        self.l3_app.subnet_added_binding_cast = mock.Mock()
        self.l3_app.bootstrap_network_classifiers = mock.Mock()
        self._create_env()
        self._assert_environment_creation()

    @ryu_enabled
    def test_switch_features_handler(self):
        ev = mock.Mock()
        ev.msg.datapath = self._create_datapath_mock(_DP_1_ID)
        self.l3_app.switch_features_handler(ev)
        self.assertTrue(self.l3_app.send_port_desc_stats_request.called)
        # Check that the normal flow was called with table '0' and
        # lowest priority '0'
        table_arg = 1
        priority_arg = 2
        self.assertEqual(
            self.l3_app.add_flow_normal.call_args[0][table_arg], 0)
        self.assertEqual(
            self.l3_app.add_flow_normal.call_args[0][priority_arg], 0)
        # Assert data path was added with id
        self.assertIsNotNone(self.l3_app.dp_list.get(_DP_1_ID))

    @ryu_enabled
    def test_port_desc_handler(self):
        ev = mock.Mock()
        ev.msg.datapath = self._create_datapath_mock(_DP_1_ID)
        self.l3_app.switch_features_handler(ev)
        ev.msg.body = self._create_dp_ports_mock()
        self.l3_app.port_desc_stats_reply_handler(ev)
        # Assert these methods weren't called since we dont have port
        # segmentation id yet from neutron
        self.assertFalse(self.l3_app.add_flow_metadata_by_port_num.called)
        self.assertFalse(self.l3_app._add_vrouter_arp_responder.called)
        self.assertFalse(self.l3_app.add_flow_normal_local_subnet.called)

    # The following six tests drive the same environment through every
    # ordering of (router sync, port sync, datapath arrival) and expect the
    # same installed subnets regardless of order.

    @ryu_enabled
    def test_new_subnet_installed_order_ports_router_dp(self):
        self._create_env()
        self._install_new_datapath()
        self._assert_empty_subnet_installed()

    @ryu_enabled
    def test_new_subnet_installed_order_router_ports_dp(self):
        self._create_env(router_first=True)
        self._install_new_datapath()
        self._assert_empty_subnet_installed()

    @ryu_enabled
    def test_new_subnet_installed_order_dp_router_ports(self):
        self._install_new_datapath()
        self._create_env(router_first=True)
        self._assert_empty_subnet_installed()

    @ryu_enabled
    def test_new_subnet_installed_order_dp_ports_router(self):
        self._install_new_datapath()
        self._create_env()
        self._assert_empty_subnet_installed()

    @ryu_enabled
    def test_new_subnet_installed_order_ports_dp_router(self):
        port_a = self._create_router_port_subnet_a()
        port_b = self._create_router_port_subnet_b()
        router = self._create_router()
        self.l3_app.sync_port(port_a)
        self.l3_app.sync_port(port_b)
        self._install_new_datapath()
        self.l3_app.sync_router(router)
        self._assert_empty_subnet_installed()

    @ryu_enabled
    def test_new_subnet_installed_order_router_dp_ports(self):
        port_a = self._create_router_port_subnet_a()
        port_b = self._create_router_port_subnet_b()
        router = self._create_router()
        self.l3_app.sync_router(router)
        self._install_new_datapath()
        self.l3_app.sync_port(port_a)
        self.l3_app.sync_port(port_b)
        self._assert_empty_subnet_installed()

    @ryu_enabled
    def test_new_subnet_installed_order_dp_features_env_port_desc(self):
        # Datapath features arrive before the neutron data, port
        # descriptions only afterwards.
        ev = mock.Mock()
        ev.msg.datapath = self._create_datapath_mock(_DP_1_ID)
        self.l3_app.switch_features_handler(ev)
        self._create_env()
        ev.msg.body = self._create_dp_ports_mock()
        self.l3_app.port_desc_stats_reply_handler(ev)
        self._assert_empty_subnet_installed()

    def _assert_empty_subnet_installed(self):
        """Verify both subnets were fully installed on the datapath."""
        # Positional index of the segmentation id argument in
        # add_flow_metadata_by_port_num calls
        seg_id_arg_flow_metadata = 4
        self.assertEqual(self.l3_app.add_flow_metadata_by_port_num.call_count,
                         2)
        seg_ids = set()
        for i in range(self.l3_app.add_flow_metadata_by_port_num.call_count):
            seg_ids.add(self.l3_app.add_flow_metadata_by_port_num.
                        call_args_list[i][0][seg_id_arg_flow_metadata])
        self.assertEqual({_SEGMENTATION_ID_A, _SEGMENTATION_ID_B}, seg_ids)
        seg_id_arg_vrouter_arp = 1
        self.assertTrue(
            self.l3_app._add_vrouter_arp_responder.call_count >= 2)
        seg_ids = set()
        for i in range(self.l3_app._add_vrouter_arp_responder.call_count):
            seg_ids.add(self.l3_app._add_vrouter_arp_responder.
                        call_args_list[i][0][seg_id_arg_vrouter_arp])
        self.assertEqual({_SEGMENTATION_ID_A, _SEGMENTATION_ID_B}, seg_ids)
        mac_id_arg_vrouter_arp = 2
        macs = set()
        for i in range(self.l3_app._add_vrouter_arp_responder.call_count):
            macs.add(self.l3_app._add_vrouter_arp_responder.
                     call_args_list[i][0][mac_id_arg_vrouter_arp])
        self.assertEqual({_PORT_SUBNET_A_MAC, _PORT_SUBNET_B_MAC}, macs)
        interfaces_arg_vrouter_arp = 3
        interfaces = set()
        for i in range(self.l3_app._add_vrouter_arp_responder.call_count):
            interfaces.add(self.l3_app._add_vrouter_arp_responder.
                           call_args_list[i][0][interfaces_arg_vrouter_arp])
        # NOTE(review): assertTrue's second argument is only a failure
        # message, so this line always passes; by analogy with the checks
        # above, assertEqual was presumably intended -- confirm before fixing.
        self.assertTrue({_PORT_SUBNET_A_IP, _PORT_SUBNET_B_IP}, interfaces)
        self.assertTrue(
            self.l3_app.add_flow_normal_local_subnet.call_count >= 2)
        seg_id_arg_flow_normal = 5
        seg_ids = set()
        for i in range(self.l3_app.add_flow_normal_local_subnet.call_count):
            seg_ids.add(self.l3_app.add_flow_normal_local_subnet.
                        call_args_list[i][0][seg_id_arg_flow_normal])
        self.assertEqual({_SEGMENTATION_ID_A, _SEGMENTATION_ID_B}, seg_ids)
        dst_net_arg_flow_normal = 3
        dst_net = set()
        for i in range(self.l3_app.add_flow_normal_local_subnet.call_count):
            dst_net.add(self.l3_app.add_flow_normal_local_subnet.
                        call_args_list[i][0][dst_net_arg_flow_normal])
        self.assertEqual({_SUBNET_A_NET, _SUBNET_B_NET}, dst_net)
        dst_mask_arg_flow_normal = 4
        dst_mask = set()
        for i in range(self.l3_app.add_flow_normal_local_subnet.call_count):
            dst_mask.add(self.l3_app.add_flow_normal_local_subnet.
                         call_args_list[i][0][dst_mask_arg_flow_normal])
        # NOTE(review): same always-true assertTrue(set, var) pattern as the
        # interfaces check above; assertEqual looks intended here too.
        self.assertTrue({_SUBNET_A_MASK, _SUBNET_B_MASK}, dst_mask)
        # Validate datapath bootstrap was called correctly
        self.assertEqual(self.l3_app.add_flow_go_to_table_on_arp.call_count,
                         1)
        self.assertEqual(self.l3_app.add_flow_goto_normal_on_broad.call_count,
                         1)
        self.assertEqual(self.l3_app.add_flow_goto_normal_on_mcast.call_count,
                         1)

    def _install_new_datapath(self):
        """Simulate a datapath connecting: features event then port descs."""
        ev = mock.Mock()
        ev.msg.datapath = self._create_datapath_mock(_DP_1_ID)
        self.l3_app.switch_features_handler(ev)
        ev.msg.body = self._create_dp_ports_mock()
        self.l3_app.port_desc_stats_reply_handler(ev)

    def _mock_bootstrap_flows_creation(self):
        """Replace every flow-programming method of the app with a Mock."""
        self.l3_app.add_flow_go_to_table2 = mock.Mock()
        self.l3_app.bootstrap_network_classifiers = mock.Mock()
        self.l3_app.add_flow_go_to_table_on_arp = mock.Mock()
        self.l3_app.add_flow_goto_normal_on_broad = mock.Mock()
        self.l3_app.add_flow_goto_normal_on_mcast = mock.Mock()
        self.l3_app.add_flow_normal = mock.Mock()
        self.l3_app.add_flow_metadata_by_port_num = mock.Mock()
        self.l3_app._add_vrouter_arp_responder = mock.Mock()
        self.l3_app.add_flow_normal_local_subnet = mock.Mock()
        self.l3_app.append_port_data_to_ports = mock.Mock()
        self.l3_app.send_port_desc_stats_request = mock.Mock()

    def _create_dp_ports_mock(self):
        """Build two mocked OF ports named after the neutron router ports."""
        port1 = mock.Mock()
        port2 = mock.Mock()
        # 'qr-' prefix marks a router interface port on the integration bridge
        port1.name = 'qr-' + _PORT_SUBNET_A_ID
        port2.name = 'qr-' + _PORT_SUBNET_B_ID
        port1.port_no = _PORT_SUBNET_A_LOCAL_NUM
        port2.port_no = _PORT_SUBNET_B_LOCAL_NUM
        return [port1, port2]

    def _create_datapath_mock(self, id):
        """Build a mocked Ryu datapath with the given id."""
        dp = mock.Mock()
        dp.id = id
        dp.ofproto_parser = mock.Mock()
        dp.ofproto = mock.Mock()
        return dp

    def _assert_environment_creation(self):
        """Verify the tenant, ports and subnets were registered in the app."""
        self.assertEqual(len(self.l3_app._tenants), 1)
        tenant = self.l3_app.get_tenant_by_id(_FAKE_TENANT_ID_1)
        self.assertEqual(len(tenant.mac_to_port_data), 2)
        self.assertTrue(
            self.l3_app.subnet_added_binding_cast.call_count >= 2)
        self.assertTrue(
            self.l3_app.bootstrap_network_classifiers.call_count >= 2)
        subnets = tenant.subnets
        for id, subnet in subnets.items():
            self.assertIsNotNone(subnet.segmentation_id)
            self.assertNotEqual(subnet.segmentation_id, 0)

    def _create_env(self, router_first=False):
        """Sync two router ports and a router, in the requested order."""
        port_a = self._create_router_port_subnet_a()
        port_b = self._create_router_port_subnet_b()
        router = self._create_router()
        if router_first:
            self.l3_app.sync_router(router)
        self.l3_app.sync_port(port_a)
        self.l3_app.sync_port(port_b)
        if not router_first:
            self.l3_app.sync_router(router)

    def _create_router(self):
        """Build a neutron-style router dict with both subnet interfaces."""
        router_info = {}
        router_info['id'] = _ROUTER_ID
        router_info['tenant_id'] = _FAKE_TENANT_ID_1
        port_a = self._create_router_port_subnet_a()
        port_b = self._create_router_port_subnet_b()
        router_info['_interfaces'] = [port_a, port_b]
        return router_info

    def _create_router_port_subnet_a(self):
        """Build the neutron-style router interface port dict for subnet A."""
        port = {}
        port['id'] = _PORT_SUBNET_A_ID
        port['tenant_id'] = _FAKE_TENANT_ID_1
        port['segmentation_id'] = _SEGMENTATION_ID_A
        port['mac_address'] = _PORT_SUBNET_A_MAC
        port['device_owner'] = constants.DEVICE_OWNER_ROUTER_INTF
        subnet = dict(id=_SUBNET_A_ID, cidr=_SUBNET_A_CIDR)
        ip_addr = dict(subnet_id=_SUBNET_A_ID, ip_address=_PORT_SUBNET_A_IP)
        port['fixed_ips'] = [ip_addr]
        port['subnets'] = [subnet]
        return port

    def _create_router_port_subnet_b(self):
        """Build the neutron-style router interface port dict for subnet B."""
        port = {}
        port['id'] = _PORT_SUBNET_B_ID
        port['tenant_id'] = _FAKE_TENANT_ID_1
        port['segmentation_id'] = _SEGMENTATION_ID_B
        port['mac_address'] = _PORT_SUBNET_B_MAC
        port['device_owner'] = constants.DEVICE_OWNER_ROUTER_INTF
        subnet = dict(id=_SUBNET_B_ID, cidr=_SUBNET_B_CIDR)
        ip_addr = dict(subnet_id=_SUBNET_B_ID, ip_address=_PORT_SUBNET_B_IP)
        port['fixed_ips'] = [ip_addr]
        port['subnets'] = [subnet]
        return port
| apache-2.0 |
romain-li/edx-platform | openedx/core/djangoapps/api_admin/migrations/0002_auto_20160325_1604.py | 53 | 1096 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Name of the auth Group created by this migration; its members receive all
# permissions on the ApiAccessRequest model.
API_GROUP_NAME = 'API Access Request Approvers'
def add_api_access_group(apps, schema_editor):
    """Create the approver group and grant it every ApiAccessRequest permission."""
    # Look up historical models through `apps`, as migrations must.
    Group = apps.get_model('auth', 'Group')
    Permission = apps.get_model('auth', 'Permission')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    ApiAccessRequest = apps.get_model('api_admin', 'ApiAccessRequest')

    group, __ = Group.objects.get_or_create(name=API_GROUP_NAME)
    request_content_type = ContentType.objects.get_for_model(ApiAccessRequest)
    group.permissions = Permission.objects.filter(
        content_type=request_content_type)
    group.save()
def delete_api_access_group(apps, schema_editor):
    """Reverse migration: drop the approver group if it exists."""
    group_model = apps.get_model('auth', 'Group')
    group_model.objects.filter(name=API_GROUP_NAME).delete()
class Migration(migrations.Migration):
    """Create the API access approver group and assign its permissions."""

    # contenttypes is required because the forward migration looks up the
    # ApiAccessRequest content type.
    dependencies = [
        ('api_admin', '0001_initial'),
        ('contenttypes', '0002_remove_content_type_name')
    ]
    operations = [
        migrations.RunPython(add_api_access_group, delete_api_access_group)
    ]
| agpl-3.0 |
eaas-framework/virtualbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Core/PackageFile.py | 11 | 8362 | ## @file
#
# PackageFile class represents the zip file of a distribution package.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
PackageFile
'''
##
# Import Modules
#
import os.path
import zipfile
import tempfile
import platform
from Logger.ToolError import FILE_OPEN_FAILURE
from Logger.ToolError import FILE_CHECKSUM_FAILURE
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import FILE_DECOMPRESS_FAILURE
from Logger.ToolError import FILE_UNKNOWN_ERROR
from Logger.ToolError import FILE_WRITE_FAILURE
from Logger.ToolError import FILE_COMPRESS_FAILURE
import Logger.Log as Logger
from Logger import StringTable as ST
from Library.Misc import CreateDirectory
from Library.Misc import RemoveDirectory
## PackageFile
#
#  Wraps the zip archive of a distribution package and reports all failures
#  through the UPT Logger.
#
class PackageFile:
    ## Constructor
    #
    #  Open the archive and verify the CRC of every member.
    #
    #  @param FileName: path of the zip archive
    #  @param Mode: open mode "r", "w" or "a"; any other value silently
    #               falls back to "r"
    #
    def __init__(self, FileName, Mode="r"):
        self._FileName = FileName
        if Mode not in ["r", "w", "a"]:
            Mode = "r"
        try:
            self._ZipFile = zipfile.ZipFile(FileName, Mode, \
                                            zipfile.ZIP_DEFLATED)
            #
            # Map OS-normalized member names to the names actually stored in
            # the archive, so lookups are path-separator insensitive
            #
            self._Files = {}
            for Filename in self._ZipFile.namelist():
                self._Files[os.path.normpath(Filename)] = Filename
        except BaseException as Xstr:
            Logger.Error("PackagingTool", FILE_OPEN_FAILURE,
                         ExtraData="%s (%s)" % (FileName, str(Xstr)))

        BadFile = self._ZipFile.testzip()
        if BadFile is not None:
            Logger.Error("PackagingTool", FILE_CHECKSUM_FAILURE,
                         ExtraData="[%s] in %s" % (BadFile, FileName))

    ## Return the underlying zipfile.ZipFile object
    #
    def GetZipFile(self):
        return self._ZipFile

    ## Get file name
    #
    def __str__(self):
        return self._FileName

    ## Extract every member below the destination directory
    #
    #  @param ToDest: the destination directory
    #
    def Unpack(self, ToDest):
        for FileN in self._ZipFile.namelist():
            ToFile = os.path.normpath(os.path.join(ToDest, FileN))
            Msg = "%s -> %s" % (FileN, ToFile)
            Logger.Info(Msg)
            self.Extract(FileN, ToFile)

    ## Extract one member, if it is present in the archive
    #
    #  @param File: the member to extract (matched with '/' separators)
    #  @param ToFile: the destination file
    #  @return ToFile on success, '' if the member is absent
    #
    def UnpackFile(self, File, ToFile):
        File = File.replace('\\', '/')
        if File in self._ZipFile.namelist():
            Msg = "%s -> %s" % (File, ToFile)
            Logger.Info(Msg)
            self.Extract(File, ToFile)
            return ToFile
        return ''

    ## Extract the file
    #
    #  @param Which: the source path inside the archive
    #  @param ToDest: the destination path
    #
    def Extract(self, Which, ToDest):
        Which = os.path.normpath(Which)
        if Which not in self._Files:
            Logger.Error("PackagingTool", FILE_NOT_FOUND,
                         ExtraData="[%s] in %s" % (Which, self._FileName))
        try:
            FileContent = self._ZipFile.read(self._Files[Which])
        except BaseException as Xstr:
            Logger.Error("PackagingTool", FILE_DECOMPRESS_FAILURE,
                         ExtraData="[%s] in %s (%s)" % (Which, \
                                                        self._FileName, \
                                                        str(Xstr)))
        try:
            CreateDirectory(os.path.dirname(ToDest))
            #
            # Never overwrite a read-only destination; warn and skip instead
            #
            if os.path.exists(ToDest) and not os.access(ToDest, os.W_OK):
                Logger.Warn("PackagingTool", \
                            ST.WRN_FILE_NOT_OVERWRITTEN % ToDest)
                return
            ToFile = open(ToDest, "wb")
        except BaseException as Xstr:
            Logger.Error("PackagingTool", FILE_OPEN_FAILURE,
                         ExtraData="%s (%s)" % (ToDest, str(Xstr)))
        try:
            ToFile.write(FileContent)
            ToFile.close()
        except BaseException as Xstr:
            Logger.Error("PackagingTool", FILE_WRITE_FAILURE,
                         ExtraData="%s (%s)" % (ToDest, str(Xstr)))

    ## Remove members by unpacking to a temp dir and repacking the rest
    #
    #  @param Files: the members to remove
    #
    def Remove(self, Files):
        TmpDir = os.path.join(tempfile.gettempdir(), ".packaging")
        if os.path.exists(TmpDir):
            RemoveDirectory(TmpDir, True)
        os.mkdir(TmpDir)
        self.Unpack(TmpDir)
        for SinF in Files:
            SinF = os.path.normpath(SinF)
            if SinF not in self._Files:
                Logger.Error("PackagingTool", FILE_NOT_FOUND,
                             ExtraData="%s is not in %s!" % \
                             (SinF, self._FileName))
            self._Files.pop(SinF)
        self._ZipFile.close()
        #
        # Re-create the archive and pack the surviving members from TmpDir
        #
        self._ZipFile = zipfile.ZipFile(self._FileName, "w", \
                                        zipfile.ZIP_DEFLATED)
        Cwd = os.getcwd()
        os.chdir(TmpDir)
        self.PackFiles(self._Files)
        os.chdir(Cwd)
        RemoveDirectory(TmpDir, True)

    ## Pack the files under Top directory, the directory shown in the zipFile start from BaseDir,
    #  BaseDir should be the parent directory of the Top directory, for example,
    #  Pack(Workspace\Dir1, Workspace) will pack files under Dir1, and the path in the zipfile will
    #  start from Workspace
    #
    #  @param Top: the top directory
    #  @param BaseDir: the base directory
    #
    def Pack(self, Top, BaseDir):
        if not os.path.isdir(Top):
            Logger.Error("PackagingTool", FILE_UNKNOWN_ERROR, \
                         "%s is not a directory!" % Top)

        FilesToPack = []
        Cwd = os.getcwd()
        os.chdir(BaseDir)
        #
        # Strip BaseDir and its trailing path separator from Top to get the
        # archive-relative directory.  (The previous expression chained
        # str.join on integers by mistake and raised AttributeError whenever
        # Pack() was called; the intended arithmetic is '+ len(BaseDir) + 1'.)
        #
        RelaDir = Top[Top.upper().find(BaseDir.upper()) + len(BaseDir) + 1:]
        for Root, Dirs, Files in os.walk(RelaDir):
            #
            # Skip version-control and hidden directories/files
            #
            if 'CVS' in Dirs:
                Dirs.remove('CVS')
            if '.svn' in Dirs:
                Dirs.remove('.svn')
            for Dir in Dirs:
                if Dir.startswith('.'):
                    Dirs.remove(Dir)
            for File1 in Files:
                if File1.startswith('.'):
                    continue
                ExtName = os.path.splitext(File1)[1]
                #
                # skip '.dec', '.inf', '.dsc', '.fdf' files
                #
                if ExtName.lower() in ['.dec', '.inf', '.dsc', '.fdf']:
                    continue
                FilesToPack.append(os.path.join(Root, File1))
        self.PackFiles(FilesToPack)
        os.chdir(Cwd)

    ## Pack the files
    #
    #  @param Files: the files to pack
    #
    def PackFiles(self, Files):
        for File1 in Files:
            self.PackFile(File1)

    ## Pack one file
    #
    #  @param File: the file to pack
    #  @param ArcName: name to store the file under in the archive
    #
    def PackFile(self, File, ArcName=None):
        try:
            #
            # avoid packing same file multiple times
            #
            if platform.system() != 'Windows':
                File = File.replace('\\', '/')
            ZipedFilesNameList = self._ZipFile.namelist()
            for ZipedFile in ZipedFilesNameList:
                if File == os.path.normpath(ZipedFile):
                    return
            Logger.Info("packing ..." + File)
            self._ZipFile.write(File, ArcName)
        except BaseException as Xstr:
            Logger.Error("PackagingTool", FILE_COMPRESS_FAILURE,
                         ExtraData="%s (%s)" % (File, str(Xstr)))

    ## Write data to the packed file
    #
    #  @param Data: data to write
    #  @param ArcName: name to store the data under in the archive
    #
    def PackData(self, Data, ArcName):
        try:
            self._ZipFile.writestr(ArcName, Data)
        except BaseException as Xstr:
            Logger.Error("PackagingTool", FILE_COMPRESS_FAILURE,
                         ExtraData="%s (%s)" % (ArcName, str(Xstr)))

    ## Close file
    #
    #
    def Close(self):
        self._ZipFile.close()
| gpl-2.0 |
ludwigschwardt/scikits.fitting | scikits/fitting/spline.py | 1 | 18811 | ###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Spline fitters.
:author: Ludwig Schwardt
:license: Modified BSD
"""
from __future__ import division
from builtins import zip
from builtins import range
import numpy as np
import scipy.interpolate
from .generic import ScatterFit, GridFit, NotFittedError
from .utils import sort_grid, desort_grid
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : Spline1DFit
# ----------------------------------------------------------------------------------------------------------------------
class Spline1DFit(ScatterFit):
    """Fit a B-spline to 1-D data.

    This wraps :class:`scipy.interpolate.UnivariateSpline`, which is based on
    Paul Dierckx's DIERCKX (or FITPACK) routines (specifically ``curfit`` for
    fitting and ``splev`` for evaluation).

    Parameters
    ----------
    degree : int, optional
        Degree of spline, in range 1-5 [default=3, i.e. cubic B-spline]
    min_size : float, optional
        Size of smallest features to fit in the data, expressed in units of
        *x*. This determines the smoothness of fitted spline. Roughly stated,
        any oscillation in the fitted curve will have a period bigger than
        *min_size*. Works best if *x* is uniformly spaced. Ignored whenever
        an explicit *std_y* is passed to :meth:`fit`.
    kwargs : dict, optional
        Additional keyword arguments are passed to underlying spline class

    """
    def __init__(self, degree=3, min_size=0.0, **kwargs):
        ScatterFit.__init__(self)
        self.degree = degree
        # Size of smallest features to fit
        self._min_size = min_size
        # Extra keyword arguments to spline class
        self._extra_args = kwargs
        # Interpolator function, only set after :func:`fit`
        self._interp = None

    def fit(self, x, y, std_y=1.0):
        """Fit spline to 1-D data.

        The minimum number of data points is N = degree + 1.

        Parameters
        ----------
        x : array-like, shape (N,)
            Known input values as a 1-D numpy array or sequence
        y : array-like, shape (N,)
            Known output values as a 1-D numpy array, or sequence
        std_y : float or array-like, shape (N,), optional
            Measurement error or uncertainty of `y` values, expressed as
            standard deviation in units of `y` (overrides min_size setting)

        Returns
        -------
        self : :class:`Spline1DFit` object
            Reference to self, to allow chaining of method calls

        """
        # Check dimensions of known data
        x = np.atleast_1d(np.asarray(x))
        y = np.atleast_1d(np.asarray(y))
        if y.size < self.degree + 1:
            raise ValueError("Not enough data points for spline fit: "
                             "requires at least %d, only got %d" %
                             (self.degree + 1, y.size))
        # Ensure that x is in strictly ascending order
        if np.any(np.diff(x) < 0):
            sort_ind = x.argsort()
            x = x[sort_ind]
            y = y[sort_ind]
        # Deduce standard deviation of y only if it was left at its scalar
        # default of 1.0, based on specified size of smallest features.
        # The np.isscalar guard is required: comparing an array-valued std_y
        # to 1.0 would raise "truth value is ambiguous" (an explicit std_y,
        # scalar or array, always takes precedence over min_size).
        if self._min_size > 0.0 and np.isscalar(std_y) and std_y == 1.0:
            # Number of samples, and sample period
            # (assuming samples are uniformly spaced in x)
            N, xstep = len(x), np.abs(np.mean(np.diff(x)))
            # Convert feature size to digital frequency (based on
            # k / N = Ts / T using FFT notation). The frequency index k is
            # clipped so that k > 0, to avoid including DC power in stdev calc
            # (i.e. slowest oscillation is N samples), and k <= N / 2,
            # which represents a 2-sample oscillation.
            min_freq_ind = np.clip(int(np.round(N * xstep / self._min_size)),
                                   1, N // 2)
            # Find power in signal above the minimum cutoff frequency using
            # periodogram. Reduce spectral leakage resulting from edge effects
            # by removing DC and windowing the signal.
            window = np.hamming(N)
            periodo = np.abs(np.fft.fft((y - y.mean()) * window)) ** 2
            periodo /= (window ** 2).sum()
            periodo[1:(N // 2)] *= 2.0
            std_y = np.sqrt(np.sum(periodo[min_freq_ind:(N // 2 + 1)]) / N)
        # Convert uncertainty into array of shape (N,)
        if np.isscalar(std_y):
            std_y = np.tile(std_y, y.shape)
        std_y = np.atleast_1d(np.asarray(std_y))
        # Lower bound on uncertainty is determined by floating-point
        # resolution (no upper bound)
        np.clip(std_y, max(np.mean(np.abs(y)), 1e-20) * np.finfo(y.dtype).eps,
                np.inf, out=std_y)
        self._interp = scipy.interpolate.UnivariateSpline(
            x, y, w=1. / std_y, k=self.degree, **self._extra_args)
        return self

    def __call__(self, x):
        """Evaluate spline on new data.

        Parameters
        ----------
        x : array-like, shape (M,)
            Input to function as a 1-D numpy array, or sequence

        Return
        ------
        y : array, shape (M,)
            Output of function as a 1-D numpy array

        """
        x = np.atleast_1d(np.asarray(x))
        if self._interp is None:
            raise NotFittedError("Spline not fitted to data yet - "
                                 "first call .fit method")
        return self._interp(x)
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : Spline2DScatterFit
# ----------------------------------------------------------------------------------------------------------------------
class Spline2DScatterFit(ScatterFit):
    """Fit a B-spline to scattered 2-D data.

    This wraps :class:`scipy.interpolate.SmoothBivariateSpline`, which is based
    on Paul Dierckx's DIERCKX (or FITPACK) routines (specifically ``surfit``
    for fitting and ``bispev`` for evaluation). The 2-D ``x`` coordinates do
    not have to lie on a regular grid, and can be in any order.

    Parameters
    ----------
    degree : sequence of 2 ints, optional
        Degree (1-5) of spline in x and y directions
    kwargs : dict, optional
        Additional keyword arguments are passed to underlying spline class

    """
    def __init__(self, degree=(3, 3), **kwargs):
        ScatterFit.__init__(self)
        self.degree = degree
        # Extra keyword arguments to spline class
        self._extra_args = kwargs
        # Interpolator function, only set after :func:`fit`
        self._interp = None

    def fit(self, x, y, std_y=1.0):
        """Fit spline to 2-D scattered data in unstructured form.

        The minimum number of data points is
        ``N = (degree[0]+1)*(degree[1]+1)``. The 2-D *x* coordinates do not
        have to lie on a regular grid, and can be in any order.

        Parameters
        ----------
        x : array-like, shape (2, N)
            Known input values as a 2-D numpy array, or sequence
        y : array-like, shape (N,)
            Known output values as a 1-D numpy array, or sequence
        std_y : float or array-like, shape (N,), optional
            Measurement error or uncertainty of `y` values, expressed as
            standard deviation in units of `y`

        Returns
        -------
        self : :class:`Spline2DScatterFit` object
            Reference to self, to allow chaining of method calls

        """
        # Check dimensions of known data
        x = np.atleast_2d(np.asarray(x))
        y = np.atleast_1d(np.asarray(y))
        if (len(x.shape) != 2) or (x.shape[0] != 2) or (
                len(y.shape) != 1) or (y.shape[0] != x.shape[1]):
            raise ValueError("Spline interpolator requires input data with "
                             "shape (2, N) and output data with shape (N,), "
                             "got %s and %s instead" % (x.shape, y.shape))
        if y.size < (self.degree[0] + 1) * (self.degree[1] + 1):
            raise ValueError("Not enough data points for spline fit: requires "
                             "at least %d, only got %d" %
                             ((self.degree[0] + 1) * (self.degree[1] + 1),
                              y.size))
        # Convert uncertainty into array of shape (N,)
        if np.isscalar(std_y):
            std_y = np.tile(std_y, y.shape)
        std_y = np.atleast_1d(np.asarray(std_y))
        # Lower bound on uncertainty is determined by floating-point resolution
        # (no upper bound)
        np.clip(std_y, max(np.mean(np.abs(y)), 1e-20) * np.finfo(y.dtype).eps,
                np.inf, out=std_y)
        self._interp = scipy.interpolate.SmoothBivariateSpline(
            x[0], x[1], y, w=1. / std_y, kx=self.degree[0], ky=self.degree[1],
            **self._extra_args)
        return self

    def __call__(self, x):
        """Evaluate spline on new scattered data.

        Parameters
        ----------
        x : array-like, shape (2, M)
            Input to function as a 2-D numpy array, or sequence

        Returns
        -------
        y : array, shape (M,)
            Output of function as a 1-D numpy array

        """
        # Check dimensions
        x = np.atleast_2d(np.asarray(x))
        if (len(x.shape) != 2) or (x.shape[0] != 2):
            raise ValueError("Spline interpolator requires input data "
                             "with shape (2, M), got %s instead" % (x.shape,))
        if self._interp is None:
            raise NotFittedError("Spline not fitted to data yet - "
                                 "first call .fit method")
        # Evaluate the spline at all scattered points in one vectorised call
        # via BivariateSpline.ev, instead of the previous O(M) Python loop of
        # single-point evaluations (bispev's grid interface is bypassed).
        # The final squeeze() keeps the original output convention:
        # shape (M,) for M > 1 and a 0-d array for a single point.
        return self._interp.ev(x[0], x[1]).squeeze()
# ----------------------------------------------------------------------------------------------------------------------
# --- CLASS : Spline2DGridFit
# ----------------------------------------------------------------------------------------------------------------------
class Spline2DGridFit(GridFit):
"""Fit a B-spline to 2-D data on a rectangular grid.
This wraps :mod:`scipy.interpolate.RectBivariateSpline`, which is based on
Paul Dierckx's DIERCKX (or FITPACK) routines (specifically ``regrid`` for
fitting and ``bispev`` for evaluation). The 2-D ``x`` coordinates define a
rectangular grid. They do not have to be in ascending order, as both the
fitting and evaluation routines sort them for you.
Parameters
----------
degree : sequence of 2 ints, optional
Degree (1-5) of spline in x and y directions
kwargs : dict, optional
Additional keyword arguments are passed to underlying spline class
"""
def __init__(self, degree=(3, 3), **kwargs):
    """Store spline degrees and defer interpolator creation to :meth:`fit`."""
    GridFit.__init__(self)
    # Degree (1-5) of spline in x and y directions
    self.degree = degree
    # Extra keyword arguments to spline class
    self._extra_args = kwargs
    # Interpolator function, only set after :func:`fit`
    self._interp = None
def fit(self, x, y, std_y=None):
"""Fit spline to 2-D data on a rectangular grid.
This fits a scalar function defined on 2-D data to the provided grid.
The first sequence in *x* defines the M 'x' axis ticks (in any order),
while the second sequence in *x* defines the N 'y' axis ticks (also in
any order). The provided function output *y* contains the corresponding
'z' values on the grid, in an array of shape (M, N). The minimum number
of data points is ``(degree[0]+1)*(degree[1]+1)``.
Parameters
----------
x : sequence of 2 sequences, of lengths M and N
Known input grid specified by sequence of 2 sequences of axis ticks
y : array-like, shape (M, N)
Known output values as a 2-D numpy array
std_y : None or float or array-like, shape (M, N), optional
Measurement error or uncertainty of `y` values, expressed as
standard deviation in units of `y`. If None, uncertainty
propagation is disabled (typically to save time as this can be
costly to calculate when M*N is large).
Returns
-------
self : :class:`Spline2DGridFit` object
Reference to self, to allow chaining of method calls
Notes
-----
This propagates uncertainty through the spline fit based on the main
idea of [1]_, as expressed in Eq. (13) in the paper. Take note that
this equation contains an error -- the square brackets on the
right-hand side should enclose the entire sum over i and not just the
summand.
.. [1] Enting, I. G., Trudinger, C. M., and Etheridge, D. M.,
"Propagating data uncertainty through smoothing spline fits,"
Tellus, vol. 58B, pp. 305-309, 2006.
"""
# Check dimensions of known data
x = [np.atleast_1d(np.asarray(ax)) for ax in x]
y = np.atleast_2d(np.asarray(y))
if ((len(x) != 2) or (len(x[0].shape) != 1) or
(len(x[1].shape) != 1) or (len(y.shape) != 2) or
(y.shape[0] != len(x[0])) or (y.shape[1] != len(x[1]))):
raise ValueError("Spline interpolator requires input data with "
"shape [(M,), (N,)] and output data "
"with shape (M, N), got %s and %s instead" %
([ax.shape for ax in x], y.shape))
if y.size < (self.degree[0] + 1) * (self.degree[1] + 1):
raise ValueError("Not enough data points for spline fit: "
"requires at least %d, only got %d" %
((self.degree[0] + 1) * (self.degree[1] + 1),
y.size))
# Ensure that 'x' and 'y' coordinates are both in ascending order
# (requirement of underlying regrid)
xs, ys, zs = sort_grid(x[0], x[1], y)
self._interp = scipy.interpolate.RectBivariateSpline(
xs, ys, zs, kx=self.degree[0], ky=self.degree[1],
**self._extra_args)
# Disable uncertainty propagation if no std_y is given
if std_y is None:
self._std_fitted_y = None
else:
# Uncertainty should have same shape as y
# (or get tiled to that shape if it is scalar)
std_y = np.atleast_2d(np.asarray(std_y))
self._std_fitted_y = (np.tile(std_y, y.shape) if
std_y.shape == (1, 1) else std_y)
if self._std_fitted_y.shape != y.shape:
raise ValueError("Spline interpolator requires uncertainty "
"to be scalar or to have shape "
"%s (same as data), got %s instead" %
(y.shape, self._std_fitted_y.shape))
# Create list of interpolators, one per value in y,
# by setting each y value to 1 in turn (and the rest 0)
self._std_interps = []
testz = np.zeros(zs.size)
for m in range(zs.size):
testz[:] = 0.0
testz[m] = 1.0
interp = scipy.interpolate.RectBivariateSpline(
xs, ys, testz.reshape(zs.shape), kx=self.degree[0],
ky=self.degree[1], **self._extra_args)
self._std_interps.append(interp)
return self
def __call__(self, x, full_output=False):
"""Evaluate spline on a new rectangular grid.
Evaluates the fitted scalar function on 2-D grid provided in *x*. The
first sequence in *x* defines the K 'x' axis ticks (in any order),
while the second sequence in *x* defines the L 'y' axis ticks (also in
any order). The function returns the corresponding 'z' values on the
grid, in an array of shape (K, L).
Parameters
----------
x : sequence of 2 sequences, of lengths K and L
2-D input grid specified by sequence of 2 sequences of axis ticks
full_output : {False, True}, optional
True if output uncertainty should also be returned
Returns
-------
y : float array, shape (K, L)
Output of function as a 2-D numpy array
std_y : None or float array, shape (K, L), optional
Uncertainty of function output, expressed as standard deviation
(or None if no 'y' uncertainty was supplied during fitting)
"""
# Check dimensions
x = [np.atleast_1d(np.asarray(ax)) for ax in x]
if (len(x) != 2) or (len(x[0].shape) != 1) or (len(x[1].shape) != 1):
raise ValueError("Spline interpolator requires input data with "
"shape [(K,), (L,)], got %s instead" %
([ax.shape for ax in x],))
if self._interp is None:
raise NotFittedError("Spline not fitted to data yet - "
"first call .fit method")
# The standard DIERCKX 2-D spline evaluation function (bispev) expects
# a rectangular grid in ascending order. Therefore, sort coordinates,
# evaluate on the sorted grid, and return the desorted result
x0s, x1s = sorted(x[0]), sorted(x[1])
y = desort_grid(x[0], x[1], self._interp(x0s, x1s))
if not full_output:
return y
if self._std_fitted_y is None:
return y, None
# The output y variance is a weighted sum of the variances of the
# fitted y values, according to Enting's method
var_ys = np.zeros(y.shape)
for std_fitted_y, std_interp in zip(self._std_fitted_y.ravel(),
self._std_interps):
var_ys += (std_fitted_y * std_interp(x0s, x1s)) ** 2
return y, desort_grid(x[0], x[1], np.sqrt(var_ys))
| bsd-3-clause |
carsonmcdonald/electron | script/bootstrap.py | 73 | 5824 | #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_atom_shell_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'

# On CI use the locally-installed npm from node_modules; otherwise rely on
# the one on PATH. (`'CI' in os.environ` replaces the Python-2-only
# dict.has_key() and works on both Python 2 and 3.)
if 'CI' in os.environ:
  NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
  NPM = 'npm'
if sys.platform in ['win32', 'cygwin']:
  # Windows npm is a batch wrapper.
  NPM += '.cmd'
def main():
  """Bootstrap the source tree: sync submodules, set up toolchains and
  vendored libraries, then install node modules. Steps run in dependency
  order, so do not reorder them."""
  os.chdir(SOURCE_ROOT)
  args = parse_args()
  if not args.yes and PLATFORM != 'win32':
    check_root()
  if args.verbose:
    enable_verbose_mode()
  if sys.platform == 'cygwin':
    update_win32_python()
  if PLATFORM != 'win32':
    update_clang()
  update_submodules()
  setup_python_libs()
  update_node_modules('.')
  bootstrap_brightray(args.dev, args.url, args.target_arch)
  if args.target_arch in ['arm', 'ia32'] and PLATFORM == 'linux':
    # Cross-compiling on Linux needs a matching Debian sysroot image.
    download_sysroot(args.target_arch)
  create_chrome_version_h()
  touch_config_gypi()
  run_update()
  update_electron_modules('spec', args.target_arch)
def parse_args():
  """Define and evaluate this script's command line."""
  arg_parser = argparse.ArgumentParser(description='Bootstrap this project')
  arg_parser.add_argument('-u', '--url',
                          default=BASE_URL,
                          required=False,
                          help='The base URL from which to download '
                               'libchromiumcontent (i.e., the URL you passed to '
                               'libchromiumcontent\'s script/upload script')
  arg_parser.add_argument('-v', '--verbose',
                          action='store_true',
                          help='Prints the output of the subprocesses')
  arg_parser.add_argument('-d', '--dev',
                          action='store_true',
                          help='Do not download static_library build')
  arg_parser.add_argument('-y', '--yes', '--assume-yes',
                          action='store_true',
                          help='Run non-interactively by assuming "yes" to all '
                               'prompts.')
  arg_parser.add_argument('--target_arch',
                          default=get_target_arch(),
                          help='Manually specify the arch to build for')
  return arg_parser.parse_args()
def check_root():
  """Warn when running as root (Unix only; os.geteuid is not on Windows)
  and give the user a chance to abort. Any answer other than y/Y exits."""
  if os.geteuid() == 0:
    print "We suggest not running this as root, unless you're really sure."
    choice = raw_input("Do you want to continue? [y/N]: ")
    if choice not in ('y', 'Y'):
      sys.exit(0)
def update_submodules():
  """Sync submodule URLs, then update them all recursively."""
  for git_command in (['git', 'submodule', 'sync'],
                      ['git', 'submodule', 'update', '--init', '--recursive']):
    execute_stdout(git_command)
def setup_python_libs():
  """Build the vendored pure-Python libraries in place."""
  for lib in ('requests', 'boto'):
    lib_dir = os.path.join(VENDOR_DIR, lib)
    with scoped_cwd(lib_dir):
      execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch):
  """Run brightray's own bootstrap script for the given arch and base URL."""
  bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
  args = [
      '--commit', LIBCHROMIUMCONTENT_COMMIT,
      '--target_arch', target_arch,
      url,
  ]
  if is_dev:
    # Dev mode skips the static_library download.
    args.insert(0, '--dev')
  execute_stdout([sys.executable, bootstrap] + args)
def update_node_modules(dirname, env=None):
  """Run `npm install` inside `dirname`.

  On Linux the prebuilt clang is forced for native-module builds. On CI,
  npm failures are tolerated (install errors are ignored).
  """
  if env is None:
    # NOTE(review): this aliases (not copies) os.environ, so the CC/CXX
    # assignments below leak into the whole process -- presumably intended,
    # confirm before changing to os.environ.copy().
    env = os.environ
  if PLATFORM == 'linux':
    # Use prebuilt clang for building native modules.
    llvm_dir = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
                            'Release+Asserts', 'bin')
    env['CC'] = os.path.join(llvm_dir, 'clang')
    env['CXX'] = os.path.join(llvm_dir, 'clang++')
    env['npm_config_clang'] = '1'
  with scoped_cwd(dirname):
    args = [NPM, 'install']
    if is_verbose_mode():
      args += ['--verbose']
    # Ignore npm install errors when running in CI.
    # ('CI' in os.environ replaces the Python-2-only dict.has_key().)
    if 'CI' in os.environ:
      try:
        execute_stdout(args, env)
      except subprocess.CalledProcessError:
        pass
    else:
      execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
  """npm-install `dirname` against the atom-shell (electron) headers."""
  env = dict(os.environ,
             npm_config_arch=target_arch,
             npm_config_target=get_atom_shell_version(),
             npm_config_disturl='https://atom.io/download/atom-shell')
  update_node_modules(dirname, env)
def update_win32_python():
  """Clone Chromium's python_26 into vendor/ unless it is already there."""
  with scoped_cwd(VENDOR_DIR):
    if os.path.exists('python_26'):
      return
    execute_stdout(['git', 'clone', PYTHON_26_URL])
def update_clang():
  """Fetch or refresh the prebuilt clang toolchain."""
  update_script = os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')
  execute_stdout([update_script])
def download_sysroot(target_arch):
  """Install the Debian sysroot used for cross-compiling to `target_arch`."""
  # The sysroot script knows the arch as 'i386', not 'ia32'.
  sysroot_arch = 'i386' if target_arch == 'ia32' else target_arch
  install_script = os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py')
  execute_stdout([install_script, '--arch', sysroot_arch])
def create_chrome_version_h():
  """Generate atom/common/chrome_version.h from the libchromiumcontent
  VERSION file, rewriting the target only when its content actually changed
  so incremental builds are not invalidated needlessly."""
  version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                              'libchromiumcontent', 'VERSION')
  target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
  template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')
  with open(version_file, 'r') as f:
    version = f.read()
  with open(template_file, 'r') as f:
    template = f.read()
  content = template.replace('{PLACEHOLDER}', version.strip())
  # Binary mode on Windows/cygwin keeps LF line endings in the header.
  if sys.platform in ['win32', 'cygwin']:
    read_mode, write_mode = 'rb', 'wb'
  else:
    read_mode, write_mode = 'r', 'w'
  # Bug fix: the previous code opened the target with 'w+', which truncates
  # the file *before* f.read(), so the "is it unchanged?" comparison always
  # saw an empty string and the file was rewritten on every run. Read any
  # existing content first, then write only on change.
  old_content = None
  if os.path.isfile(target_file):
    with open(target_file, read_mode) as f:
      old_content = f.read()
  if old_content != content:
    with open(target_file, write_mode) as f:
      f.write(content)
def touch_config_gypi():
  """Ensure vendor/node/config.gypi exists with the placeholder content,
  without rewriting it (and bumping its mtime) when it is already correct."""
  config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
  content = '\n{}'
  # Bug fix: opening with 'w+' truncated the file before the read, so the
  # comparison below always saw '' and the file was rewritten every run.
  old_content = None
  if os.path.isfile(config_gypi):
    with open(config_gypi, 'r') as f:
      old_content = f.read()
  if old_content != content:
    with open(config_gypi, 'w') as f:
      f.write(content)
def run_update():
  """Invoke script/update.py with the current Python interpreter."""
  update_script = os.path.join(SOURCE_ROOT, 'script', 'update.py')
  execute_stdout([sys.executable, update_script])
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| mit |
Trust-Code/PySPED | pysped/nfe/leiaute/conssitnfe_200.py | 9 | 5087 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from pysped.xml_sped import (ABERTURA, NAMESPACE_NFE, TagCaracter,
TagDecimal, TagInteiro, XMLNFe,
tira_abertura)
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_2 as ESQUEMA_ATUAL
from pysped.nfe.leiaute import conssitnfe_107
from pysped.nfe.leiaute import ProtNFe_200, RetCancNFe_200
import os
DIRNAME = os.path.dirname(__file__)
class ConsSitNFe(conssitnfe_107.ConsSitNFe):
    """NF-e status query (consSitNFe), layout version 2.00.

    Reuses the 1.07 layout and only overrides the version tag and the
    XSD schema used for validation.
    """

    def __init__(self):
        super(ConsSitNFe, self).__init__()
        self.caminho_esquema = os.path.join(DIRNAME, 'schema',
                                            ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'consSitNFe_v2.00.xsd'
        self.versao = TagDecimal(nome='consSitNFe', codigo='EP01',
                                 propriedade='versao',
                                 namespace=NAMESPACE_NFE, valor='2.00',
                                 raiz='/')
class RetConsSitNFe(conssitnfe_107.RetConsSitNFe):
    """Response to the NF-e status query, layout version 2.00."""

    def __init__(self):
        super(RetConsSitNFe, self).__init__()
        self.versao = TagDecimal(nome='retConsSitNFe', codigo='ER01',
                                 propriedade='versao',
                                 namespace=NAMESPACE_NFE, valor='2.00',
                                 raiz='/')
        self.tpAmb = TagInteiro(nome='tpAmb', codigo='ER03',
                                tamanho=[1, 1, 1], raiz='//retConsSitNFe')
        self.verAplic = TagCaracter(nome='verAplic', codigo='ER04',
                                    tamanho=[1, 20], raiz='//retConsSitNFe')
        self.cStat = TagCaracter(nome='cStat', codigo='ER05',
                                 tamanho=[1, 3], raiz='//retConsSitNFe')
        self.xMotivo = TagCaracter(nome='xMotivo', codigo='ER06',
                                   tamanho=[1, 2000], raiz='//retConsSitNFe')
        self.cUF = TagInteiro(nome='cUF', codigo='ER07',
                              tamanho=[2, 2, 2], raiz='//retConsSitNFe')
        self.chNFe = TagCaracter(nome='chNFe', codigo='ER07a',
                                 tamanho=[44, 44], raiz='//retConsSitNFe',
                                 obrigatorio=False)
        # Protocol / cancellation blocks only exist when present in the
        # returned XML; they are filled in by set_xml.
        self.protNFe = None
        self.retCancNFe = None
        self.caminho_esquema = os.path.join(DIRNAME, 'schema',
                                            ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'retConsSitNFe_v2.00.xsd'

    def get_xml(self):
        """Serialize this element (and any nested protocol) to XML text."""
        partes = [XMLNFe.get_xml(self), ABERTURA]
        for tag in (self.versao, self.tpAmb, self.verAplic, self.cStat,
                    self.xMotivo, self.cUF, self.chNFe):
            partes.append(tag.xml)
        if self.protNFe is not None:
            partes.append(self.protNFe.xml)
        if self.retCancNFe is not None:
            partes.append(tira_abertura(self.retCancNFe.xml))
        partes.append('</retConsSitNFe>')
        return ''.join(partes)

    def set_xml(self, arquivo):
        """Parse XML text/file, filling the tags and optional sub-elements."""
        if self._le_xml(arquivo):
            for tag in (self.versao, self.tpAmb, self.verAplic, self.cStat,
                        self.xMotivo, self.cUF, self.chNFe):
                tag.xml = arquivo
            if self._le_noh('//retConsSitNFe/protNFe') is not None:
                self.protNFe = ProtNFe_200()
                self.protNFe.xml = arquivo
            if self._le_noh('//retConsSitNFe/retCancNFe') is not None:
                self.retCancNFe = RetCancNFe_200()
                self.retCancNFe.xml = arquivo

    xml = property(get_xml, set_xml)
| lgpl-2.1 |
StyXman/satyr | satyr/skins/simple.py | 1 | 2903 | # vim: set fileencoding=utf-8 :
# (c) 2009 Marcos Dione <mdione@grulic.org.ar>
# This file is part of satyr.
# satyr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# satyr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with satyr. If not, see <http://www.gnu.org/licenses/>.
# qt/kde related
from PyKDE4.kdeui import KMainWindow
from PyQt4 import uic
class MainWindow (KMainWindow):
    """Main window of the 'simple' skin: loads the matching .ui file and
    wires the transport buttons to a player object."""
    def __init__ (self, parent=None):
        KMainWindow.__init__ (self, parent)
        # load the .ui file
        # !!! __file__ can end with .py[co]!
        uipath= __file__[:__file__.rfind ('.')]+'.ui'
        (UIMainWindow, buh)= uic.loadUiType (uipath)
        self.ui= UIMainWindow ()
        self.ui.setupUi (self)
    def connectUi (self, player, playlist):
        """Connect the UI buttons to `player` and show `playlist`'s model."""
        self.player= player
        self.playlist= playlist
        # connect buttons!
        self.ui.prevButton.clicked.connect (player.prev)
        # the QPushButton.clicked() emits a bool,
        # and it's False on normal (non-checkable) buttons
        # no, it's not false, it's 0, which is indistinguishable from play(0)
        # so lambda the 'bool' away
        self.ui.playButton.clicked.connect (lambda b: player.play ())
        self.ui.pauseButton.clicked.connect (player.pause)
        self.ui.stopButton.clicked.connect (player.stop)
        self.ui.nextButton.clicked.connect (player.next)
        self.setModel (self.playlist.model)
    def setModel (self, model):
        # Kept trivial; subclass skins may bind `model` to a view here.
        self.model= model
    # BUG?
    # Traceback (most recent call last):
    # File "satyr.py", line 124, in <module>
    # sys.exit (main ())
    # File "satyr.py", line 103, in main
    # collection.scanBegins.connect (mw.scanBegins)
    # AttributeError: 'MainWindow' object has no attribute 'scanBegins'
    def scanBegins (self):
        """Slot: collection scan started (no-op in this skin)."""
        # self.ui.songsList.setEnabled (False)
        # self.ui.songsList.setUpdatesEnabled (False)
        pass
    def scanFinished (self):
        """Slot: collection scan finished (no-op in this skin)."""
        # self.ui.songsList.setEnabled (True)
        # self.ui.songsList.setUpdatesEnabled (True)
        pass
    # BUG
    # Traceback (most recent call last):
    # File "satyr.py", line 124, in <module>
    # sys.exit (main ())
    # File "satyr.py", line 112, in main
    # mw.collectionAdded ()
    # AttributeError: 'MainWindow' object has no attribute 'collectionAdded'
    def collectionAdded (self):
        """Slot: a collection was added (no-op in this skin)."""
        pass
    def queryClose (self):
        # Ask the player to shut down before the window closes;
        # returning True lets KMainWindow proceed with closing.
        self.player.quit ()
        return True
# end
| gpl-2.0 |
editorsnotes/editorsnotes | editorsnotes/main/management/commands/createproject.py | 1 | 1836 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from reversion import revisions as reversion
from editorsnotes.auth.models import Project, User
class Command(BaseCommand):
    """Management command that creates a project and, optionally, adds an
    initial set of users to the project's editor role."""
    args = '"<project_name>" <project_slug>'
    label = ''
    help = 'Create a project'
    option_list = BaseCommand.option_list + (
        make_option('-u',
                    '--users',
                    action='store',
                    help=('comma-separated list of users to be added to the '
                          'project (optional)')),
    )

    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError('Incorrect number of arguments. Usage is:\n'
                               'createproject "<project_name>" <project_slug> '
                               '[--users]')
        project_name, project_slug = args

        users_option = options['users']
        usernames = users_option.split(',') if users_option else []
        users = User.objects.filter(username__in=usernames)
        if len(users) != len(usernames):
            # Report every username that did not resolve to a User.
            bad_usernames = (set(usernames) -
                             set(users.values_list('username', flat=True)))
            raise CommandError('The following are not valid users:\n'
                               '{}'.format(', '.join(bad_usernames)))

        self.create_project(project_name, project_slug, users)

    @transaction.atomic
    def create_project(self, name, slug, users):
        """Create the project under a reversion revision; add users if any."""
        with reversion.create_revision():
            project = Project.objects.create(name=name, slug=slug)
        if users:
            role = project.roles.get()
            role.users.add(*users)
        self.stdout.write('Created new project "{}".'.format(project.name))
| agpl-3.0 |
iglpdc/nipype | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py | 12 | 2093 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..brains import BRAINSMush
def test_BRAINSMush_inputs():
    # NOTE: this file is AUTO-GENERATED (see the header); it checks that
    # every declared input trait carries the expected metadata. Only
    # comments were added here -- do not hand-edit the generated mapping.
    input_map = dict(args=dict(argstr='%s',
    ),
    boundingBoxSize=dict(argstr='--boundingBoxSize %s',
    sep=',',
    ),
    boundingBoxStart=dict(argstr='--boundingBoxStart %s',
    sep=',',
    ),
    desiredMean=dict(argstr='--desiredMean %f',
    ),
    desiredVariance=dict(argstr='--desiredVariance %f',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputFirstVolume=dict(argstr='--inputFirstVolume %s',
    ),
    inputMaskVolume=dict(argstr='--inputMaskVolume %s',
    ),
    inputSecondVolume=dict(argstr='--inputSecondVolume %s',
    ),
    lowerThresholdFactor=dict(argstr='--lowerThresholdFactor %f',
    ),
    lowerThresholdFactorPre=dict(argstr='--lowerThresholdFactorPre %f',
    ),
    numberOfThreads=dict(argstr='--numberOfThreads %d',
    ),
    outputMask=dict(argstr='--outputMask %s',
    hash_files=False,
    ),
    outputVolume=dict(argstr='--outputVolume %s',
    hash_files=False,
    ),
    outputWeightsFile=dict(argstr='--outputWeightsFile %s',
    hash_files=False,
    ),
    seed=dict(argstr='--seed %s',
    sep=',',
    ),
    terminal_output=dict(nohash=True,
    ),
    upperThresholdFactor=dict(argstr='--upperThresholdFactor %f',
    ),
    upperThresholdFactorPre=dict(argstr='--upperThresholdFactorPre %f',
    ),
    )
    inputs = BRAINSMush.input_spec()
    # Generator-style nose test: one assertion per (trait, metadata) pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BRAINSMush_outputs():
    # AUTO-GENERATED (see file header): verifies the declared output traits.
    output_map = dict(outputMask=dict(),
    outputVolume=dict(),
    outputWeightsFile=dict(),
    )
    outputs = BRAINSMush.output_spec()
    # Generator-style nose test: one assertion per (trait, metadata) pair.
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
danielplohmann/apiscout | apiscout/IdaProxy.py | 1 | 2482 | ########################################################################
# Copyright (c) 2020
# Daniel Plohmann <daniel.plohmann<at>mailbox<dot>org>
# All rights reserved.
########################################################################
#
# This file is part of apiscout
#
# apiscout is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
import logging

LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format="%(asctime)-15s %(message)s")

# IDA's Python packages only exist when running inside IDA Pro. Catch the
# specific ImportError (instead of the previous bare `except:`) so other,
# unrelated failures are not mislabelled; re-raise either way so callers
# still see the original error.
try:
    from idaapi import *
    import idc
    import idautils
    import idaapi
    if idaapi.IDA_SDK_VERSION >= 700:
        import ida_bytes
except ImportError:
    print("could not import IDA python packages - probably being used externally")
    raise
class IdaProxy(object):
    """Compatibility shim over the IDA Pro Python API.

    IDA SDK 7.0 renamed most idc/idaapi functions; every method here
    dispatches on idaapi.IDA_SDK_VERSION so callers can use one API on
    both old (< 7.0) and new (>= 7.0) IDA installations.
    """
    def getByte(self, ea):
        """Read one byte from address `ea`."""
        if idaapi.IDA_SDK_VERSION < 700:
            return idc.Byte(ea)
        else:
            return idc.get_wide_byte(ea)
    def getSegEnd(self, ea):
        """Return the end address of the segment containing `ea`."""
        if idaapi.IDA_SDK_VERSION < 700:
            return idc.SegEnd(ea)
        else:
            return idc.get_segm_end(ea)
    def MakeDWord(self, ea):
        """Define a 4-byte data item at `ea`."""
        if idaapi.IDA_SDK_VERSION < 700:
            return idc.MakeDword(ea)
        else:
            return ida_bytes.create_data(ea, FF_DWORD, 4, idaapi.BADADDR)
    def MakeQWord(self, ea):
        """Define an 8-byte data item at `ea`."""
        if idaapi.IDA_SDK_VERSION < 700:
            return idc.MakeQword(ea)
        else:
            return ida_bytes.create_data(ea, FF_QWORD, 8, idaapi.BADADDR)
    def MakeName(self, ea, name):
        """Name address `ea`; 256 is the flag value passed to both APIs."""
        if idaapi.IDA_SDK_VERSION < 700:
            return idc.MakeNameEx(ea, name, 256)
        else:
            return idc.set_name(ea, name, 256)
    def addTil(self, lib_name):
        """Load a type library by name."""
        if idaapi.IDA_SDK_VERSION < 700:
            return add_til(lib_name)
        else:
            return add_til(lib_name, idaapi.ADDTIL_DEFAULT)
| bsd-2-clause |
angr/angr | angr/procedures/posix/poll.py | 1 | 2116 | import angr
import select
######################################
# poll
######################################
class poll(angr.SimProcedure):
    """Symbolic model of poll(2): for every watched fd that requests POLLIN,
    the readiness bit in `revents` is made symbolic, and a symbolic
    non-negative return value is produced."""
    # pylint:disable=arguments-differ
    def run(self, fds, nfds, timeout): # pylint: disable=unused-argument
        # nfds must be concrete: it fixes how many pollfd structs we read.
        try:
            nfds_v = self.state.solver.eval_one(nfds)
        except angr.errors.SimSolverError as e:
            raise angr.errors.SimProcedureArgumentError("Can't handle symbolic pollfd arguments") from e
        ###
        # struct pollfd {
        #     int fd; /* file descriptor */
        #     short events; /* requested events */
        #     short revents; /* returned events */
        # };
        # Layout below assumes the common 4-byte-int/2-byte-short packing.
        size_of_pollfd = 8
        offset_fd = 0
        offset_events = 4
        offset_revents = 6
        # First pass: load each pollfd struct from guest memory.
        pollfd_array = []
        for offset in range(0, nfds_v):
            pollfd = {
                "fd": self.state.memory.load(fds + offset * size_of_pollfd + offset_fd, 4, endness=self.arch.memory_endness),
                "events": self.state.memory.load(fds + offset * size_of_pollfd + offset_events, 2, endness=self.arch.memory_endness),
                "revents": self.state.memory.load(fds + offset * size_of_pollfd + offset_revents, 2, endness=self.arch.memory_endness)
            }
            pollfd_array.append(pollfd)
        # Second pass: for valid fds polling for input, write back revents
        # with a fresh symbolic low bit (the POLLIN flag).
        for (offset,pollfd) in enumerate(pollfd_array):
            # fd and requested events must also be concrete to decide the write.
            try:
                fd = self.state.solver.eval_one(pollfd["fd"])
                events = self.state.solver.eval_one(pollfd["events"])
            except angr.errors.SimSolverError as e:
                raise angr.errors.SimProcedureArgumentError("Can't handle symbolic pollfd arguments") from e
            if events & select.POLLIN and fd >= 0:
                # Keep the upper bits of revents, replace the lowest bit with
                # a fresh symbolic bit. NOTE(review): this assumes
                # self.arch.sizeof["short"] is in bits (16) -- confirm.
                revents = pollfd["revents"][self.arch.sizeof["short"]-1:1].concat(self.state.solver.BVS('fd_POLLIN', 1))
                self.state.memory.store(fds + offset * size_of_pollfd + offset_revents, revents, endness=self.arch.memory_endness)
        # 32-bit symbolic return with the top bit forced to 0, i.e. >= 0
        # as a signed int (poll never "fails" in this model).
        retval = self.state.solver.BVV(0, 1).concat(self.state.solver.BVS('poll_ret', 31))
        return retval
| bsd-2-clause |
wpgallih/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_parser2.py | 451 | 2119 | from __future__ import absolute_import, division, unicode_literals
import io
from . import support # flake8: noqa
from html5lib import html5parser
from html5lib.constants import namespaces
from html5lib import treebuilders
import unittest
# tests that aren't autogenerated from text files
class MoreParserTests(unittest.TestCase):
    """Hand-written parser tests (not generated from the testdata files)."""

    def setUp(self):
        self.dom_tree = treebuilders.getTreeBuilder("dom")

    def test_assertDoctypeCloneable(self):
        # Regression test: the doctype node of a parsed document must be
        # cloneable.
        parser = html5parser.HTMLParser(tree=self.dom_tree)
        doc = parser.parse('<!DOCTYPE HTML>')
        self.assertTrue(doc.cloneNode(True))

    def test_line_counter(self):
        # http://groups.google.com/group/html5lib-discuss/browse_frm/thread/f4f00e4a2f26d5c0
        parser = html5parser.HTMLParser(tree=self.dom_tree)
        parser.parse("<pre>\nx\n>\n</pre>")

    def test_namespace_html_elements_0_dom(self):
        parser = html5parser.HTMLParser(tree=self.dom_tree,
                                        namespaceHTMLElements=True)
        doc = parser.parse("<html></html>")
        # assertEqual (rather than assertTrue on a comparison) reports both
        # values on failure.
        self.assertEqual(doc.childNodes[0].namespaceURI, namespaces["html"])

    def test_namespace_html_elements_1_dom(self):
        parser = html5parser.HTMLParser(tree=self.dom_tree,
                                        namespaceHTMLElements=False)
        doc = parser.parse("<html></html>")
        self.assertIsNone(doc.childNodes[0].namespaceURI)

    def test_namespace_html_elements_0_etree(self):
        parser = html5parser.HTMLParser(namespaceHTMLElements=True)
        doc = parser.parse("<html></html>")
        self.assertEqual(list(doc)[0].tag, "{%s}html" % (namespaces["html"],))

    def test_namespace_html_elements_1_etree(self):
        parser = html5parser.HTMLParser(namespaceHTMLElements=False)
        doc = parser.parse("<html></html>")
        self.assertEqual(list(doc)[0].tag, "html")

    def test_unicode_file(self):
        # The parser must accept a file-like object that yields text.
        parser = html5parser.HTMLParser()
        parser.parse(io.StringIO("a"))
def buildTestSuite():
    """Collect every test in this module into a single suite."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
def main():
    # Build the suite (kept for its historical import-time side effects),
    # then let unittest.main() rediscover and run the tests; it exits the
    # process itself.
    buildTestSuite()
    unittest.main()
if __name__ == '__main__':
    main()
| mpl-2.0 |
FlintHill/SUAS-Competition | env/lib/python3.7/site-packages/pip/_vendor/chardet/compat.py | 270 | 1134 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Dan Blanchard
# Ian Cordasco
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
PY2 = True
PY3 = False
base_str = (str, unicode)
text_type = unicode
else:
PY2 = False
PY3 = True
base_str = (bytes, str)
text_type = str
| mit |
smerritt/swift | test/unit/common/ring/test_builder.py | 2 | 196991 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import mock
import operator
import os
import unittest
import six.moves.cPickle as pickle
from array import array
from collections import Counter, defaultdict
from math import ceil
from tempfile import mkdtemp
from shutil import rmtree
import sys
import random
import uuid
import itertools
from six.moves import range
from swift.common import exceptions
from swift.common import ring
from swift.common.ring import utils
from swift.common.ring.builder import MAX_BALANCE
def _partition_counts(builder, key='id'):
"""
Returns a dictionary mapping the given device key to (number of
partitions assigned to that key).
"""
return Counter(builder.devs[dev_id][key]
for part2dev_id in builder._replica2part2dev
for dev_id in part2dev_id)
class TestRingBuilder(unittest.TestCase):
    def setUp(self):
        # Fresh scratch directory per test; removed again in tearDown.
        self.testdir = mkdtemp()
    def tearDown(self):
        # Best-effort cleanup of the per-test scratch directory.
        rmtree(self.testdir, ignore_errors=1)
def _get_population_by_region(self, builder):
"""
Returns a dictionary mapping region to number of partitions in that
region.
"""
return _partition_counts(builder, key='region')
def test_init(self):
rb = ring.RingBuilder(8, 3, 1)
self.assertEqual(rb.part_power, 8)
self.assertEqual(rb.replicas, 3)
self.assertEqual(rb.min_part_hours, 1)
self.assertEqual(rb.parts, 2 ** 8)
self.assertEqual(rb.devs, [])
self.assertFalse(rb.devs_changed)
self.assertEqual(rb.version, 0)
self.assertIsNotNone(rb._last_part_moves)
def test_overlarge_part_powers(self):
expected_msg = 'part_power must be at most 32 (was 33)'
with self.assertRaises(ValueError) as ctx:
ring.RingBuilder(33, 3, 1)
self.assertEqual(str(ctx.exception), expected_msg)
def test_insufficient_replicas(self):
expected_msg = 'replicas must be at least 1 (was 0.999000)'
with self.assertRaises(ValueError) as ctx:
ring.RingBuilder(8, 0.999, 1)
self.assertEqual(str(ctx.exception), expected_msg)
def test_negative_min_part_hours(self):
expected_msg = 'min_part_hours must be non-negative (was -1)'
with self.assertRaises(ValueError) as ctx:
ring.RingBuilder(8, 3, -1)
self.assertEqual(str(ctx.exception), expected_msg)
def test_deepcopy(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10002, 'device': 'sdb1'})
# more devices in zone #1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sdc1'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sdd1'})
rb.rebalance()
rb_copy = copy.deepcopy(rb)
self.assertEqual(rb.to_dict(), rb_copy.to_dict())
self.assertIsNot(rb.devs, rb_copy.devs)
self.assertIsNot(rb._replica2part2dev, rb_copy._replica2part2dev)
self.assertIsNot(rb._last_part_moves, rb_copy._last_part_moves)
self.assertIsNot(rb._remove_devs, rb_copy._remove_devs)
self.assertIsNot(rb._dispersion_graph, rb_copy._dispersion_graph)
def test_get_ring(self):
    """get_ring caches its RingData until the next rebalance."""
    rb = ring.RingBuilder(8, 3, 1)
    for dev_id, zone, port in ((0, 0, 10000), (1, 1, 10001),
                               (2, 2, 10002), (3, 1, 10004)):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1,
                    'ip': '127.0.0.1', 'port': port, 'device': 'sda1'})
    rb.remove_dev(1)
    rb.rebalance()

    first = rb.get_ring()
    self.assertIsInstance(first, ring.RingData)
    # Repeated calls hand back the cached object...
    self.assertIs(rb.get_ring(), first)
    # ...until a rebalance invalidates the cache.
    rb.rebalance()
    rebuilt = rb.get_ring()
    self.assertIsNot(rebuilt, first)
    self.assertIs(rb.get_ring(), rebuilt)
def test_rebalance_with_seed(self):
    """Seeded rebalances are reproducible and restore random state.

    Three identical builders: one rebalanced unseeded, two with the
    same seed.  The seeded pair must agree with each other and differ
    from the unseeded run, and rebalance(seed=...) must leave the
    module-level random state untouched -- even when it raises.
    """
    devs = [(0, 10000), (1, 10001), (2, 10002), (1, 10003)]
    ring_builders = []
    for n in range(3):
        rb = ring.RingBuilder(8, 3, 1)
        idx = 0
        for zone, port in devs:
            for d in ('sda1', 'sdb1'):
                rb.add_dev({'id': idx, 'region': 0, 'zone': zone,
                            'ip': '127.0.0.1', 'port': port,
                            'device': d, 'weight': 1})
                idx += 1
        ring_builders.append(rb)
    rb0 = ring_builders[0]
    rb1 = ring_builders[1]
    rb2 = ring_builders[2]
    r0 = rb0.get_ring()
    self.assertIs(rb0.get_ring(), r0)
    rb0.rebalance()  # NO SEED
    rb1.rebalance(seed=10)
    rb2.rebalance(seed=10)
    r1 = rb1.get_ring()
    r2 = rb2.get_ring()
    # Rebalancing invalidates the cached ring object.
    self.assertIsNot(rb0.get_ring(), r0)
    self.assertNotEqual(r0.to_dict(), r1.to_dict())
    self.assertEqual(r1.to_dict(), r2.to_dict())
    # check that random state is reset
    pre_state = random.getstate()
    rb2.rebalance(seed=10)
    self.assertEqual(pre_state, random.getstate(),
                     "Random state was not reset")
    # ...and restored even if rebalance raises partway through.
    pre_state = random.getstate()
    with mock.patch.object(rb2, "_build_replica_plan",
                           side_effect=Exception()):
        self.assertRaises(Exception, rb2.rebalance, seed=10)
    self.assertEqual(pre_state, random.getstate(),
                     "Random state was not reset")
def test_rebalance_part_on_deleted_other_part_on_drained(self):
    """Rebalance copes with a partition that simultaneously has one
    replica on a removed device and another on a zero-weight device."""
    rb = ring.RingBuilder(8, 3, 1)
    for dev_id in range(6):
        rb.add_dev({'id': dev_id, 'region': 1, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000 + dev_id,
                    'device': 'sda1'})
    rb.rebalance(seed=1)

    # We want a partition where 1 replica is on a removed device, 1
    # replica is on a 0-weight device, and 1 on a normal device. To
    # guarantee we have one, we see where partition 123 is, then
    # manipulate its devices accordingly.
    zero_weight_dev_id = rb._replica2part2dev[1][123]
    delete_dev_id = rb._replica2part2dev[2][123]

    rb.set_dev_weight(zero_weight_dev_id, 0.0)
    rb.remove_dev(delete_dev_id)
    rb.rebalance()
def test_set_replicas(self):
    """set_replicas flags devs_changed only for an effective change."""
    rb = ring.RingBuilder(8, 3.2, 1)
    # The second value differs from 3.25 by less than the rounding
    # granularity, so it must be treated as a no-op.
    for new_replicas, expect_changed in ((3.25, True), (3.2500001, False)):
        rb.devs_changed = False
        rb.set_replicas(new_replicas)
        self.assertEqual(rb.devs_changed, expect_changed)
def test_add_dev(self):
    """add_dev assigns ids, rejects duplicates, and validates keys.

    Covers: explicit-id add, DuplicateDeviceError on re-adding the same
    id, automatic id assignment for devices added without an 'id', and
    ValueError when any required key is missing.
    """
    rb = ring.RingBuilder(8, 3, 1)
    dev = {'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
           'ip': '127.0.0.1', 'port': 10000}
    dev_id = rb.add_dev(dev)
    # Adding the same device id twice must fail.
    self.assertRaises(exceptions.DuplicateDeviceError, rb.add_dev, dev)
    self.assertEqual(dev_id, 0)
    rb = ring.RingBuilder(8, 3, 1)
    # test add new dev with no id: the next free id (0) is assigned
    dev_id = rb.add_dev({'zone': 0, 'region': 1, 'weight': 1,
                         'ip': '127.0.0.1', 'port': 6200})
    self.assertEqual(rb.devs[0]['id'], 0)
    self.assertEqual(dev_id, 0)
    # test add another dev with no id
    dev_id = rb.add_dev({'zone': 3, 'region': 2, 'weight': 1,
                         'ip': '127.0.0.1', 'port': 6200})
    self.assertEqual(rb.devs[1]['id'], 1)
    self.assertEqual(dev_id, 1)
    # some keys are required; dropping any one of them must raise
    self.assertRaises(ValueError, rb.add_dev, {})
    stub_dev = {'weight': 1, 'ip': '127.0.0.1', 'port': 7000}
    # Iterate the dict directly -- no need for the redundant .keys() call.
    for key in stub_dev:
        dev = stub_dev.copy()
        dev.pop(key)
        self.assertRaises(ValueError, rb.add_dev, dev)
def test_set_dev_weight(self):
    """Per-device partition counts track device-weight changes."""
    def replica_counts(r):
        # Tally how many (replica, part) assignments each device holds.
        tally = {}
        for part2dev_id in r._replica2part2dev_id:
            for dev_id in part2dev_id:
                tally[dev_id] = tally.get(dev_id, 0) + 1
        return tally

    rb = ring.RingBuilder(8, 3, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
    rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
    rb.add_dev({'id': 3, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
    rb.rebalance()
    self.assertEqual(replica_counts(rb.get_ring()),
                     {0: 128, 1: 128, 2: 256, 3: 256})

    # Shift weight from device 1 to device 0 and rebalance again.
    rb.set_dev_weight(0, 0.75)
    rb.set_dev_weight(1, 0.25)
    rb.pretend_min_part_hours_passed()
    rb.rebalance()
    self.assertEqual(replica_counts(rb.get_ring()),
                     {0: 192, 1: 64, 2: 256, 3: 256})
def test_remove_dev(self):
    """A removed device's partitions are redistributed on rebalance."""
    def replica_counts(r):
        # Tally how many (replica, part) assignments each device holds.
        tally = {}
        for part2dev_id in r._replica2part2dev_id:
            for dev_id in part2dev_id:
                tally[dev_id] = tally.get(dev_id, 0) + 1
        return tally

    rb = ring.RingBuilder(8, 3, 1)
    for dev_id in range(4):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': dev_id,
                    'weight': 1, 'ip': '127.0.0.1',
                    'port': 10000 + dev_id, 'device': 'sda1'})
    rb.rebalance()
    self.assertEqual(replica_counts(rb.get_ring()),
                     {0: 192, 1: 192, 2: 192, 3: 192})

    rb.remove_dev(1)
    rb.pretend_min_part_hours_passed()
    rb.rebalance()
    self.assertEqual(replica_counts(rb.get_ring()),
                     {0: 256, 2: 256, 3: 256})
def test_round_off_error(self):
    """33 equal devices balance to exact zone counts despite float
    round-off in the per-device target computation."""
    # 3 nodes with 11 disks each is particularly problematic. Probably has
    # to do with the binary repr. of 1/33? Those ones look suspicious...
    #
    # >>> bin(int(struct.pack('!f', 1.0/(33)).encode('hex'), 16))
    # '0b111100111110000011111000010000'
    rb = ring.RingBuilder(8, 3, 1)
    for dev_id, (region, zone) in enumerate(
            11 * [(0, 0), (1, 10), (1, 11)]):
        rb.add_dev({'id': dev_id, 'region': region, 'zone': zone,
                    'weight': 1, 'ip': '127.0.0.1',
                    'port': 10000 + region * 100 + zone,
                    'device': 'sda%d' % dev_id})
    rb.rebalance()
    self.assertEqual(_partition_counts(rb, 'zone'),
                     {0: 256, 10: 256, 11: 256})
    wanted_by_zone = defaultdict(lambda: defaultdict(int))
    for dev in rb._iter_devs():
        wanted_by_zone[dev['zone']][dev['parts_wanted']] += 1
    # We're nicely balanced, but parts_wanted is slightly lumpy
    # because reasons.
    self.assertEqual(wanted_by_zone, {
        0: {0: 10, 1: 1},
        10: {0: 11},
        11: {0: 10, -1: 1}})
def test_remove_a_lot(self):
    """Removing half the cluster still rebalances and validates."""
    rb = ring.RingBuilder(3, 3, 1)
    # Six 1000-weight devices: zones 1-3, two per zone (ids 0-2 then
    # 3-5 cycle through the same ip/zone pattern).
    for dev_id in range(6):
        zone = (dev_id % 3) + 1
        rb.add_dev({'id': dev_id, 'device': 'd%d' % dev_id,
                    'ip': '10.0.0.%d' % zone, 'port': 6202,
                    'weight': 1000.0, 'region': 0, 'zone': zone})
    rb.rebalance()
    rb.validate()

    # this has to put more than 1/3 of the partitions in the
    # cluster on removed devices in order to ensure that at least
    # one partition has multiple replicas that need to move.
    #
    # (for an N-replica ring, it's more than 1/N of the
    # partitions, of course)
    for dev_id in (3, 4, 5):
        rb.remove_dev(dev_id)

    rb.rebalance()
    rb.validate()
def test_remove_zero_weighted(self):
    """Removing a zero-weight device is reported as exactly one removal."""
    rb = ring.RingBuilder(8, 3, 0)
    device_specs = [(0, 'd0', '10.0.0.1', 1000.0, 1),
                    (1, 'd1', '10.0.0.2', 0.0, 2),
                    (2, 'd2', '10.0.0.3', 1000.0, 3),
                    (3, 'd3', '10.0.0.1', 1000.0, 1)]
    for dev_id, device, ip, weight, zone in device_specs:
        rb.add_dev({'id': dev_id, 'device': device, 'ip': ip,
                    'port': 6202, 'weight': weight, 'region': 0,
                    'zone': zone})
    rb.rebalance()

    rb.remove_dev(1)
    _parts, _balance, removed = rb.rebalance()
    self.assertEqual(removed, 1)
def test_shuffled_gather(self):
    """Two consecutive unshuffled-looking gathers is overwhelmingly
    unlikely if partition gathering is actually shuffled."""
    if all(self._shuffled_gather_helper() for _ in range(2)):
        raise AssertionError('It is highly likely the ring is no '
                             'longer shuffling the set of partitions '
                             'to reassign on a rebalance.')
def _shuffled_gather_helper(self):
    """Gather parts for one rebalance and report whether the result
    looks unshuffled.

    Returns True when more than half of the gathered partitions form a
    single ascending run in iteration order -- i.e. the gather came
    back essentially sorted.
    """
    rb = ring.RingBuilder(8, 3, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
    rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
    rb.rebalance()
    # Adding a fourth device gives the next rebalance something to
    # gather partitions for.
    rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
    replica_plan = rb._build_replica_plan()
    rb._set_parts_wanted(replica_plan)
    for dev in rb._iter_devs():
        dev['tiers'] = utils.tiers_for_dev(dev)
    assign_parts = defaultdict(list)
    rb._gather_parts_for_balance(assign_parts, replica_plan, False)
    # Find the longest strictly-ascending run of part numbers in the
    # order they were gathered.
    max_run = 0
    run = 0
    last_part = 0
    for part, _ in assign_parts.items():
        if part > last_part:
            run += 1
        else:
            if run > max_run:
                max_run = run
            run = 0
        last_part = part
    if run > max_run:
        max_run = run
    return max_run > len(assign_parts) / 2
def test_initial_balance(self):
    """First rebalance of a balanceable layout comes out balanced.

    2 boxes with 2 drives each in zone 1, plus 1 box with 2 drives in
    zone 2.  This is balanceable, but there used to be some
    nondeterminism in rebalance() that would sometimes give you an
    imbalanced ring.
    """
    rb = ring.RingBuilder(8, 3, 1)
    for ip, zone in (('10.1.1.1', 1), ('10.1.1.2', 1), ('10.1.1.3', 2)):
        for device in ('sda', 'sdb'):
            rb.add_dev({'region': 1, 'zone': zone, 'weight': 4000.0,
                        'ip': ip, 'port': 10000, 'device': device})

    _parts, balance, _removed = rb.rebalance(seed=2)

    # maybe not *perfect*, but should be close
    self.assertLessEqual(balance, 1)
def test_multitier_partial(self):
    """Replicas disperse across tiers when no tier has to be 'full'.

    Scenario 1: 4 regions/zones, 3 replicas -- no region or zone may
    hold two replicas of the same partition.  Scenario 2: 3 zones of
    3 devices, 6 replicas -- exactly two replicas per zone and never
    two on the same device.
    """
    # Multitier test, nothing full
    rb = ring.RingBuilder(8, 3, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
    rb.add_dev({'id': 2, 'region': 2, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
    rb.add_dev({'id': 3, 'region': 3, 'zone': 3, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
    rb.rebalance()
    rb.validate()
    for part in range(rb.parts):
        counts = defaultdict(lambda: defaultdict(int))
        for replica in range(rb.replicas):
            dev = rb.devs[rb._replica2part2dev[replica][part]]
            counts['region'][dev['region']] += 1
            counts['zone'][dev['zone']] += 1
        if any(c > 1 for c in counts['region'].values()):
            raise AssertionError(
                "Partition %d not evenly region-distributed (got %r)" %
                (part, counts['region']))
        if any(c > 1 for c in counts['zone'].values()):
            raise AssertionError(
                "Partition %d not evenly zone-distributed (got %r)" %
                (part, counts['zone']))
    # Multitier test, zones full, nodes not full
    rb = ring.RingBuilder(8, 6, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
    rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
    rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd'})
    rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10001, 'device': 'sde'})
    rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10001, 'device': 'sdf'})
    rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10002, 'device': 'sdg'})
    rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10002, 'device': 'sdh'})
    rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10002, 'device': 'sdi'})
    rb.rebalance()
    rb.validate()
    for part in range(rb.parts):
        counts = defaultdict(lambda: defaultdict(int))
        for replica in range(rb.replicas):
            dev = rb.devs[rb._replica2part2dev[replica][part]]
            counts['zone'][dev['zone']] += 1
            counts['dev_id'][dev['id']] += 1
        if counts['zone'] != {0: 2, 1: 2, 2: 2}:
            raise AssertionError(
                "Partition %d not evenly distributed (got %r)" %
                (part, counts['zone']))
        for dev_id, replica_count in counts['dev_id'].items():
            if replica_count > 1:
                raise AssertionError(
                    "Partition %d is on device %d more than once (%r)" %
                    (part, dev_id, counts['dev_id']))
def test_multitier_full(self):
    """With #replicas == #devs, each device holds every partition once
    and each zone holds exactly two replicas of every partition."""
    rb = ring.RingBuilder(8, 6, 1)
    device_specs = [(0, 0, 10000, 'sda'), (1, 0, 10000, 'sdb'),
                    (2, 1, 10000, 'sdc'), (3, 1, 10001, 'sdd'),
                    (4, 2, 10001, 'sde'), (5, 2, 10001, 'sdf')]
    for dev_id, zone, port, device in device_specs:
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1,
                    'ip': '127.0.0.1', 'port': port, 'device': device})
    rb.rebalance()
    rb.validate()

    for part in range(rb.parts):
        tally = defaultdict(lambda: defaultdict(int))
        for replica in range(rb.replicas):
            dev = rb.devs[rb._replica2part2dev[replica][part]]
            tally['zone'][dev['zone']] += 1
            tally['dev_id'][dev['id']] += 1
        if tally['zone'] != {0: 2, 1: 2, 2: 2}:
            raise AssertionError(
                "Partition %d not evenly distributed (got %r)" %
                (part, tally['zone']))
        for dev_id, replica_count in tally['dev_id'].items():
            if replica_count != 1:
                raise AssertionError(
                    "Partition %d is on device %d %d times, not 1 (%r)" %
                    (part, dev_id, replica_count, tally['dev_id']))
def test_multitier_overfull(self):
    """With more replicas (8) than zones (3), replicas still spread as
    evenly as possible: 2-3 per zone, at most 2 per device."""
    rb = ring.RingBuilder(8, 8, 1)
    device_specs = [(0, 0, 10000, 'sda'), (1, 0, 10000, 'sdb'),
                    (6, 0, 10000, 'sdg'),
                    (2, 1, 10000, 'sdc'), (3, 1, 10001, 'sdd'),
                    (7, 1, 10001, 'sdh'),
                    (4, 2, 10001, 'sde'), (5, 2, 10001, 'sdf'),
                    (8, 2, 10001, 'sdi')]
    for dev_id, zone, port, device in device_specs:
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1,
                    'ip': '127.0.0.1', 'port': port, 'device': device})
    rb.rebalance()
    rb.validate()

    for part in range(rb.parts):
        tally = defaultdict(lambda: defaultdict(int))
        for replica in range(rb.replicas):
            dev = rb.devs[rb._replica2part2dev[replica][part]]
            tally['zone'][dev['zone']] += 1
            tally['dev_id'][dev['id']] += 1
        self.assertEqual(8, sum(tally['zone'].values()))
        for zone, replica_count in tally['zone'].items():
            if replica_count not in (2, 3):
                raise AssertionError(
                    "Partition %d not evenly distributed (got %r)" %
                    (part, tally['zone']))
        for dev_id, replica_count in tally['dev_id'].items():
            if replica_count not in (1, 2):
                raise AssertionError(
                    "Partition %d is on device %d %d times, "
                    "not 1 or 2 (%r)" %
                    (part, dev_id, replica_count, tally['dev_id']))
def test_multitier_expansion_more_devices(self):
    """Doubling the devices per zone converges, over several throttled
    rebalances, to two replicas per zone on six distinct devices."""
    rb = ring.RingBuilder(8, 6, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
    rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
    rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
    rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
    rb.rebalance()
    rb.validate()
    # Expansion: a second device pair per zone.
    rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
    rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
    rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
    rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
    rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
    rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
    # Multiple passes, each with the min_part_hours throttle lifted, so
    # all required moves can complete.
    for _ in range(5):
        rb.pretend_min_part_hours_passed()
        rb.rebalance()
    rb.validate()
    for part in range(rb.parts):
        counts = dict(zone=defaultdict(int),
                      dev_id=defaultdict(int))
        for replica in range(rb.replicas):
            dev = rb.devs[rb._replica2part2dev[replica][part]]
            counts['zone'][dev['zone']] += 1
            counts['dev_id'][dev['id']] += 1
        self.assertEqual({0: 2, 1: 2, 2: 2}, dict(counts['zone']))
        # each part is assigned once to six unique devices
        self.assertEqual(list(counts['dev_id'].values()), [1] * 6)
        self.assertEqual(len(set(counts['dev_id'].keys())), 6)
def test_multitier_part_moves_with_0_min_part_hours(self):
    """With min_part_hours == 0, two replicas of a partition may move
    to new devices in a single rebalance."""
    rb = ring.RingBuilder(8, 3, 0)
    for dev_id, device in ((0, 'sda1'), (3, 'sdd1'), (4, 'sde1')):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': device})
    rb.rebalance()
    rb.validate()

    # min_part_hours is 0, so we're clear to move 2 replicas to new devs
    for dev_id, device in ((1, 'sdb1'), (2, 'sdc1')):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': device})
    rb.rebalance()
    rb.validate()

    for part in range(rb.parts):
        assigned = {rb._replica2part2dev[replica][part]
                    for replica in range(rb.replicas)}
        if len(assigned) != 3:
            raise AssertionError(
                "Partition %d not on 3 devs (got %r)" % (part, assigned))
def test_multitier_part_moves_with_positive_min_part_hours(self):
    """With a positive min_part_hours, each partition still manages to
    move at least one replica into the newly-added zone."""
    rb = ring.RingBuilder(8, 3, 99)
    for dev_id, device in ((0, 'sda1'), (3, 'sdd1'), (4, 'sde1')):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': device})
    rb.rebalance()
    rb.validate()

    # min_part_hours is >0, so we'll only be able to move 1 replica to
    # a new home
    for dev_id, device in ((1, 'sdb1'), (2, 'sdc1')):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': device})
    rb.pretend_min_part_hours_passed()
    rb.rebalance()
    rb.validate()

    for part in range(rb.parts):
        assigned = {rb._replica2part2dev[replica][part]
                    for replica in range(rb.replicas)}
        if not any(rb.devs[dev_id]['zone'] == 1 for dev_id in assigned):
            raise AssertionError(
                "Partition %d did not move (got %r)" % (part, assigned))
def test_multitier_dont_move_too_many_replicas(self):
    """Adding three new zones moves only one replica per partition; the
    other two stay in the original zones 0 and 1."""
    rb = ring.RingBuilder(8, 3, 1)
    # there'll be at least one replica in z0 and z1
    for dev_id, zone, device in ((0, 0, 'sda1'), (1, 1, 'sdb1'),
                                 (5, 0, 'sda1'), (6, 1, 'sdb1')):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': zone,
                    'weight': 0.5, 'ip': '127.0.0.1', 'port': 10000,
                    'device': device})
    rb.rebalance()
    rb.validate()

    # only 1 replica should move
    for dev_id, zone, device in ((2, 2, 'sdd1'), (3, 3, 'sde1'),
                                 (4, 4, 'sdf1')):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': zone, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': device})
    rb.pretend_min_part_hours_passed()
    rb.rebalance()
    rb.validate()

    for part in range(rb.parts):
        zones_seen = {
            rb.devs[rb._replica2part2dev[replica][part]]['zone']
            for replica in range(rb.replicas)}
        if len(zones_seen) != 3:
            raise AssertionError(
                "Partition %d not in 3 zones (got %r)" % (part, zones_seen))
        if 0 not in zones_seen or 1 not in zones_seen:
            raise AssertionError(
                "Partition %d not in zones 0 and 1 (got %r)" %
                (part, zones_seen))
def test_min_part_hours_zero_will_move_one_replica(self):
    """Seeded scenario for replica movement with min_part_hours == 0.

    After adding three new zones, the first rebalance reaches full
    dispersion by moving one replica per partition; a second rebalance
    then balances further, with many partitions moving two replicas in
    one pass (allowed because min_part_hours is 0).
    """
    rb = ring.RingBuilder(8, 3, 0)
    # there'll be at least one replica in z0 and z1
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
    rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
    rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 0.5,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
    rb.rebalance(seed=1)
    rb.validate()
    rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
    rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sde1'})
    rb.add_dev({'id': 4, 'region': 0, 'zone': 4, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf1'})
    rb.rebalance(seed=3)
    rb.validate()
    self.assertEqual(0, rb.dispersion)
    # Only one replica could move, so some zones are quite unbalanced
    self.assertAlmostEqual(rb.get_balance(), 66.66, delta=0.5)
    # There was only zone 0 and 1 before adding more devices. Only one
    # replica should have been moved, therefore we expect 256 parts in zone
    # 0 and 1, and a total of 256 in zone 2,3, and 4
    expected = defaultdict(int, {0: 256, 1: 256, 2: 86, 3: 85, 4: 85})
    self.assertEqual(expected, _partition_counts(rb, key='zone'))
    zone_histogram = defaultdict(int)
    for part in range(rb.parts):
        zones = [
            rb.devs[rb._replica2part2dev[replica][part]]['zone']
            for replica in range(rb.replicas)]
        zone_histogram[tuple(sorted(zones))] += 1
    # We expect that every partition moved exactly one replica
    expected = {
        (0, 1, 2): 86,
        (0, 1, 3): 85,
        (0, 1, 4): 85,
    }
    self.assertEqual(zone_histogram, expected)
    # After rebalancing one more times, we expect that everything is in a
    # good state
    rb.rebalance(seed=3)
    self.assertEqual(0, rb.dispersion)
    # a balance of w/i a 1% isn't too bad for 3 replicas on 7
    # devices when part power is only 8
    self.assertAlmostEqual(rb.get_balance(), 0, delta=0.5)
    # every zone has either 153 or 154 parts
    for zone, count in _partition_counts(
            rb, key='zone').items():
        self.assertAlmostEqual(153.5, count, delta=1)
    parts_with_moved_count = defaultdict(int)
    for part in range(rb.parts):
        zones = set()
        for replica in range(rb.replicas):
            zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone'])
        moved_replicas = len(zones - {0, 1})
        parts_with_moved_count[moved_replicas] += 1
    # as usual, the real numbers depend on the seed, but we want to
    # validate a few things here:
    #
    # 1) every part had to move one replica to hit dispersion (so no
    # one can have a moved count 0)
    #
    # 2) it's quite reasonable that some small percent of parts will
    # have a replica in {0, 1, X} (meaning only one replica of the
    # part moved)
    #
    # 3) when min_part_hours is 0, more than one replica of a part
    # can move in a rebalance, and since that movement would get to
    # better dispersion faster we expect to observe most parts in
    # {[0,1], X, X} (meaning *two* replicas of the part moved)
    #
    # 4) there's plenty of weight in z0 & z1 to hold a whole
    # replicanth, so there is no reason for any part to have to move
    # all three replicas out of those zones (meaning no one can have
    # a moved count 3)
    #
    expected = {
        1: 52,
        2: 204,
    }
    self.assertEqual(parts_with_moved_count, expected)
def test_ever_rebalanced(self):
    """ever_rebalanced flips on the first rebalance and survives a
    save/load round-trip in both states."""
    rb = ring.RingBuilder(8, 3, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
    rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
    self.assertFalse(rb.ever_rebalanced)
    # Temp file in the per-test dir (filename typo 'buider' fixed).
    builder_file = os.path.join(self.testdir, 'test.builder')
    rb.save(builder_file)
    rb = ring.RingBuilder.load(builder_file)
    self.assertFalse(rb.ever_rebalanced)
    rb.rebalance()
    self.assertTrue(rb.ever_rebalanced)
    rb.save(builder_file)
    rb = ring.RingBuilder.load(builder_file)
    self.assertTrue(rb.ever_rebalanced)
def test_rerebalance(self):
    """Repeated rebalances keep tracking device additions and weight
    changes."""
    rb = ring.RingBuilder(8, 3, 1)
    for dev_id in range(3):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': dev_id,
                    'weight': 1, 'ip': '127.0.0.1',
                    'port': 10000 + dev_id, 'device': 'sda1'})
    self.assertFalse(rb.ever_rebalanced)
    rb.rebalance()
    self.assertTrue(rb.ever_rebalanced)
    self.assertEqual(_partition_counts(rb), {0: 256, 1: 256, 2: 256})

    rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 1,
                'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
    rb.pretend_min_part_hours_passed()
    rb.rebalance()
    self.assertTrue(rb.ever_rebalanced)
    self.assertEqual(_partition_counts(rb),
                     {0: 192, 1: 192, 2: 192, 3: 192})

    # A heavily overweight device 3 pulls a full replica's worth.
    rb.set_dev_weight(3, 100)
    rb.rebalance()
    self.assertEqual(_partition_counts(rb)[3], 256)
def test_add_rebalance_add_rebalance_delete_rebalance(self):
    """Regression test for https://bugs.launchpad.net/swift/+bug/845952."""
    # min_part of 0 to allow for rapid rebalancing
    rb = ring.RingBuilder(8, 3, 0)
    for dev_id in range(3):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': dev_id,
                    'weight': 1, 'ip': '127.0.0.1',
                    'port': 10000 + dev_id, 'device': 'sda1'})
    rb.rebalance()
    rb.validate()

    # A second device in each of the three zones.
    for dev_id in range(3, 6):
        rb.add_dev({'id': dev_id, 'region': 0, 'zone': dev_id - 3,
                    'weight': 1, 'ip': '127.0.0.1',
                    'port': 10000 + dev_id, 'device': 'sda1'})
    rb.rebalance()
    rb.validate()

    # zone 1 is now down to a single device
    rb.remove_dev(1)
    rb.set_overload(0.5)
    rb.rebalance()
    rb.validate()
def test_remove_last_partition_from_zero_weight(self):
    """Dropping a device to zero weight must drain its last replicas on
    the next rebalance (regression test; see long comment below)."""
    rb = ring.RingBuilder(4, 3, 1)
    rb.add_dev({'id': 0, 'region': 0, 'zone': 1, 'weight': 1.0,
                'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 1, 'region': 0, 'zone': 2, 'weight': 1.0,
                'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0,
                'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
    rb.add_dev({'id': 2, 'region': 0, 'zone': 3, 'weight': 1.0,
                'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
    rb.add_dev({'id': 5, 'region': 0, 'zone': 3, 'weight': 1.0,
                'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
    rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 1.0,
                'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
    rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 0.4,
                'ip': '127.0.0.3', 'port': 10001, 'device': 'zero'})
    zero_weight_dev = 3
    rb.rebalance(seed=1)
    # We want at least one partition with replicas only in zone 2 and 3
    # due to device weights. It would *like* to spread out into zone 1,
    # but can't, due to device weight.
    #
    # Also, we want such a partition to have a replica on device 3,
    # which we will then reduce to zero weight. This should cause the
    # removal of the replica from device 3.
    #
    # Getting this to happen by chance is hard, so let's just set up a
    # builder so that it's in the state we want. This is a synthetic
    # example; while the bug has happened on a real cluster, that
    # builder file had a part_power of 16, so its contents are much too
    # big to include here.
    rb._replica2part2dev = [
        # these are the relevant ones
        #                 |  |  |
        #                 v  v  v
        array('H', [2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2, 5, 6, 2]),
        array('H', [1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4]),
        array('H', [0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 5, 6, 2, 5, 6])]
    # fix up bookkeeping: recompute each device's 'parts' tally to
    # match the hand-written assignment table above
    new_dev_parts = defaultdict(int)
    for part2dev_id in rb._replica2part2dev:
        for dev_id in part2dev_id:
            new_dev_parts[dev_id] += 1
    for dev in rb._iter_devs():
        dev['parts'] = new_dev_parts[dev['id']]
    rb.set_dev_weight(zero_weight_dev, 0.0)
    rb.pretend_min_part_hours_passed()
    rb.rebalance(seed=1)
    # The drained device must hold no partition replicas at all.
    node_counts = defaultdict(int)
    for part2dev_id in rb._replica2part2dev:
        for dev_id in part2dev_id:
            node_counts[dev_id] += 1
    self.assertEqual(node_counts[zero_weight_dev], 0)
    # it's as balanced as it gets, so nothing moves anymore
    rb.pretend_min_part_hours_passed()
    parts_moved, _balance, _removed = rb.rebalance(seed=1)
    new_node_counts = defaultdict(int)
    for part2dev_id in rb._replica2part2dev:
        for dev_id in part2dev_id:
            new_node_counts[dev_id] += 1
    del node_counts[zero_weight_dev]
    self.assertEqual(node_counts, new_node_counts)
    self.assertEqual(parts_moved, 0)
    def test_part_swapping_problem(self):
        """Regression test related to bug 1724356.

        Builds a small two-server ring, verifies the per-server replica
        shares and dispersion, then hand-crafts a skewed (but valid)
        assignment table and checks that a subsequent rebalance does not
        make the balance any worse.
        """
        rb = ring.RingBuilder(4, 3, 1)
        # 127.0.0.1 (2 devs)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        # 127.0.0.2 (3 devs)
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
        # per-server replica shares: 2/5 and 3/5 of 3 replicas
        expected = {
            '127.0.0.1': 1.2,
            '127.0.0.2': 1.7999999999999998,
        }
        # all three replica-plan calculations must agree on those shares
        for wr in (rb._build_weighted_replicas_by_tier(),
                   rb._build_wanted_replicas_by_tier(),
                   rb._build_target_replicas_by_tier()):
            self.assertEqual(expected, {t[-1]: r for (t, r) in
                                        wr.items() if len(t) == 3})
        self.assertEqual(rb.get_required_overload(), 0)
        rb.rebalance(seed=3)
        # so 127.0.0.1 ended up with...
        tier = (0, 0, '127.0.0.1')
        # ... 12 parts with 1 replica
        self.assertEqual(rb._dispersion_graph[tier][1], 12)
        # ... 4 parts with 2 replicas
        self.assertEqual(rb._dispersion_graph[tier][2], 4)
        # but since we only have two tiers, this is *totally* dispersed
        self.assertEqual(0, rb.dispersion)
        # small rings are hard to balance...
        expected = {0: 10, 1: 10, 2: 10, 3: 9, 4: 9}
        self.assertEqual(expected, {d['id']: d['parts']
                                    for d in rb._iter_devs()})
        # everyone wants 9.6 parts
        expected = {
            0: 4.166666666666671,
            1: 4.166666666666671,
            2: 4.166666666666671,
            3: -6.25,
            4: -6.25,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        # original sorted _replica2part2dev
        """
        rb._replica2part2dev = [
            array('H', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]),
            array('H', [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3]),
            array('H', [2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4])]
        """
        # now imagine if we came along this _replica2part2dev through no
        # fault of our own; if instead of the 12 parts with only one
        # replica on 127.0.0.1 being split evenly (6 and 6) on device's
        # 0 and 1 - device 1 inexplicably had 3 extra parts
        rb._replica2part2dev = [
            # these are the relevant ones here
            #                       |  |  |
            #                       v  v  v
            array('H', [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
            array('H', [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3]),
            array('H', [2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4])]
        # fix up bookkeeping: recompute per-device part counts to match
        # the hand-written table
        new_dev_parts = defaultdict(int)
        for part2dev_id in rb._replica2part2dev:
            for dev_id in part2dev_id:
                new_dev_parts[dev_id] += 1
        for dev in rb._iter_devs():
            dev['parts'] = new_dev_parts[dev['id']]
        # reset the _last_part_gather_start otherwise
        # there is a chance it'll unluckily wrap and try and
        # move one of the device 1's from replica 2
        # causing the intermittent failure in bug 1724356
        rb._last_part_gather_start = 0
        rb.pretend_min_part_hours_passed()
        rb.rebalance()
        # balance is no worse than before the skewed table was installed
        expected = {
            0: 4.166666666666671,
            1: 4.166666666666671,
            2: 4.166666666666671,
            3: -6.25,
            4: -6.25,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        self.assertEqual(rb.get_balance(), 6.25)
    def test_wrong_tier_with_no_where_to_go(self):
        """A device holding extra parts must not shed them when every
        candidate part already has its other replicas on the only other
        server (there is nowhere better to go).

        The scenario is hand-built: rebalance, then overwrite the
        assignment table with a layout where device 0 is overweight but
        every one of its parts is pinned, and verify parts_wanted and the
        final rebalance outcome.
        """
        rb = ring.RingBuilder(4, 3, 1)
        # 127.0.0.1 (even devices)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 900,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 900,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 900,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        # 127.0.0.2 (odd devices)
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdd'})
        # per-server replica shares by weight: 2800/4800 and 2000/4800
        expected = {
            '127.0.0.1': 1.75,
            '127.0.0.2': 1.25,
        }
        # all three replica-plan calculations must agree on those shares
        for wr in (rb._build_weighted_replicas_by_tier(),
                   rb._build_wanted_replicas_by_tier(),
                   rb._build_target_replicas_by_tier()):
            self.assertEqual(expected, {t[-1]: r for (t, r) in
                                        wr.items() if len(t) == 3})
        self.assertEqual(rb.get_required_overload(), 0)
        rb.rebalance(seed=3)
        # so 127.0.0.1 ended up with...
        tier = (0, 0, '127.0.0.1')
        # ... 4 parts with 1 replicas
        self.assertEqual(rb._dispersion_graph[tier][1], 4)
        # ... 12 parts with 2 replicas
        self.assertEqual(rb._dispersion_graph[tier][2], 12)
        # ... and of course 0 parts with 3 replicas
        self.assertEqual(rb._dispersion_graph[tier][3], 0)
        # but since we only have two tiers, this is *totally* dispersed
        self.assertEqual(0, rb.dispersion)
        # small rings are hard to balance, but it's possible when
        # part-replicas (3 * 2 ** 4) can go evenly into device weights
        # (4800) like we've done here
        expected = {
            0: 1,
            2: 9,
            4: 9,
            6: 9,
            1: 5,
            3: 5,
            5: 5,
            7: 5,
        }
        self.assertEqual(expected, {d['id']: d['parts']
                                    for d in rb._iter_devs()})
        expected = {
            0: 0.0,
            1: 0.0,
            2: 0.0,
            3: 0.0,
            4: 0.0,
            5: 0.0,
            6: 0.0,
            7: 0.0,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        # all devices have exactly the # of parts they want
        expected = {
            0: 0,
            2: 0,
            4: 0,
            6: 0,
            1: 0,
            3: 0,
            5: 0,
            7: 0,
        }
        self.assertEqual(expected, {d['id']: d['parts_wanted']
                                    for d in rb._iter_devs()})
        # original sorted _replica2part2dev
        """
        rb._replica2part2dev = [
            array('H', [0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, ]),
            array('H', [4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1, 1, ]),
            array('H', [1, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, ])]
        """
        # now imagine if we came along this _replica2part2dev through no
        # fault of our own; and device 0 had extra parts, but both
        # copies of the other replicas were already in the other tier!
        rb._replica2part2dev = [
            # these are the relevant ones here
            #                                         |  |
            #                                         v  v
            array('H', [2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 0, 0]),
            array('H', [4, 4, 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1]),
            array('H', [1, 1, 3, 3, 3, 3, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7])]
        # fix up bookkeeping: recompute per-device part counts to match
        # the hand-written table
        new_dev_parts = defaultdict(int)
        for part2dev_id in rb._replica2part2dev:
            for dev_id in part2dev_id:
                new_dev_parts[dev_id] += 1
        for dev in rb._iter_devs():
            dev['parts'] = new_dev_parts[dev['id']]
        replica_plan = rb._build_replica_plan()
        rb._set_parts_wanted(replica_plan)
        expected = {
            0: -1,  # this device wants to shed
            2: 0,
            4: 0,
            6: 0,
            1: 0,
            3: 1,  # there's devices with room on the other server
            5: 0,
            7: 0,
        }
        self.assertEqual(expected, {d['id']: d['parts_wanted']
                                    for d in rb._iter_devs()})
        self.assertEqual(rb.get_balance(), 100)
        rb.pretend_min_part_hours_passed()
        # There's something like a 11% chance that we won't be able to get to
        # a balance of 0 (and a 6% chance that we won't change anything at all)
        # Pick a seed to make this pass.
        rb.rebalance(seed=123)
        self.assertEqual(rb.get_balance(), 0)
def test_multiple_duplicate_device_assignment(self):
rb = ring.RingBuilder(4, 4, 1)
devs = [
'r1z1-127.0.0.1:6200/d1',
'r1z1-127.0.0.1:6201/d2',
'r1z1-127.0.0.1:6202/d3',
'r1z1-127.0.0.1:33443/d4',
'r1z1-127.0.0.2:6200/d5',
'r1z1-127.0.0.2:6201/d6',
'r1z1-127.0.0.2:6202/d7',
'r1z1-127.0.0.2:6202/d8',
]
for add_value in devs:
dev = utils.parse_add_value(add_value)
dev['weight'] = 1.0
rb.add_dev(dev)
rb.rebalance()
rb._replica2part2dev = [
# these are the relevant one's here
# | | | | |
# v v v v v
array('H', [0, 1, 2, 3, 3, 0, 0, 0, 4, 6, 4, 4, 4, 4, 4, 4]),
array('H', [0, 1, 3, 1, 1, 1, 1, 1, 5, 7, 5, 5, 5, 5, 5, 5]),
array('H', [0, 1, 2, 2, 2, 2, 2, 2, 4, 6, 6, 6, 6, 6, 6, 6]),
array('H', [0, 3, 2, 3, 3, 3, 3, 3, 5, 7, 7, 7, 7, 7, 7, 7])
# ^
# |
# this sort of thing worked already
]
# fix up bookkeeping
new_dev_parts = defaultdict(int)
for part2dev_id in rb._replica2part2dev:
for dev_id in part2dev_id:
new_dev_parts[dev_id] += 1
for dev in rb._iter_devs():
dev['parts'] = new_dev_parts[dev['id']]
rb.pretend_min_part_hours_passed()
rb.rebalance()
rb.validate()
def test_region_fullness_with_balanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 2, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10005, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 2, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10006, 'device': 'sda1'})
rb.add_dev({'id': 6, 'region': 3, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10007, 'device': 'sda1'})
rb.add_dev({'id': 7, 'region': 3, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10008, 'device': 'sda1'})
rb.rebalance(seed=2)
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region,
{0: 192, 1: 192, 2: 192, 3: 192})
def test_region_fullness_with_unbalanceable_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 2,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
rb.rebalance(seed=2)
population_by_region = self._get_population_by_region(rb)
self.assertEqual(population_by_region, {0: 512, 1: 256})
    def test_adding_region_slowly_with_unbalanceable_ring(self):
        """Adding an undersized new region only pulls in as many parts as
        its weight can hold; repeated rebalances are stable, and raising
        the region's weight pulls in proportionally more parts.
        """
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb1'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc1'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd1'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc1'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 0.5,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd1'})
        rb.rebalance(seed=2)
        # a tiny new region: 0.5 total weight against 4.0 in region 0
        rb.add_dev({'id': 2, 'region': 1, 'zone': 0, 'weight': 0.25,
                    'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
        rb.add_dev({'id': 3, 'region': 1, 'zone': 1, 'weight': 0.25,
                    'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
        rb.pretend_min_part_hours_passed()
        changed_parts, _balance, _removed = rb.rebalance(seed=2)
        # there's not enough room in r1 for every partition to have a replica
        # in it, so only 86 assignments occur in r1 (that's ~1/9 of the total,
        # since r1 has 1/9 of the weight).
        population_by_region = self._get_population_by_region(rb)
        self.assertEqual(population_by_region, {0: 682, 1: 86})
        # really 86 parts *should* move (to the new region) but to avoid
        # accidentally picking up too many and causing some parts to randomly
        # flop around devices in the original region - our gather algorithm
        # is conservative when picking up only from devices that are for sure
        # holding more parts than they want (math.ceil() of the replica_plan)
        # which guarantees any parts picked up will have new homes in a better
        # tier or failure_domain.
        self.assertEqual(86, changed_parts)
        # and since there's not enough room, subsequent rebalances will not
        # cause additional assignments to r1
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=2)
        rb.validate()
        population_by_region = self._get_population_by_region(rb)
        self.assertEqual(population_by_region, {0: 682, 1: 86})
        # after you add more weight, more partition assignments move
        rb.set_dev_weight(2, 0.5)
        rb.set_dev_weight(3, 0.5)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=2)
        rb.validate()
        population_by_region = self._get_population_by_region(rb)
        self.assertEqual(population_by_region, {0: 614, 1: 154})
        rb.set_dev_weight(2, 1.0)
        rb.set_dev_weight(3, 1.0)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=2)
        rb.validate()
        population_by_region = self._get_population_by_region(rb)
        self.assertEqual(population_by_region, {0: 512, 1: 256})
def test_avoid_tier_change_new_region(self):
rb = ring.RingBuilder(8, 3, 1)
for i in range(5):
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 100,
'ip': '127.0.0.1', 'port': i, 'device': 'sda1'})
rb.rebalance(seed=2)
# Add a new device in new region to a balanced ring
rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': 0,
'ip': '127.0.0.5', 'port': 10000, 'device': 'sda1'})
# Increase the weight of region 1 slowly
moved_partitions = []
errors = []
for weight in range(0, 101, 10):
rb.set_dev_weight(5, weight)
rb.pretend_min_part_hours_passed()
changed_parts, _balance, _removed = rb.rebalance(seed=2)
rb.validate()
moved_partitions.append(changed_parts)
# Ensure that the second region has enough partitions
# Otherwise there will be replicas at risk
min_parts_for_r1 = ceil(weight / (500.0 + weight) * 768)
parts_for_r1 = self._get_population_by_region(rb).get(1, 0)
try:
self.assertEqual(min_parts_for_r1, parts_for_r1)
except AssertionError:
errors.append('weight %s got %s parts but expected %s' % (
weight, parts_for_r1, min_parts_for_r1))
self.assertFalse(errors)
# Number of partitions moved on each rebalance
# 10/510 * 768 ~ 15.06 -> move at least 15 partitions in first step
ref = [0, 16, 14, 14, 13, 13, 13, 12, 11, 12, 10]
self.assertEqual(ref, moved_partitions)
def test_set_replicas_increase(self):
rb = ring.RingBuilder(8, 2, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.replicas = 2.1
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 25])
rb.replicas = 2.2
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 51])
def test_set_replicas_decrease(self):
rb = ring.RingBuilder(4, 5, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
rb.validate()
rb.replicas = 4.9
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[16, 16, 16, 16, 14])
# cross a couple of integer thresholds (4 and 3)
rb.replicas = 2.5
rb.rebalance()
rb.validate()
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[16, 16, 8])
def test_fractional_replicas_rebalance(self):
rb = ring.RingBuilder(8, 2.5, 0)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance() # passes by not crashing
rb.validate() # also passes by not crashing
self.assertEqual([len(p2d) for p2d in rb._replica2part2dev],
[256, 256, 128])
def test_create_add_dev_add_replica_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.set_replicas(4)
rb.rebalance() # this would crash since parts_wanted was not set
rb.validate()
def test_reduce_replicas_after_remove_device(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.rebalance()
rb.remove_dev(0)
self.assertRaises(exceptions.RingValidationError, rb.rebalance)
rb.set_replicas(2)
rb.rebalance()
rb.validate()
def test_rebalance_post_upgrade(self):
rb = ring.RingBuilder(8, 3, 1)
# 5 devices: 5 is the smallest number that does not divide 3 * 2^8,
# which forces some rounding to happen.
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.rebalance()
rb.validate()
# Older versions of the ring builder code would round down when
# computing parts_wanted, while the new code rounds up. Make sure we
# can handle a ring built by the old method.
#
# This code mimics the old _set_parts_wanted.
weight_of_one_part = rb.weight_of_one_part()
for dev in rb._iter_devs():
if not dev['weight']:
dev['parts_wanted'] = -rb.parts * rb.replicas
else:
dev['parts_wanted'] = (
int(weight_of_one_part * dev['weight']) -
dev['parts'])
rb.pretend_min_part_hours_passed()
rb.rebalance() # this crashes unless rebalance resets parts_wanted
rb.validate()
def test_add_replicas_then_rebalance_respects_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'})
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 96, 1: 96,
2: 32, 3: 32,
4: 96, 5: 96,
6: 32, 7: 32,
8: 96, 9: 96,
10: 32, 11: 32})
rb.replicas *= 2
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 192, 1: 192,
2: 64, 3: 64,
4: 192, 5: 192,
6: 64, 7: 64,
8: 192, 9: 192,
10: 64, 11: 64})
    def test_overload(self):
        """Overload lets the lighter zones take more than their
        weight-fair share, up to the configured factor; beyond the point
        of even spread, extra overload changes nothing.
        """
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'})
        rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'})
        rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'})
        rb.rebalance(seed=12345)
        rb.validate()
        # sanity check: balance respects weights, so default
        part_counts = _partition_counts(rb, key='zone')
        self.assertEqual(part_counts[0], 192)
        self.assertEqual(part_counts[1], 192)
        self.assertEqual(part_counts[2], 384)
        # Zones 0 and 1 take 10% more than their fair shares by weight since
        # overload is 10% (0.1).
        rb.set_overload(0.1)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=12345)
        part_counts = _partition_counts(rb, key='zone')
        self.assertEqual({0: 212, 1: 211, 2: 345}, part_counts)
        # Now, zones 0 and 1 take 50% more than their fair shares by
        # weight.
        rb.set_overload(0.5)
        for _ in range(3):
            rb.pretend_min_part_hours_passed()
            rb.rebalance(seed=12345)
        part_counts = _partition_counts(rb, key='zone')
        self.assertEqual({0: 256, 1: 256, 2: 256}, part_counts)
        # Zones 0 and 1 may take up to 75% over their fair share, but the
        # placement algorithm only wants to spread things out evenly between
        # all drives, so the zones stay at 50% more.
        rb.set_overload(0.75)
        for _ in range(3):
            rb.pretend_min_part_hours_passed()
            rb.rebalance(seed=12345)
        part_counts = _partition_counts(rb, key='zone')
        self.assertEqual(part_counts[0], 256)
        self.assertEqual(part_counts[1], 256)
        self.assertEqual(part_counts[2], 256)
    def test_unoverload(self):
        """Start off needing overload to balance, then add capacity until
        we don't need overload any more and see that things still
        balance.  Overload doesn't prevent optimal balancing.
        """
        rb = ring.RingBuilder(8, 3, 1)
        rb.set_overload(0.125)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 2,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 2,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 10, 'region': 0, 'zone': 0, 'weight': 2,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 11, 'region': 0, 'zone': 0, 'weight': 2,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
        rb.rebalance(seed=12345)
        # sanity check: our overload is big enough to balance things
        part_counts = _partition_counts(rb, key='ip')
        self.assertEqual(part_counts['127.0.0.1'], 216)
        self.assertEqual(part_counts['127.0.0.2'], 216)
        self.assertEqual(part_counts['127.0.0.3'], 336)
        # Add some weight: balance improves
        for dev in rb.devs:
            if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
                rb.set_dev_weight(dev['id'], 1.22)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=12345)
        part_counts = _partition_counts(rb, key='ip')
        self.assertEqual({
            '127.0.0.1': 237,
            '127.0.0.2': 237,
            '127.0.0.3': 294,
        }, part_counts)
        # Even out the weights: balance becomes perfect
        for dev in rb.devs:
            if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
                rb.set_dev_weight(dev['id'], 2)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=12345)
        part_counts = _partition_counts(rb, key='ip')
        self.assertEqual(part_counts['127.0.0.1'], 256)
        self.assertEqual(part_counts['127.0.0.2'], 256)
        self.assertEqual(part_counts['127.0.0.3'], 256)
        # Add a new server: balance stays optimal
        rb.add_dev({'id': 12, 'region': 0, 'zone': 0,
                    'weight': 2,
                    'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 13, 'region': 0, 'zone': 0,
                    'weight': 2,
                    'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'})
        rb.add_dev({'id': 14, 'region': 0, 'zone': 0,
                    'weight': 2,
                    'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
        rb.add_dev({'id': 15, 'region': 0, 'zone': 0,
                    'weight': 2,
                    'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
        # we're moving more than 1/3 of the replicas but fewer than 2/3, so
        # we have to do this twice
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=12345)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=12345)
        expected = {
            '127.0.0.1': 192,
            '127.0.0.2': 192,
            '127.0.0.3': 192,
            '127.0.0.4': 192,
        }
        part_counts = _partition_counts(rb, key='ip')
        self.assertEqual(part_counts, expected)
def test_overload_keeps_balanceable_things_balanced_initially(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
rb.rebalance(seed=12345)
part_counts = _partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
def test_overload_keeps_balanceable_things_balanced_on_rebalance(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sdb'})
rb.set_overload(99999)
rb.rebalance(seed=123)
part_counts = _partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
# swap weights between 10.0.0.1 and 10.0.0.2
rb.set_dev_weight(0, 4)
rb.set_dev_weight(1, 4)
rb.set_dev_weight(2, 8)
rb.set_dev_weight(1, 8)
rb.rebalance(seed=456)
part_counts = _partition_counts(rb)
self.assertEqual(part_counts, {
0: 128,
1: 128,
2: 64,
3: 64,
4: 64,
5: 64,
6: 64,
7: 64,
8: 64,
9: 64,
})
def test_server_per_port(self):
# 3 servers, 3 disks each, with each disk on its own port
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'})
rb.rebalance(seed=1)
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'})
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=1)
poorly_dispersed = []
for part in range(rb.parts):
on_nodes = set()
for replica in range(rb.replicas):
dev_id = rb._replica2part2dev[replica][part]
on_nodes.add(rb.devs[dev_id]['ip'])
if len(on_nodes) < rb.replicas:
poorly_dispersed.append(part)
self.assertEqual(poorly_dispersed, [])
    def test_load(self):
        """RingBuilder.load handles current, legacy, empty, corrupt and
        unreadable builder files, mapping low-level failures onto the
        swift exceptions callers expect.
        """
        rb = ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
                 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
                 'meta': 'meta1'},
                {'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
                 'meta': 'meta2'},
                {'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
                 'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}]
        for d in devs:
            rb.add_dev(d)
        rb.rebalance()
        # pickle.load is monkey-patched below; keep the real one so the
        # finally block can restore it
        real_pickle = pickle.load
        fake_open = mock.mock_open()
        io_error_not_found = IOError()
        io_error_not_found.errno = errno.ENOENT
        io_error_no_perm = IOError()
        io_error_no_perm.errno = errno.EPERM
        io_error_generic = IOError()
        io_error_generic.errno = errno.EOPNOTSUPP
        try:
            # test a legit builder
            fake_pickle = mock.Mock(return_value=rb)
            pickle.load = fake_pickle
            builder = ring.RingBuilder.load('fake.builder', open=fake_open)
            self.assertEqual(fake_pickle.call_count, 1)
            fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
            self.assertEqual(builder, rb)
            fake_pickle.reset_mock()
            # test old style builder
            fake_pickle.return_value = rb.to_dict()
            pickle.load = fake_pickle
            builder = ring.RingBuilder.load('fake.builder', open=fake_open)
            fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
            self.assertEqual(builder.devs, rb.devs)
            fake_pickle.reset_mock()
            # test old devs but no meta
            # N.B. no_meta_builder is an *alias* of rb, so deleting 'meta'
            # below also mutates rb.devs — the assertEqual afterwards
            # compares against the already-mutated devs
            no_meta_builder = rb
            for dev in no_meta_builder.devs:
                del(dev['meta'])
            fake_pickle.return_value = no_meta_builder
            pickle.load = fake_pickle
            builder = ring.RingBuilder.load('fake.builder', open=fake_open)
            fake_open.assert_has_calls([mock.call('fake.builder', 'rb')])
            self.assertEqual(builder.devs, rb.devs)
            # test an empty builder
            fake_pickle.side_effect = EOFError
            pickle.load = fake_pickle
            self.assertRaises(exceptions.UnPicklingError,
                              ring.RingBuilder.load, 'fake.builder',
                              open=fake_open)
            # test a corrupted builder
            fake_pickle.side_effect = pickle.UnpicklingError
            pickle.load = fake_pickle
            self.assertRaises(exceptions.UnPicklingError,
                              ring.RingBuilder.load, 'fake.builder',
                              open=fake_open)
            # test some error
            fake_pickle.side_effect = AttributeError
            pickle.load = fake_pickle
            self.assertRaises(exceptions.UnPicklingError,
                              ring.RingBuilder.load, 'fake.builder',
                              open=fake_open)
        finally:
            pickle.load = real_pickle
        # test non existent builder file
        fake_open.side_effect = io_error_not_found
        self.assertRaises(exceptions.FileNotFoundError,
                          ring.RingBuilder.load, 'fake.builder',
                          open=fake_open)
        # test non accessible builder file
        fake_open.side_effect = io_error_no_perm
        self.assertRaises(exceptions.PermissionError,
                          ring.RingBuilder.load, 'fake.builder',
                          open=fake_open)
        # test an error other then ENOENT and ENOPERM
        fake_open.side_effect = io_error_generic
        self.assertRaises(IOError,
                          ring.RingBuilder.load, 'fake.builder',
                          open=fake_open)
    def test_save_load(self):
        """A builder round-trips through save()/load() without losing
        device attributes (including replication ip/port) or overload.
        """
        rb = ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000,
                 'replication_ip': '127.0.0.0', 'replication_port': 10000,
                 'device': 'sda1', 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                 'ip': '127.0.0.1', 'port': 10001,
                 'replication_ip': '127.0.0.1', 'replication_port': 10001,
                 'device': 'sdb1', 'meta': 'meta1'},
                {'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                 'ip': '127.0.0.2', 'port': 10002,
                 'replication_ip': '127.0.0.2', 'replication_port': 10002,
                 'device': 'sdc1', 'meta': 'meta2'},
                {'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
                 'ip': '127.0.0.3', 'port': 10003,
                 'replication_ip': '127.0.0.3', 'replication_port': 10003,
                 'device': 'sdd1', 'meta': ''}]
        # non-default overload so we can see it survives the round trip
        rb.set_overload(3.14159)
        for d in devs:
            rb.add_dev(d)
        rb.rebalance()
        builder_file = os.path.join(self.testdir, 'test_save.builder')
        rb.save(builder_file)
        loaded_rb = ring.RingBuilder.load(builder_file)
        self.maxDiff = None
        self.assertEqual(loaded_rb.to_dict(), rb.to_dict())
        self.assertEqual(loaded_rb.overload, 3.14159)
    @mock.patch('six.moves.builtins.open', autospec=True)
    @mock.patch('swift.common.ring.builder.pickle.dump', autospec=True)
    def test_save(self, mock_pickle_dump, mock_open):
        """save() opens the target in binary mode and pickles the
        builder's dict form with protocol 2.
        """
        mock_open.return_value = mock_fh = mock.MagicMock()
        rb = ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
                 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
                 'meta': 'meta1'},
                {'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
                 'meta': 'meta2'},
                {'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
                 'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'}]
        for d in devs:
            rb.add_dev(d)
        rb.rebalance()
        rb.save('some.builder')
        mock_open.assert_called_once_with('some.builder', 'wb')
        # the dump target is the context-manager handle, protocol pinned to 2
        mock_pickle_dump.assert_called_once_with(rb.to_dict(),
                                                 mock_fh.__enter__(),
                                                 protocol=2)
    def test_id(self):
        """A builder id is assigned on first successful save and is then
        stable across further saves, loads, and failed saves.
        """
        rb = ring.RingBuilder(8, 3, 1)
        # check id is assigned after save
        builder_file = os.path.join(self.testdir, 'test_save.builder')
        rb.save(builder_file)
        assigned_id = rb.id
        # check id doesn't change when builder is saved again
        rb.save(builder_file)
        self.assertEqual(assigned_id, rb.id)
        # check same id after loading
        loaded_rb = ring.RingBuilder.load(builder_file)
        self.assertEqual(assigned_id, loaded_rb.id)
        # check id doesn't change when loaded builder is saved
        rb.save(builder_file)
        self.assertEqual(assigned_id, rb.id)
        # check same id after loading again
        loaded_rb = ring.RingBuilder.load(builder_file)
        self.assertEqual(assigned_id, loaded_rb.id)
        # check id remains once assigned, even when save fails
        with self.assertRaises(IOError):
            rb.save(os.path.join(
                self.testdir, 'non_existent_dir', 'test_save.file'))
        self.assertEqual(assigned_id, rb.id)
        # sanity check that different builders get different id's
        other_rb = ring.RingBuilder(8, 3, 1)
        other_builder_file = os.path.join(self.testdir, 'test_save_2.builder')
        other_rb.save(other_builder_file)
        self.assertNotEqual(assigned_id, other_rb.id)
    def test_id_copy_from(self):
        # copy_from preserves the same id
        orig_rb = ring.RingBuilder(8, 3, 1)
        copy_rb = ring.RingBuilder(8, 3, 1)
        copy_rb.copy_from(orig_rb)
        # neither builder has an id yet — copy_from of an unsaved builder
        # must not invent one
        for rb in(orig_rb, copy_rb):
            with self.assertRaises(AttributeError) as cm:
                rb.id
            self.assertIn('id attribute has not been initialised',
                          cm.exception.args[0])
        # once the original is saved (and so gets an id), a fresh copy
        # inherits that id
        builder_file = os.path.join(self.testdir, 'test_save.builder')
        orig_rb.save(builder_file)
        copy_rb = ring.RingBuilder(8, 3, 1)
        copy_rb.copy_from(orig_rb)
        self.assertEqual(orig_rb.id, copy_rb.id)
    def test_id_legacy_builder_file(self):
        """Loading pre-id builder files works, and saving such a builder
        assigns and persists an id.
        """
        builder_file = os.path.join(self.testdir, 'legacy.builder')
        def do_test():
            # load legacy file
            loaded_rb = ring.RingBuilder.load(builder_file)
            with self.assertRaises(AttributeError) as cm:
                loaded_rb.id
            self.assertIn('id attribute has not been initialised',
                          cm.exception.args[0])
            # check saving assigns an id, and that it is persisted
            loaded_rb.save(builder_file)
            assigned_id = loaded_rb.id
            self.assertIsNotNone(assigned_id)
            loaded_rb = ring.RingBuilder.load(builder_file)
            self.assertEqual(assigned_id, loaded_rb.id)
        # older builders had no id so the pickled builder dict had no id key
        rb = ring.RingBuilder(8, 3, 1)
        orig_to_dict = rb.to_dict
        def mock_to_dict():
            # simulate a legacy on-disk dict by stripping the 'id' key
            result = orig_to_dict()
            result.pop('id')
            return result
        with mock.patch.object(rb, 'to_dict', mock_to_dict):
            rb.save(builder_file)
        do_test()
        # even older builders pickled the class instance, which would have had
        # no _id attribute
        rb = ring.RingBuilder(8, 3, 1)
        del rb.logger  # logger type cannot be pickled
        del rb._id
        builder_file = os.path.join(self.testdir, 'legacy.builder')
        with open(builder_file, 'wb') as f:
            pickle.dump(rb, f, protocol=2)
        do_test()
def test_id_not_initialised_errors(self):
rb = ring.RingBuilder(8, 3, 1)
# id is not set until builder has been saved
with self.assertRaises(AttributeError) as cm:
rb.id
self.assertIn('id attribute has not been initialised',
cm.exception.args[0])
# save must succeed for id to be assigned
with self.assertRaises(IOError):
rb.save(os.path.join(
self.testdir, 'non-existent-dir', 'foo.builder'))
with self.assertRaises(AttributeError) as cm:
rb.id
self.assertIn('id attribute has not been initialised',
cm.exception.args[0])
    def test_search_devs(self):
        """search_devs filters devices by any combination of region, zone,
        id, ip/port, replication ip/port, device name and meta.
        """
        rb = ring.RingBuilder(8, 3, 1)
        devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1',
                 'meta': 'meta0'},
                {'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                 'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1',
                 'meta': 'meta1'},
                {'id': 2, 'region': 1, 'zone': 2, 'weight': 2,
                 'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1',
                 'meta': 'meta2'},
                {'id': 3, 'region': 1, 'zone': 3, 'weight': 2,
                 'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1',
                 'meta': 'meta3'},
                {'id': 4, 'region': 2, 'zone': 4, 'weight': 1,
                 'ip': '127.0.0.4', 'port': 10004, 'device': 'sde1',
                 'meta': 'meta4', 'replication_ip': '127.0.0.10',
                 'replication_port': 20000},
                {'id': 5, 'region': 2, 'zone': 5, 'weight': 2,
                 'ip': '127.0.0.5', 'port': 10005, 'device': 'sdf1',
                 'meta': 'meta5', 'replication_ip': '127.0.0.11',
                 'replication_port': 20001},
                {'id': 6, 'region': 2, 'zone': 6, 'weight': 2,
                 'ip': '127.0.0.6', 'port': 10006, 'device': 'sdg1',
                 'meta': 'meta6', 'replication_ip': '127.0.0.12',
                 'replication_port': 20002}]
        for d in devs:
            rb.add_dev(d)
        rb.rebalance()
        # by region (and region + zone)
        res = rb.search_devs({'region': 0})
        self.assertEqual(res, [devs[0], devs[1]])
        res = rb.search_devs({'region': 1})
        self.assertEqual(res, [devs[2], devs[3]])
        res = rb.search_devs({'region': 1, 'zone': 2})
        self.assertEqual(res, [devs[2]])
        # by id and zone
        res = rb.search_devs({'id': 1})
        self.assertEqual(res, [devs[1]])
        res = rb.search_devs({'zone': 1})
        self.assertEqual(res, [devs[1]])
        # by ip and/or port
        res = rb.search_devs({'ip': '127.0.0.1'})
        self.assertEqual(res, [devs[1]])
        res = rb.search_devs({'ip': '127.0.0.1', 'port': 10001})
        self.assertEqual(res, [devs[1]])
        res = rb.search_devs({'port': 10001})
        self.assertEqual(res, [devs[1]])
        # by replication ip and/or port
        res = rb.search_devs({'replication_ip': '127.0.0.10'})
        self.assertEqual(res, [devs[4]])
        res = rb.search_devs({'replication_ip': '127.0.0.10',
                              'replication_port': 20000})
        self.assertEqual(res, [devs[4]])
        res = rb.search_devs({'replication_port': 20000})
        self.assertEqual(res, [devs[4]])
        # by device name and meta string
        res = rb.search_devs({'device': 'sdb1'})
        self.assertEqual(res, [devs[1]])
        res = rb.search_devs({'meta': 'meta1'})
        self.assertEqual(res, [devs[1]])
    def test_validate(self):
        """validate() accepts a healthy ring and rejects each kind of
        corruption: unbalanced pre-rebalance state, mismatched part
        counts, non-numeric ports, and parts assigned to missing devices.
        """
        rb = ring.RingBuilder(8, 3, 1)
        # four zones, four devices each; zones 2 and 3 carry double weight
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
        rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
        rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
        rb.add_dev({'id': 12, 'region': 0, 'zone': 2, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 3, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
        rb.add_dev({'id': 13, 'region': 0, 'zone': 3, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
        rb.add_dev({'id': 14, 'region': 0, 'zone': 3, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
        rb.add_dev({'id': 15, 'region': 0, 'zone': 3, 'weight': 2,
                    'ip': '127.0.0.1', 'port': 10003, 'device': 'sda1'})
        # Degenerate case: devices added but not rebalanced yet
        self.assertRaises(exceptions.RingValidationError, rb.validate)
        rb.rebalance()
        # part counts follow the 1:1:2:2 zone weights
        counts = _partition_counts(rb, key='zone')
        self.assertEqual(counts, {0: 128, 1: 128, 2: 256, 3: 256})
        # without stats=True validate returns (None, None)
        dev_usage, worst = rb.validate()
        self.assertIsNone(dev_usage)
        self.assertIsNone(worst)
        dev_usage, worst = rb.validate(stats=True)
        self.assertEqual(list(dev_usage), [32, 32, 64, 64,
                                           32, 32, 32,  # added zone0
                                           32, 32, 32,  # added zone1
                                           64, 64, 64,  # added zone2
                                           64, 64, 64,  # added zone3
                                           ])
        self.assertEqual(int(worst), 0)
        # min part hours should pin all the parts assigned to this zero
        # weight device onto it such that the balance will look horrible
        rb.set_dev_weight(2, 0)
        rb.rebalance()
        self.assertEqual(rb.validate(stats=True)[1], MAX_BALANCE)
        # Test not all partitions doubly accounted for
        rb.devs[1]['parts'] -= 1
        self.assertRaises(exceptions.RingValidationError, rb.validate)
        rb.devs[1]['parts'] += 1
        # Test non-numeric port
        rb.devs[1]['port'] = '10001'
        self.assertRaises(exceptions.RingValidationError, rb.validate)
        rb.devs[1]['port'] = 10001
        # Test partition on nonexistent device
        rb.pretend_min_part_hours_passed()
        orig_dev_id = rb._replica2part2dev[0][0]
        rb._replica2part2dev[0][0] = len(rb.devs)
        self.assertRaises(exceptions.RingValidationError, rb.validate)
        rb._replica2part2dev[0][0] = orig_dev_id
        # Tests that validate can handle 'holes' in .devs
        rb.remove_dev(2)
        rb.pretend_min_part_hours_passed()
        rb.rebalance()
        rb.validate(stats=True)
        # Test partition assigned to a hole
        if rb.devs[2]:
            rb.remove_dev(2)
        rb.pretend_min_part_hours_passed()
        orig_dev_id = rb._replica2part2dev[0][0]
        rb._replica2part2dev[0][0] = 2
        self.assertRaises(exceptions.RingValidationError, rb.validate)
        rb._replica2part2dev[0][0] = orig_dev_id
        # Validate that zero weight devices with no partitions don't count on
        # the 'worst' value.
        self.assertNotEqual(rb.validate(stats=True)[1], MAX_BALANCE)
        rb.add_dev({'id': 16, 'region': 0, 'zone': 0, 'weight': 0,
                    'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
        rb.pretend_min_part_hours_passed()
        rb.rebalance()
        self.assertNotEqual(rb.validate(stats=True)[1], MAX_BALANCE)
def test_validate_partial_replica(self):
rb = ring.RingBuilder(8, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
rb.rebalance()
rb.validate() # sanity
self.assertEqual(len(rb._replica2part2dev[0]), 256)
self.assertEqual(len(rb._replica2part2dev[1]), 256)
self.assertEqual(len(rb._replica2part2dev[2]), 128)
# now swap partial replica part maps
rb._replica2part2dev[1], rb._replica2part2dev[2] = \
rb._replica2part2dev[2], rb._replica2part2dev[1]
self.assertRaises(exceptions.RingValidationError, rb.validate)
def test_validate_duplicate_part_assignment(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc'})
rb.rebalance()
rb.validate() # sanity
# now double up a device assignment
rb._replica2part2dev[1][200] = rb._replica2part2dev[2][200]
with self.assertRaises(exceptions.RingValidationError) as e:
rb.validate()
expected = 'The partition 200 has been assigned to duplicate devices'
self.assertIn(expected, str(e.exception))
def test_get_part_devices(self):
rb = ring.RingBuilder(8, 3, 1)
self.assertEqual(rb.get_part_devices(0), [])
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance()
part_devs = sorted(rb.get_part_devices(0),
key=operator.itemgetter('id'))
self.assertEqual(part_devs, [rb.devs[0], rb.devs[1], rb.devs[2]])
def test_get_part_devices_partial_replicas(self):
rb = ring.RingBuilder(8, 2.5, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
rb.rebalance(seed=4)
# note: partition 255 will only have 2 replicas
part_devs = sorted(rb.get_part_devices(255),
key=operator.itemgetter('id'))
self.assertEqual(part_devs, [rb.devs[1], rb.devs[2]])
    def test_dispersion_with_zero_weight_devices(self):
        """A zero-weight device that never held parts does not appear in
        the dispersion graph and does not hurt dispersion.
        """
        rb = ring.RingBuilder(8, 3.0, 0)
        # add two devices to a single server in a single zone
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        # and a zero weight device
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 0,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.rebalance()
        self.assertEqual(rb.dispersion, 0.0)
        # graph values are counts of parts with 0..replicas replicas in
        # each tier; dev 3 (zero weight) has no entry at all
        self.assertEqual(rb._dispersion_graph, {
            (0,): [0, 0, 0, 256],
            (0, 0): [0, 0, 0, 256],
            (0, 0, '127.0.0.1'): [0, 0, 0, 256],
            (0, 0, '127.0.0.1', 0): [0, 256, 0, 0],
            (0, 0, '127.0.0.1', 1): [0, 256, 0, 0],
            (0, 0, '127.0.0.1', 2): [0, 256, 0, 0],
        })
    def test_dispersion_with_zero_weight_devices_with_parts(self):
        """A device zeroed *after* it has parts keeps them (pinned by
        min_part_hours) and degrades dispersion until they can move.
        """
        rb = ring.RingBuilder(8, 3.0, 1)
        # add four devices to a single server in a single zone
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.rebalance(seed=1)
        self.assertEqual(rb.dispersion, 0.0)
        self.assertEqual(rb._dispersion_graph, {
            (0,): [0, 0, 0, 256],
            (0, 0): [0, 0, 0, 256],
            (0, 0, '127.0.0.1'): [0, 0, 0, 256],
            (0, 0, '127.0.0.1', 0): [64, 192, 0, 0],
            (0, 0, '127.0.0.1', 1): [64, 192, 0, 0],
            (0, 0, '127.0.0.1', 2): [64, 192, 0, 0],
            (0, 0, '127.0.0.1', 3): [64, 192, 0, 0],
        })
        # now mark a device 2 for decom
        rb.set_dev_weight(2, 0.0)
        # we'll rebalance but can't move any parts
        rb.rebalance(seed=1)
        # zero weight tier has one copy of 1/4 part-replica
        self.assertEqual(rb.dispersion, 25.0)
        # graph unchanged — min_part_hours pinned every part in place
        self.assertEqual(rb._dispersion_graph, {
            (0,): [0, 0, 0, 256],
            (0, 0): [0, 0, 0, 256],
            (0, 0, '127.0.0.1'): [0, 0, 0, 256],
            (0, 0, '127.0.0.1', 0): [64, 192, 0, 0],
            (0, 0, '127.0.0.1', 1): [64, 192, 0, 0],
            (0, 0, '127.0.0.1', 2): [64, 192, 0, 0],
            (0, 0, '127.0.0.1', 3): [64, 192, 0, 0],
        })
        # unlock the stuck parts
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=3)
        self.assertEqual(rb.dispersion, 0.0)
        # dev 2 is drained and drops out of the graph entirely
        self.assertEqual(rb._dispersion_graph, {
            (0,): [0, 0, 0, 256],
            (0, 0): [0, 0, 0, 256],
            (0, 0, '127.0.0.1'): [0, 0, 0, 256],
            (0, 0, '127.0.0.1', 0): [0, 256, 0, 0],
            (0, 0, '127.0.0.1', 1): [0, 256, 0, 0],
            (0, 0, '127.0.0.1', 3): [0, 256, 0, 0],
        })
    @unittest.skipIf(sys.version_info >= (3,),
                     "Seed-specific tests don't work well on py3")
    def test_undispersable_zone_converge_on_balance(self):
        """Repeated rebalances of a zone-lopsided topology converge on
        good balance; the exact values are specific to seed=7.
        """
        rb = ring.RingBuilder(8, 6, 0)
        dev_id = 0
        # 3 regions, 2 zone for each region, 1 server with only *one* device in
        # each zone (this is an absolutely pathological case)
        for r in range(3):
            for z in range(2):
                ip = '127.%s.%s.1' % (r, z)
                dev_id += 1
                rb.add_dev({'id': dev_id, 'region': r, 'zone': z,
                            'weight': 1000, 'ip': ip, 'port': 10000,
                            'device': 'd%s' % dev_id})
        rb.rebalance(seed=7)
        # sanity, all balanced and 0 dispersion
        self.assertEqual(rb.get_balance(), 0)
        self.assertEqual(rb.dispersion, 0)
        # add one device to the server in z1 for each region, N.B. when we
        # *balance* this topology we will have very bad dispersion (too much
        # weight in z1 compared to z2!)
        for r in range(3):
            z = 0
            ip = '127.%s.%s.1' % (r, z)
            dev_id += 1
            rb.add_dev({'id': dev_id, 'region': r, 'zone': z,
                        'weight': 1000, 'ip': ip, 'port': 10000,
                        'device': 'd%s' % dev_id})
        changed_part, _, _ = rb.rebalance(seed=7)
        # sanity, all part but only one replica moved to new devices
        self.assertEqual(changed_part, 2 ** 8)
        # so the first time, rings are still unbalanced becase we'll only move
        # one replica of each part.
        self.assertEqual(rb.get_balance(), 50.1953125)
        self.assertEqual(rb.dispersion, 16.6015625)
        # N.B. since we mostly end up grabbing parts by "weight forced" some
        # seeds given some specific ring state will randomly pick bad
        # part-replicas that end up going back down onto the same devices
        changed_part, _, _ = rb.rebalance(seed=7)
        self.assertEqual(changed_part, 14)
        # ... this isn't a really "desirable" behavior, but even with bad luck,
        # things do get better
        self.assertEqual(rb.get_balance(), 47.265625)
        self.assertEqual(rb.dispersion, 16.6015625)
        # but if you stick with it, eventually the next rebalance, will get to
        # move "the right" part-replicas, resulting in near optimal balance
        changed_part, _, _ = rb.rebalance(seed=7)
        self.assertEqual(changed_part, 240)
        self.assertEqual(rb.get_balance(), 0.390625)
        self.assertEqual(rb.dispersion, 16.6015625)
    @unittest.skipIf(sys.version_info >= (3,),
                     "Seed-specific tests don't work well on py3")
    def test_undispersable_server_converge_on_balance(self):
        """Repeated rebalances of a server-lopsided topology converge on
        good balance; exact values are specific to seed=7.
        """
        rb = ring.RingBuilder(8, 6, 0)
        dev_id = 0
        # 3 zones, 2 server for each zone, 2 device for each server
        for z in range(3):
            for i in range(2):
                ip = '127.0.%s.%s' % (z, i + 1)
                for d in range(2):
                    dev_id += 1
                    rb.add_dev({'id': dev_id, 'region': 1, 'zone': z,
                                'weight': 1000, 'ip': ip, 'port': 10000,
                                'device': 'd%s' % dev_id})
        rb.rebalance(seed=7)
        # sanity, all balanced and 0 dispersion
        self.assertEqual(rb.get_balance(), 0)
        self.assertEqual(rb.dispersion, 0)
        # add one device for first server for each zone
        for z in range(3):
            ip = '127.0.%s.1' % z
            dev_id += 1
            rb.add_dev({'id': dev_id, 'region': 1, 'zone': z,
                        'weight': 1000, 'ip': ip, 'port': 10000,
                        'device': 'd%s' % dev_id})
        changed_part, _, _ = rb.rebalance(seed=7)
        # sanity, all part but only one replica moved to new devices
        self.assertEqual(changed_part, 2 ** 8)
        # but the first time, those are still unbalance becase ring builder
        # can move only one replica for each part
        self.assertEqual(rb.get_balance(), 16.9921875)
        self.assertEqual(rb.dispersion, 9.9609375)
        rb.rebalance(seed=7)
        # converge into around 0~1
        self.assertGreaterEqual(rb.get_balance(), 0)
        self.assertLess(rb.get_balance(), 1)
        # dispersion doesn't get any worse
        self.assertEqual(rb.dispersion, 9.9609375)
    def test_effective_overload(self):
        """get_required_overload tracks a lopsided topology: positive while
        zones are unequal, zero once zone weights are fixed for real.
        """
        rb = ring.RingBuilder(8, 3, 1)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        # NOTE(review): this dev reuses device name 'sdb' on the same
        # ip/port as id 1 — looks like a typo for 'sdc'; confirm whether
        # the builder tolerates duplicate device names intentionally here
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        # z1
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        # z2 starts with only two devices
        rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        # this ring requires overload
        required = rb.get_required_overload()
        self.assertGreater(required, 0.1)
        # and we'll use a little bit
        rb.set_overload(0.1)
        rb.rebalance(seed=7)
        rb.validate()
        # but without enough overload we're not dispersed
        self.assertGreater(rb.dispersion, 0)
        # add the other dev to z2
        rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdc'})
        # but also fail another device in the same!
        rb.remove_dev(6)
        # we still require overload
        required = rb.get_required_overload()
        self.assertGreater(required, 0.1)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=7)
        rb.validate()
        # ... and still without enough overload we're not fully dispersed
        self.assertGreater(rb.dispersion, 0)
        # ok, let's fix z2's weight for real
        rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        # ... technically, we no longer require overload
        self.assertEqual(rb.get_required_overload(), 0.0)
        # so let's rebalance w/o resetting min_part_hours
        rb.rebalance(seed=7)
        rb.validate()
        # ... and that got it in one pass boo-yah!
        self.assertEqual(rb.dispersion, 0)
    # NOTE(review): this method's name lacks the 'test_' prefix, so
    # unittest discovery never runs it — confirm whether it was disabled
    # deliberately or should be renamed to test_zone_weights_over_device_count
    def zone_weights_over_device_count(self):
        """One device per zone with z2 at double weight: balance settles
        at ~33.3% while dispersion stays at 0.
        """
        rb = ring.RingBuilder(8, 3, 1)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        # z1
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        # z2
        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 200,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.rebalance(seed=7)
        rb.validate()
        self.assertEqual(rb.dispersion, 0)
        self.assertAlmostEqual(rb.get_balance(), (1.0 / 3.0) * 100)
def test_more_devices_than_replicas_validation_when_removed_dev(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sdc'})
rb.rebalance()
rb.remove_dev(2)
with self.assertRaises(ValueError) as e:
rb.set_dev_weight(2, 1)
msg = "Can not set weight of dev_id 2 because it is marked " \
"for removal"
self.assertIn(msg, str(e.exception))
with self.assertRaises(exceptions.RingValidationError) as e:
rb.rebalance()
msg = 'Replica count of 3 requires more than 2 devices'
self.assertIn(msg, str(e.exception))
def _add_dev_delete_first_n(self, add_dev_count, n):
rb = ring.RingBuilder(8, 3, 1)
dev_names = ['sda', 'sdb', 'sdc', 'sdd', 'sde', 'sdf']
for i in range(add_dev_count):
if i < len(dev_names):
dev_name = dev_names[i]
else:
dev_name = 'sda'
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': dev_name})
rb.rebalance()
if (n > 0):
rb.pretend_min_part_hours_passed()
# remove first n
for i in range(n):
rb.remove_dev(i)
rb.pretend_min_part_hours_passed()
rb.rebalance()
return rb
def test_reuse_of_dev_holes_without_id(self):
# try with contiguous holes at beginning
add_dev_count = 6
rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0,
'device': 'sda'})
self.assertLess(new_dev_id, add_dev_count)
# try with non-contiguous holes
# [0, 1, None, 3, 4, None]
rb2 = ring.RingBuilder(8, 3, 1)
for i in range(6):
rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
rb2.rebalance()
rb2.pretend_min_part_hours_passed()
rb2.remove_dev(2)
rb2.remove_dev(5)
rb2.pretend_min_part_hours_passed()
rb2.rebalance()
first = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
second = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
# add a new one (without reusing a hole)
third = rb2.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0, 'device': 'sda'})
self.assertEqual(first, 2)
self.assertEqual(second, 5)
self.assertEqual(third, 6)
def test_reuse_of_dev_holes_with_id(self):
add_dev_count = 6
rb = self._add_dev_delete_first_n(add_dev_count, add_dev_count - 3)
# add specifying id
exp_new_dev_id = 2
# [dev, dev, None, dev, dev, None]
try:
new_dev_id = rb.add_dev({'id': exp_new_dev_id, 'region': 0,
'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'weight': 1.0,
'device': 'sda'})
self.assertEqual(new_dev_id, exp_new_dev_id)
except exceptions.DuplicateDeviceError:
self.fail("device hole not reused")
    def test_prepare_increase_partition_power(self):
        """prepare_increase_partition_power sets next_part_power (and only
        then can cancel/finish do anything); the saved ring carries it.
        """
        ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
        rb = ring.RingBuilder(8, 3.0, 1)
        self.assertEqual(rb.part_power, 8)
        # add more devices than replicas to the ring
        for i in range(10):
            dev = "sdx%s" % i
            rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                        'ip': '127.0.0.1', 'port': 10000, 'device': dev})
        rb.rebalance(seed=1)
        # cancel/finish are no-ops while no increase is pending
        self.assertFalse(rb.cancel_increase_partition_power())
        self.assertEqual(rb.part_power, 8)
        self.assertIsNone(rb.next_part_power)
        self.assertFalse(rb.finish_increase_partition_power())
        self.assertEqual(rb.part_power, 8)
        self.assertIsNone(rb.next_part_power)
        # prepare only schedules the increase; part_power is unchanged
        self.assertTrue(rb.prepare_increase_partition_power())
        self.assertEqual(rb.part_power, 8)
        self.assertEqual(rb.next_part_power, 9)
        # Save .ring.gz, and load ring from it to ensure prev/next is set
        rd = rb.get_ring()
        rd.save(ring_file)
        r = ring.Ring(ring_file)
        expected_part_shift = 32 - 8
        self.assertEqual(expected_part_shift, r._part_shift)
        self.assertEqual(9, r.next_part_power)
    def test_increase_partition_power(self):
        """Doubling the partition power splits each old part X into parts
        2X and 2X+1, assigned to the same devices, so object placement
        is preserved.
        """
        rb = ring.RingBuilder(8, 3.0, 1)
        self.assertEqual(rb.part_power, 8)
        # add more devices than replicas to the ring
        for i in range(10):
            dev = "sdx%s" % i
            rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                        'ip': '127.0.0.1', 'port': 10000, 'device': dev})
        rb.rebalance(seed=1)
        # Let's save the ring, and get the nodes for an object
        ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
        rd = rb.get_ring()
        rd.save(ring_file)
        r = ring.Ring(ring_file)
        old_part, old_nodes = r.get_nodes("acc", "cont", "obj")
        old_version = rb.version
        self.assertTrue(rb.prepare_increase_partition_power())
        self.assertTrue(rb.increase_partition_power())
        rb.validate()
        # a follow-up rebalance must be a no-op — nothing should move
        changed_parts, _balance, removed_devs = rb.rebalance()
        self.assertEqual(changed_parts, 0)
        self.assertEqual(removed_devs, 0)
        # Make sure cancellation is not possible
        # after increasing the partition power
        self.assertFalse(rb.cancel_increase_partition_power())
        old_ring = r
        rd = rb.get_ring()
        rd.save(ring_file)
        r = ring.Ring(ring_file)
        new_part, new_nodes = r.get_nodes("acc", "cont", "obj")
        # sanity checks
        self.assertEqual(9, rb.part_power)
        self.assertEqual(9, rb.next_part_power)
        self.assertEqual(rb.version, old_version + 3)
        # make sure there is always the same device assigned to every pair of
        # partitions
        for replica in rb._replica2part2dev:
            for part in range(0, len(replica), 2):
                dev = replica[part]
                next_dev = replica[part + 1]
                self.assertEqual(dev, next_dev)
        # same for last_part moves
        for part in range(0, rb.parts, 2):
            this_last_moved = rb._last_part_moves[part]
            next_last_moved = rb._last_part_moves[part + 1]
            self.assertEqual(this_last_moved, next_last_moved)
        for i in range(100):
            suffix = uuid.uuid4()
            account = 'account_%s' % suffix
            container = 'container_%s' % suffix
            obj = 'obj_%s' % suffix
            old_part, old_nodes = old_ring.get_nodes(account, container, obj)
            new_part, new_nodes = r.get_nodes(account, container, obj)
            # Due to the increased partition power, the partition each object
            # is assigned to has changed. If the old partition was X, it will
            # now be either located in 2*X or 2*X+1
            self.assertIn(new_part, [old_part * 2, old_part * 2 + 1])
            # Importantly, we expect the objects to be placed on the same
            # nodes after increasing the partition power
            self.assertEqual(old_nodes, new_nodes)
def test_finalize_increase_partition_power(self):
ring_file = os.path.join(self.testdir, 'test_partpower.ring.gz')
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
self.assertTrue(rb.prepare_increase_partition_power())
# Make sure this doesn't do any harm before actually increasing the
# partition power
self.assertFalse(rb.finish_increase_partition_power())
self.assertEqual(rb.next_part_power, 9)
self.assertTrue(rb.increase_partition_power())
self.assertFalse(rb.prepare_increase_partition_power())
self.assertEqual(rb.part_power, 9)
self.assertEqual(rb.next_part_power, 9)
self.assertTrue(rb.finish_increase_partition_power())
self.assertEqual(rb.part_power, 9)
self.assertIsNone(rb.next_part_power)
# Save .ring.gz, and load ring from it to ensure prev/next is set
rd = rb.get_ring()
rd.save(ring_file)
r = ring.Ring(ring_file)
expected_part_shift = 32 - 9
self.assertEqual(expected_part_shift, r._part_shift)
self.assertIsNone(r.next_part_power)
def test_prepare_increase_partition_power_failed(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
self.assertTrue(rb.prepare_increase_partition_power())
self.assertEqual(rb.next_part_power, 9)
# next_part_power is still set, do not increase again
self.assertFalse(rb.prepare_increase_partition_power())
self.assertEqual(rb.next_part_power, 9)
def test_increase_partition_power_failed(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
# next_part_power not set, can't increase the part power
self.assertFalse(rb.increase_partition_power())
self.assertEqual(rb.part_power, 8)
self.assertTrue(rb.prepare_increase_partition_power())
self.assertTrue(rb.increase_partition_power())
self.assertEqual(rb.part_power, 9)
# part_power already increased
self.assertFalse(rb.increase_partition_power())
self.assertEqual(rb.part_power, 9)
def test_cancel_increase_partition_power(self):
rb = ring.RingBuilder(8, 3.0, 1)
self.assertEqual(rb.part_power, 8)
# add more devices than replicas to the ring
for i in range(10):
dev = "sdx%s" % i
rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': dev})
rb.rebalance(seed=1)
old_version = rb.version
self.assertTrue(rb.prepare_increase_partition_power())
# sanity checks
self.assertEqual(8, rb.part_power)
self.assertEqual(9, rb.next_part_power)
self.assertEqual(rb.version, old_version + 1)
self.assertTrue(rb.cancel_increase_partition_power())
rb.validate()
self.assertEqual(8, rb.part_power)
self.assertEqual(8, rb.next_part_power)
self.assertEqual(rb.version, old_version + 2)
class TestGetRequiredOverload(unittest.TestCase):
maxDiff = None
    def test_none_needed(self):
        """Evenly weighted devices in a single tier need no overload."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        # 4 equal-weight devs and 3 replicas: this can be balanced without
        # resorting to overload at all
        self.assertAlmostEqual(rb.get_required_overload(), 0)
        # each device is weighted for 3/4 of a replica
        expected = {
            (0, 0, '127.0.0.1', 0): 0.75,
            (0, 0, '127.0.0.1', 1): 0.75,
            (0, 0, '127.0.0.1', 2): 0.75,
            (0, 0, '127.0.0.1', 3): 0.75,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {
            tier: weighted
            for (tier, weighted) in weighted_replicas.items()
            if len(tier) == 4})
        # dispersion wants exactly what the weights already give
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})
        # since no overload is needed, target_replicas is the same
        rb.set_overload(0.10)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})
        # ... no matter how high you go!
        rb.set_overload(100.0)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})
        # 3 equal-weight devs and 3 replicas: this can also be balanced
        rb.remove_dev(3)
        self.assertAlmostEqual(rb.get_required_overload(), 0)
        # now each remaining device is weighted for one full replica
        expected = {
            (0, 0, '127.0.0.1', 0): 1.0,
            (0, 0, '127.0.0.1', 1): 1.0,
            (0, 0, '127.0.0.1', 2): 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 4})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})
        # ... still no overload
        rb.set_overload(100.0)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})
def test_equal_replica_and_devices_count_ignore_weights(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 7.47,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 5.91,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 6.44,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
expected = {
0: 1.0,
1: 1.0,
2: 1.0,
}
# simplicity itself
self.assertEqual(expected, {
t[-1]: r for (t, r) in
rb._build_weighted_replicas_by_tier().items()
if len(t) == 4})
self.assertEqual(expected, {
t[-1]: r for (t, r) in
rb._build_wanted_replicas_by_tier().items()
if len(t) == 4})
self.assertEqual(expected, {
t[-1]: r for (t, r) in
rb._build_target_replicas_by_tier().items()
if len(t) == 4})
# ... no overload required!
self.assertEqual(0, rb.get_required_overload())
rb.rebalance()
expected = {
0: 256,
1: 256,
2: 256,
}
self.assertEqual(expected, {d['id']: d['parts'] for d in
rb._iter_devs()})
    def test_small_zone(self):
        """A slightly underweight zone needs a little overload to hold a
        whole replica for dispersion."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 4,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 3,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        # by weight, zone 2 (total weight 7 of 23) falls a little short of
        # a whole replica while zones 0 and 1 get a little extra
        expected = {
            (0, 0): 1.0434782608695652,
            (0, 1): 1.0434782608695652,
            (0, 2): 0.9130434782608695,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        # dispersion wants exactly one replica per zone
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 1.0,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})
        # the device tier is interesting because one of the devices in zone
        # two has a different weight
        expected = {
            0: 0.5217391304347826,
            1: 0.5217391304347826,
            2: 0.5217391304347826,
            3: 0.5217391304347826,
            4: 0.5217391304347826,
            5: 0.3913043478260869,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 4})
        # ... but, each pair of devices still needs to hold a whole
        # replicanth; which we'll try distribute fairly among devices in
        # zone 2, so that they can share the burden and ultimately the
        # required overload will be as small as possible.
        expected = {
            0: 0.5,
            1: 0.5,
            2: 0.5,
            3: 0.5,
            4: 0.5714285714285715,
            5: 0.42857142857142855,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})
        # full dispersion requires zone two's devices to eat more than
        # they're weighted for
        self.assertAlmostEqual(rb.get_required_overload(), 0.095238,
                               delta=1e-5)
        # so... if we give it enough overload it we should get full dispersion
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})
    def test_multiple_small_zones(self):
        """Several small zones together require a large overload; with less
        overload the target plan lands between weight and dispersion."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 150,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 150,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'weight': 150,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
        # zone 0 holds 2000 of the 2850 total weight - over 2 replicas
        expected = {
            (0, 0): 2.1052631578947367,
            (0, 1): 0.47368421052631576,
            (0, 2): 0.21052631578947367,
            (0, 3): 0.21052631578947367,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        # without any overload, we get weight
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: r
                          for (tier, r) in target_replicas.items()
                          if len(tier) == 2})
        # dispersion caps zones 0 and 1 at one replica each and splits the
        # third replica between the two smallest zones
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 0.49999999999999994,
            (0, 3): 0.49999999999999994,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {t: r
                          for (t, r) in wanted_replicas.items()
                          if len(t) == 2})
        self.assertEqual(1.3750000000000002, rb.get_required_overload())
        # with enough overload we get the full dispersion
        rb.set_overload(1.5)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: r
                          for (tier, r) in target_replicas.items()
                          if len(tier) == 2})
        # with not enough overload, we get somewhere in the middle
        rb.set_overload(1.0)
        expected = {
            (0, 0): 1.3014354066985647,
            (0, 1): 0.8564593301435406,
            (0, 2): 0.4210526315789473,
            (0, 3): 0.4210526315789473,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: r
                          for (tier, r) in target_replicas.items()
                          if len(tier) == 2})
    def test_big_zone(self):
        """One overweight zone must shed its excess to the smaller zones;
        partial overload yields a partially dispersed target."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'})
        # zone 0 is weighted for a bit more than one replica
        expected = {
            (0, 0): 1.0714285714285714,
            (0, 1): 0.6428571428571429,
            (0, 2): 0.6428571428571429,
            (0, 3): 0.6428571428571429,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        # dispersion caps zone 0 at one replica, splitting the rest evenly
        expected = {
            (0, 0): 1.0,
            (0, 1): 0.6666666666666667,
            (0, 2): 0.6666666666666667,
            (0, 3): 0.6666666666666667,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})
        # when all the devices and servers in a zone are evenly weighted
        # it will accurately proxy their required overload, all the
        # zones besides 0 require the same overload
        t = random.choice([t for t in weighted_replicas
                           if len(t) == 2
                           and t[1] != 0])
        expected_overload = ((wanted_replicas[t] - weighted_replicas[t])
                             / weighted_replicas[t])
        self.assertAlmostEqual(rb.get_required_overload(),
                               expected_overload)
        # but if you only give it out half of that
        rb.set_overload(expected_overload / 2.0)
        # ... you can expect it's not going to full disperse
        expected = {
            (0, 0): 1.0357142857142856,
            (0, 1): 0.6547619047619049,
            (0, 2): 0.6547619047619049,
            (0, 3): 0.6547619047619049,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})
    def test_enormous_zone(self):
        """A hugely overweight zone makes dispersion demand an enormous
        overload from the tiny zones."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 60,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 2, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 8, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 3, 'weight': 60,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        # zone 0 is weighted for over 2.5 of the 3 replicas
        expected = {
            (0, 0): 2.542372881355932,
            (0, 1): 0.15254237288135591,
            (0, 2): 0.15254237288135591,
            (0, 3): 0.15254237288135591,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        # dispersion still wants only one replica in zone 0
        expected = {
            (0, 0): 1.0,
            (0, 1): 0.6666666666666667,
            (0, 2): 0.6666666666666667,
            (0, 3): 0.6666666666666667,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})
        # ouch, those "tiny" devices need to hold 3x more than their
        # weighted for!
        self.assertAlmostEqual(rb.get_required_overload(), 3.370370,
                               delta=1e-5)
        # let's get a little crazy, and let devices eat up to 1x more than
        # their capacity is weighted for - see how far that gets us...
        rb.set_overload(1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            (0, 0): 2.084745762711864,
            (0, 1): 0.30508474576271183,
            (0, 2): 0.30508474576271183,
            (0, 3): 0.30508474576271183,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})
    def test_two_big_two_small(self):
        """Two full-replica zones plus two small zones that split the third
        replica; 20% overload achieves full dispersion."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 45,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 45,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 35,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 35,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        # by weight, zones 0 and 1 are slightly over one replica each
        expected = {
            (0, 0): 1.0714285714285714,
            (0, 1): 1.0714285714285714,
            (0, 2): 0.48214285714285715,
            (0, 3): 0.375,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        # dispersion caps them at 1.0 and pushes the excess onto zones 2/3
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 0.5625,
            (0, 3): 0.43749999999999994,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})
        # I'm not sure it's significant or coincidental that the devices
        # in zone 2 & 3 who end up splitting the 3rd replica turn out to
        # need to eat ~1/6th extra replicanths
        self.assertAlmostEqual(rb.get_required_overload(), 1.0 / 6.0)
        # ... *so* 10% isn't *quite* enough
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            (0, 0): 1.0285714285714285,
            (0, 1): 1.0285714285714285,
            (0, 2): 0.5303571428571429,
            (0, 3): 0.4125,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})
        # ... but 20% will do the trick!
        rb.set_overload(0.2)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 0.5625,
            (0, 3): 0.43749999999999994,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 2})
def test_multiple_replicas_each(self):
rb = ring.RingBuilder(8, 7, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 80,
'ip': '127.0.0.0', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 70,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
expected = {
(0, 0): 4.117647058823529,
(0, 1): 2.8823529411764706,
}
weighted_replicas = rb._build_weighted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in weighted_replicas.items()
if len(tier) == 2})
expected = {
(0, 0): 4.0,
(0, 1): 3.0,
}
wanted_replicas = rb._build_wanted_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in wanted_replicas.items()
if len(tier) == 2})
# I guess 2.88 => 3.0 is about a 4% increase
self.assertAlmostEqual(rb.get_required_overload(),
0.040816326530612256)
# ... 10% is plenty enough here
rb.set_overload(0.1)
target_replicas = rb._build_target_replicas_by_tier()
self.assertEqual(expected,
{tier: weighted
for (tier, weighted) in target_replicas.items()
if len(tier) == 2})
    def test_small_extra_server_in_zone_with_multiple_replicas(self):
        """A tiny extra server in an otherwise balanced zone demands a huge
        overload at the server tier, even though zones look fine."""
        rb = ring.RingBuilder(8, 5, 1)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 1000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 1000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdc', 'weight': 1000})
        # z1
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 1000})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 1000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdc', 'weight': 1000})
        # z1 - extra small server
        rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 50})
        expected = {
            (0, 0): 2.479338842975207,
            (0, 1): 2.5206611570247937,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t: r for (t, r) in
                                    weighted_replicas.items()
                                    if len(t) == 2})
        # dispersion is fine with this at the zone tier
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected, {t: r for (t, r) in
                                    wanted_replicas.items()
                                    if len(t) == 2})
        # ... but not ok with that tiny server: dispersion wants it to hold
        # a whole replica despite its weight of only 50
        expected = {
            '127.0.0.1': 2.479338842975207,
            '127.0.0.2': 1.5206611570247937,
            '127.0.0.3': 1.0,
        }
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    wanted_replicas.items()
                                    if len(t) == 3})
        self.assertAlmostEqual(23.2, rb.get_required_overload())
    def test_multiple_replicas_in_zone_with_single_device(self):
        """With five replicas and five devices, the lone device in z0 caps
        everyone at one replica per device, so no overload can help."""
        rb = ring.RingBuilder(8, 5, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # z1
        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sdb', 'weight': 100})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sdc', 'weight': 100})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sdd', 'weight': 100})
        # first things first, make sure we do this right
        rb.rebalance()
        # each device gets a single replica of every part
        expected = {
            0: 256,
            1: 256,
            2: 256,
            3: 256,
            4: 256,
        }
        self.assertEqual(expected, {d['id']: d['parts']
                                    for d in rb._iter_devs()})
        # but let's make sure we're thinking about it right too
        expected = {
            0: 1.0,
            1: 1.0,
            2: 1.0,
            3: 1.0,
            4: 1.0,
        }
        # by weight everyone is equal
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    weighted_replicas.items()
                                    if len(t) == 4})
        # wanted might have liked to have fewer replicas in z1, but the
        # single device in z0 limits us one replica per device
        with rb.debug():
            wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    wanted_replicas.items()
                                    if len(t) == 4})
        # even with some overload - still one replica per device
        rb.set_overload(1.0)
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    target_replicas.items()
                                    if len(t) == 4})
        # when overload can not change the outcome none is required
        self.assertEqual(0.0, rb.get_required_overload())
        # even though dispersion is terrible (in z1 particularly)
        self.assertEqual(20.0, rb.dispersion)
    def test_one_big_guy_does_not_spoil_his_buddy(self):
        """A huge device should absorb parts from its small same-tier buddy
        as overload grows, even while overall balance gets worse."""
        rb = ring.RingBuilder(8, 3, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # z1
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # z2
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        # obviously d5 gets one whole replica; the other two replicas
        # are split evenly among the five other devices
        # (i.e. ~0.4 replicanths for each 100 units of weight)
        expected = {
            0: 0.39999999999999997,
            1: 0.39999999999999997,
            2: 0.39999999999999997,
            3: 0.39999999999999997,
            4: 0.39999999999999997,
            5: 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    weighted_replicas.items()
                                    if len(t) == 4})
        # with no overload we get the "balanced" placement
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    target_replicas.items()
                                    if len(t) == 4})
        # but in reality, these devices having such disparate weights
        # leads to a *terrible* balance even w/o overload!
        rb.rebalance(seed=9)
        self.assertEqual(rb.get_balance(), 1308.2031249999998)
        # even though part assignment is pretty reasonable
        expected = {
            0: 103,
            1: 102,
            2: 103,
            3: 102,
            4: 102,
            5: 256,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})
        # so whats happening is the small devices are holding *way* more
        # *real* parts than their *relative* portion of the weight would
        # like them to!
        expected = {
            0: 1308.2031249999998,
            1: 1294.5312499999998,
            2: 1308.2031249999998,
            3: 1294.5312499999998,
            4: 1294.5312499999998,
            5: -65.0,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        # increasing overload moves towards one replica in each tier
        rb.set_overload(0.20)
        expected = {
            0: 0.48,
            1: 0.48,
            2: 0.48,
            3: 0.48,
            4: 0.30857142857142855,
            5: 0.7714285714285714,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    target_replicas.items()
                                    if len(t) == 4})
        # ... and as always increasing overload makes balance *worse*
        rb.rebalance(seed=17)
        self.assertEqual(rb.get_balance(), 1581.6406249999998)
        # but despite the overall trend toward imbalance, in the tier with the
        # huge device, we want to see the small device (d4) try to shed parts
        # as effectively as it can to the huge device in the same tier (d5)
        # this is a useful behavior anytime when for whatever reason a device
        # w/i a tier wants parts from another device already in the same tier
        # another example is `test_one_small_guy_does_not_spoil_his_buddy`
        expected = {
            0: 123,
            1: 123,
            2: 123,
            3: 123,
            4: 79,
            5: 197,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})
        # *see*, at least *someone's* balance is getting better!
        expected = {
            0: 1581.6406249999998,
            1: 1581.6406249999998,
            2: 1581.6406249999998,
            3: 1581.6406249999998,
            4: 980.078125,
            5: -73.06640625,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
    def test_one_small_guy_does_not_spoil_his_buddy(self):
        """A tiny device next to huge peers should not be forced to take
        many extra parts as overload grows - the mirror case of
        `test_one_big_guy_does_not_spoil_his_buddy`."""
        rb = ring.RingBuilder(8, 3, 0)
        # z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        # z1
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        # z2
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
                    'port': 6200, 'device': 'sda', 'weight': 10000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        # it's almost like 3.0 / 5 ~= 0.6, but that one little guy gets
        # his fair share
        expected = {
            0: 0.5988023952095808,
            1: 0.5988023952095808,
            2: 0.5988023952095808,
            3: 0.5988023952095808,
            4: 0.5988023952095808,
            5: 0.005988023952095809,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    weighted_replicas.items()
                                    if len(t) == 4})
        # with no overload we get a nice balanced placement
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    target_replicas.items()
                                    if len(t) == 4})
        rb.rebalance(seed=9)
        # part placement looks goods
        expected = {
            0: 154,
            1: 153,
            2: 153,
            3: 153,
            4: 153,
            5: 2,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})
        # ... balance is a little lumpy on the small guy since he wants
        # one and a half parts :\
        expected = {
            0: 0.4609375000000142,
            1: -0.1914062499999858,
            2: -0.1914062499999858,
            3: -0.1914062499999858,
            4: -0.1914062499999858,
            5: 30.46875,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        self.assertEqual(rb.get_balance(), 30.46875)
        # increasing overload moves towards one replica in each tier
        rb.set_overload(0.3)
        expected = {
            0: 0.553443113772455,
            1: 0.553443113772455,
            2: 0.553443113772455,
            3: 0.553443113772455,
            4: 0.778443113772455,
            5: 0.007784431137724551,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    target_replicas.items()
                                    if len(t) == 4})
        # ... and as always increasing overload makes balance *worse*
        rb.rebalance(seed=12)
        self.assertEqual(rb.get_balance(), 30.46875)
        # the little guy is really struggling to take his share tho
        expected = {
            0: 142,
            1: 141,
            2: 142,
            3: 141,
            4: 200,
            5: 2,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})
        # ... and you can see it in the balance!
        expected = {
            0: -7.367187499999986,
            1: -8.019531249999986,
            2: -7.367187499999986,
            3: -8.019531249999986,
            4: 30.46875,
            5: 30.46875,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
        rb.set_overload(0.5)
        expected = {
            0: 0.5232035928143712,
            1: 0.5232035928143712,
            2: 0.5232035928143712,
            3: 0.5232035928143712,
            4: 0.8982035928143712,
            5: 0.008982035928143714,
        }
        target_replicas = rb._build_target_replicas_by_tier()
        self.assertEqual(expected, {t[-1]: r for (t, r) in
                                    target_replicas.items()
                                    if len(t) == 4})
        # because the device is so small, balance gets bad quickly
        rb.rebalance(seed=17)
        self.assertEqual(rb.get_balance(), 95.703125)
        # but despite the overall trend toward imbalance, the little guy
        # isn't really taking on many new parts!
        expected = {
            0: 134,
            1: 134,
            2: 134,
            3: 133,
            4: 230,
            5: 3,
        }
        self.assertEqual(expected, {
            d['id']: d['parts'] for d in rb._iter_devs()})
        # *see*, everyone's balance is getting worse *together*!
        expected = {
            0: -12.585937499999986,
            1: -12.585937499999986,
            2: -12.585937499999986,
            3: -13.238281249999986,
            4: 50.0390625,
            5: 95.703125,
        }
        self.assertEqual(expected, rb._build_balance_per_dev())
def test_two_servers_with_more_than_one_replica(self):
rb = ring.RingBuilder(8, 3, 0)
# z0
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
'port': 6200, 'device': 'sda', 'weight': 60})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
'port': 6200, 'device': 'sda', 'weight': 60})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
'port': 6200, 'device': 'sda', 'weight': 60})
# z1
rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
'port': 6200, 'device': 'sda', 'weight': 80})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
'port': 6200, 'device': 'sda', 'weight': 128})
# z2
rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.2.1',
'port': 6200, 'device': 'sda', 'weight': 80})
rb.add_dev({'id': 6, 'region': 0, 'zone': 2, 'ip': '127.0.2.2',
'port': 6200, 'device': 'sda', 'weight': 240})
rb.set_overload(0.1)
rb.rebalance()
self.assertEqual(12.161458333333343, rb.get_balance())
replica_plan = rb._build_target_replicas_by_tier()
for dev in rb._iter_devs():
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
expected_parts = replica_plan[tier] * rb.parts
self.assertAlmostEqual(dev['parts'], expected_parts,
delta=1)
    def test_multi_zone_with_failed_device(self):
        """Remove one device from a balanced three-zone ring: dispersion
        asks the surviving device in that zone to hold a full replicanth,
        and increasing overload only gradually closes the gap between the
        weighted plan and the dispersion-driven "wanted" plan."""
        rb = ring.RingBuilder(8, 3, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})

        # sanity, balanced and dispersed
        expected = {
            (0, 0): 1.0,
            (0, 1): 1.0,
            (0, 2): 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 2})

        self.assertEqual(rb.get_required_overload(), 0.0)

        # fail a device in zone 2
        rb.remove_dev(4)

        # by weight alone, each remaining device would carry 0.6 replicanths
        expected = {
            0: 0.6,
            1: 0.6,
            2: 0.6,
            3: 0.6,
            5: 0.6,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 4})

        # but dispersion wants zone 2's one remaining device to hold a
        # whole replicanth on its own
        expected = {
            0: 0.5,
            1: 0.5,
            2: 0.5,
            3: 0.5,
            5: 1.0,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 4})

        # does this make sense? every zone was holding 1/3rd of the
        # replicas, so each device was 1/6th, remove a device and
        # suddenly it's holding *both* sixths which is 2/3rds?
        self.assertAlmostEqual(rb.get_required_overload(), 2.0 / 3.0)

        # 10% isn't nearly enough
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            0: 0.585,
            1: 0.585,
            2: 0.585,
            3: 0.585,
            5: 0.6599999999999999,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

        # 50% isn't even enough
        rb.set_overload(0.5)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            0: 0.525,
            1: 0.525,
            2: 0.525,
            3: 0.525,
            5: 0.8999999999999999,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})

        # even 65% isn't enough (but it's getting closer)
        rb.set_overload(0.65)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            0: 0.5025000000000001,
            1: 0.5025000000000001,
            2: 0.5025000000000001,
            3: 0.5025000000000001,
            5: 0.99,
        }
        self.assertEqual(expected,
                         {tier[3]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 4})
    def test_balanced_zones_unbalanced_servers(self):
        """Zones carry equal weight but the servers inside zone 1 are badly
        unbalanced; dispersion wants replicas spread across servers, which
        would require 200% overload, so a 100% overload only moves part of
        a replicanth between servers."""
        rb = ring.RingBuilder(8, 3, 1)
        # zone 0 server 127.0.0.1
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 3000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 3000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 3000})
        # zone 1 server 127.0.0.2
        rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 4000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 4000})
        # zone 1 (again) server 127.0.0.3
        rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 1000})

        weighted_replicas = rb._build_weighted_replicas_by_tier()

        # zones are evenly weighted
        expected = {
            (0, 0): 1.5,
            (0, 1): 1.5,
        }
        self.assertEqual(expected,
                         {tier: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 2})

        # ... but servers are not
        expected = {
            '127.0.0.1': 1.5,
            '127.0.0.2': 1.3333333333333333,
            '127.0.0.3': 0.16666666666666666,
        }
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 3})

        # make sure wanted will even it out
        expected = {
            '127.0.0.1': 1.5,
            '127.0.0.2': 1.0,
            '127.0.0.3': 0.4999999999999999,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 3})

        # so it wants 1/6th and eats 1/2 - that's 2/6ths more than it
        # wants which is a 200% increase
        self.assertAlmostEqual(rb.get_required_overload(), 2.0)

        # the overload doesn't effect the tiers that are already dispersed
        rb.set_overload(1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            '127.0.0.1': 1.5,
            # notice with half the overload 1/6th replicanth swapped servers
            '127.0.0.2': 1.1666666666666665,
            '127.0.0.3': 0.3333333333333333,
        }
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 3})
    def test_adding_second_zone(self):
        """Growing a one-zone ring with a tiny second zone: dispersion
        would require 6x overload on the new devices, but a modest 10%
        overload keeps the migration gradual."""
        rb = ring.RingBuilder(3, 3, 1)
        # zone 0 server 127.0.0.1
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        # zone 0 server 127.0.0.2
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        # zone 0 server 127.0.0.3
        rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'ip': '127.0.0.3',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})

        # sanity, balanced and dispersed
        expected = {
            '127.0.0.1': 1.0,
            '127.0.0.2': 1.0,
            '127.0.0.3': 1.0,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 3})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 3})

        self.assertEqual(rb.get_required_overload(), 0)

        # start adding a second zone

        # zone 1 server 127.0.1.1
        rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'ip': '127.0.1.1',
                    'port': 6200, 'device': 'sdb', 'weight': 100})
        # zone 1 server 127.0.1.2
        rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 9, 'region': 0, 'zone': 1, 'ip': '127.0.1.2',
                    'port': 6200, 'device': 'sdb', 'weight': 100})
        # zone 1 server 127.0.1.3
        rb.add_dev({'id': 10, 'region': 0, 'zone': 1, 'ip': '127.0.1.3',
                    'port': 6200, 'device': 'sda', 'weight': 100})
        rb.add_dev({'id': 11, 'region': 0, 'zone': 1, 'ip': '127.0.1.3',
                    'port': 6200, 'device': 'sdb', 'weight': 100})

        # this messes things up pretty royally
        expected = {
            '127.0.0.1': 0.9523809523809523,
            '127.0.0.2': 0.9523809523809523,
            '127.0.0.3': 0.9523809523809523,
            '127.0.1.1': 0.047619047619047616,
            '127.0.1.2': 0.047619047619047616,
            '127.0.1.3': 0.047619047619047616,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in weighted_replicas.items()
                          if len(tier) == 3})
        expected = {
            '127.0.0.1': 0.6666666666666667,
            '127.0.0.2': 0.6666666666666667,
            '127.0.0.3': 0.6666666666666667,
            '127.0.1.1': 0.3333333333333333,
            '127.0.1.2': 0.3333333333333333,
            '127.0.1.3': 0.3333333333333333,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in wanted_replicas.items()
                          if len(tier) == 3})

        # so dispersion would require these devices hold 6x more than
        # prescribed by weight, defeating any attempt at gradually
        # anything
        self.assertAlmostEqual(rb.get_required_overload(), 6.0)

        # so let's suppose we only allow for 10% overload
        rb.set_overload(0.10)
        target_replicas = rb._build_target_replicas_by_tier()

        expected = {
            # we expect servers in zone 0 to be between 0.952 and 0.666
            '127.0.0.1': 0.9476190476190476,
            '127.0.0.2': 0.9476190476190476,
            '127.0.0.3': 0.9476190476190476,
            # we expect servers in zone 1 to be between 0.0476 and 0.333
            # and in fact its ~10% increase (very little compared to 6x!)
            '127.0.1.1': 0.052380952380952375,
            '127.0.1.2': 0.052380952380952375,
            '127.0.1.3': 0.052380952380952375,
        }
        self.assertEqual(expected,
                         {tier[2]: weighted
                          for (tier, weighted) in target_replicas.items()
                          if len(tier) == 3})
    def test_gradual_replica_count(self):
        """Fractional replica count (2.5): dispersion requirements only
        extend to whole replicas, so removing a device needs just 20%
        overload, and 10% lands the targets halfway there."""
        rb = ring.RingBuilder(3, 2.5, 1)
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})
        rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sda', 'weight': 2000})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'ip': '127.0.0.2',
                    'port': 6200, 'device': 'sdb', 'weight': 2000})

        # 2.5 replicas / 4 equal devices = 0.625 replicanths each
        expected = {
            0: 0.625,
            1: 0.625,
            2: 0.625,
            3: 0.625,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {
            tier[3]: weighted
            for (tier, weighted) in weighted_replicas.items()
            if len(tier) == 4})
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected, {
            tier[3]: wanted
            for (tier, wanted) in wanted_replicas.items()
            if len(tier) == 4})

        self.assertEqual(rb.get_required_overload(), 0)

        # server 127.0.0.2 will have only one device
        rb.remove_dev(2)

        # server 127.0.0.1 has twice the capacity of 127.0.0.2
        expected = {
            '127.0.0.1': 1.6666666666666667,
            '127.0.0.2': 0.8333333333333334,
        }
        weighted_replicas = rb._build_weighted_replicas_by_tier()
        self.assertEqual(expected, {
            tier[2]: weighted
            for (tier, weighted) in weighted_replicas.items()
            if len(tier) == 3})

        # dispersion requirements extend only to whole replicas
        expected = {
            '127.0.0.1': 1.4999999999999998,
            '127.0.0.2': 1.0,
        }
        wanted_replicas = rb._build_wanted_replicas_by_tier()
        self.assertEqual(expected, {
            tier[2]: wanted
            for (tier, wanted) in wanted_replicas.items()
            if len(tier) == 3})

        # 5/6ths to a whole replicanth is a 20% increase
        self.assertAlmostEqual(rb.get_required_overload(), 0.2)

        # so let's suppose we only allow for 10% overload
        rb.set_overload(0.1)
        target_replicas = rb._build_target_replicas_by_tier()
        expected = {
            '127.0.0.1': 1.5833333333333333,
            '127.0.0.2': 0.9166666666666667,
        }
        self.assertEqual(expected, {
            tier[2]: wanted
            for (tier, wanted) in target_replicas.items()
            if len(tier) == 3})
    def test_perfect_four_zone_four_replica_bad_placement(self):
        """With 4 replicas over 2 regions x 2 zones and awkward float
        weights, the replica plan's 'max' values stay sane even though a
        naive ceil of the weighted replicanths over-counts."""
        rb = ring.RingBuilder(4, 4, 1)

        # this weight is sorta nuts, but it's really just to help the
        # weight_of_one_part hit a magic number where floats mess up
        # like they would on ring with a part power of 19 and 100's of
        # 1000's of units of weight.
        weight = 21739130434795e-11

        # r0z0
        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': weight,
                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': weight,
                    'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
        # r0z1
        rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': weight,
                    'ip': '127.0.1.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': weight,
                    'ip': '127.0.1.2', 'port': 10000, 'device': 'sdb'})
        # r1z0
        rb.add_dev({'id': 4, 'region': 1, 'zone': 0, 'weight': weight,
                    'ip': '127.1.0.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': weight,
                    'ip': '127.1.0.2', 'port': 10000, 'device': 'sdb'})
        # r1z1
        rb.add_dev({'id': 6, 'region': 1, 'zone': 1, 'weight': weight,
                    'ip': '127.1.1.1', 'port': 10000, 'device': 'sda'})
        rb.add_dev({'id': 7, 'region': 1, 'zone': 1, 'weight': weight,
                    'ip': '127.1.1.2', 'port': 10000, 'device': 'sdb'})

        # the replica plan is sound
        expectations = {
            # tier_len => expected replicas
            1: {
                (0,): 2.0,
                (1,): 2.0,
            },
            2: {
                (0, 0): 1.0,
                (0, 1): 1.0,
                (1, 0): 1.0,
                (1, 1): 1.0,
            }
        }
        wr = rb._build_replica_plan()
        for tier_len, expected in expectations.items():
            self.assertEqual(expected, {t: r['max'] for (t, r) in
                                        wr.items() if len(t) == tier_len})

        # even thought a naive ceil of weights is surprisingly wrong
        expectations = {
            # tier_len => expected replicas
            1: {
                (0,): 3.0,
                (1,): 3.0,
            },
            2: {
                (0, 0): 2.0,
                (0, 1): 2.0,
                (1, 0): 2.0,
                (1, 1): 2.0,
            }
        }
        wr = rb._build_weighted_replicas_by_tier()
        for tier_len, expected in expectations.items():
            self.assertEqual(expected, {t: ceil(r) for (t, r) in
                                        wr.items() if len(t) == tier_len})
class TestRingBuilderDispersion(unittest.TestCase):
    """Tests for RingBuilder's dispersion metric and the matching
    ``utils.dispersion_report`` output."""

    def setUp(self):
        # endless generator of unique device names: d0, d1, d2, ...
        self.devs = ('d%s' % i for i in itertools.count())

    def assertAlmostPartCount(self, counts, expected, delta=3):
        """Assert each tier in *expected* appears in *counts* with a part
        count within *delta*, reporting all tiers at once on failure.

        :param counts: mapping of tier key -> actual part count
        :param expected: mapping of tier key -> expected part count
        :param delta: allowed absolute difference per tier
        """
        msgs = []
        failed = False
        for k, p in sorted(expected.items()):
            try:
                self.assertAlmostEqual(counts[k], p, delta=delta)
            except KeyError:
                # a missing tier is fatal immediately - no useful report
                # can be built without its actual count
                self.fail('%r is missing the key %r' % (counts, k))
            except AssertionError:
                failed = True
                state = '!='
            else:
                state = 'ok'
            msgs.append('parts in %s was %s expected %s (%s)' % (
                k, counts[k], p, state))
        if failed:
            self.fail('some part counts not close enough '
                      'to expected:\n' + '\n'.join(msgs))

    def test_rebalance_dispersion(self):
        """Adding two empty zones to a full one: each rebalance moves one
        replica's worth of parts, stepping dispersion 50 -> 33 -> 17 -> 0,
        and dispersion_report agrees with the raw partition counts."""
        rb = ring.RingBuilder(8, 6, 0)

        for i in range(6):
            rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1',
                        'port': 6000, 'weight': 1.0,
                        'device': next(self.devs)})
        rb.rebalance()
        self.assertEqual(0, rb.dispersion)

        for z in range(2):
            for i in range(6):
                rb.add_dev({'region': 0, 'zone': z + 1, 'ip': '127.0.1.1',
                            'port': 6000, 'weight': 1.0,
                            'device': next(self.devs)})

        self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
                                   {0: 1536, 1: 0, 2: 0})
        rb.rebalance()
        self.assertEqual(rb.dispersion, 50.0)
        expected = {0: 1280, 1: 128, 2: 128}
        self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
                                   expected)
        report = dict(utils.dispersion_report(
            rb, r'r\d+z\d+$', verbose=True)['graph'])
        counts = {int(k.split('z')[1]): d['placed_parts']
                  for k, d in report.items()}
        self.assertAlmostPartCount(counts, expected)

        rb.rebalance()
        self.assertEqual(rb.dispersion, 33.333333333333336)
        expected = {0: 1024, 1: 256, 2: 256}
        self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
                                   expected)
        report = dict(utils.dispersion_report(
            rb, r'r\d+z\d+$', verbose=True)['graph'])
        counts = {int(k.split('z')[1]): d['placed_parts']
                  for k, d in report.items()}
        self.assertAlmostPartCount(counts, expected)

        rb.rebalance()
        self.assertEqual(rb.dispersion, 16.666666666666668)
        expected = {0: 768, 1: 384, 2: 384}
        self.assertAlmostPartCount(_partition_counts(rb, 'zone'),
                                   expected)
        report = dict(utils.dispersion_report(
            rb, r'r\d+z\d+$', verbose=True)['graph'])
        counts = {int(k.split('z')[1]): d['placed_parts']
                  for k, d in report.items()}
        self.assertAlmostPartCount(counts, expected)

        rb.rebalance()
        self.assertEqual(0, rb.dispersion)
        expected = {0: 512, 1: 512, 2: 512}
        self.assertAlmostPartCount(_partition_counts(rb, 'zone'), expected)
        report = dict(utils.dispersion_report(
            rb, r'r\d+z\d+$', verbose=True)['graph'])
        counts = {int(k.split('z')[1]): d['placed_parts']
                  for k, d in report.items()}
        self.assertAlmostPartCount(counts, expected)

    def test_weight_dispersion(self):
        """One server weighted 10x heavier than its two peers ends up
        holding more than one replica of every part, so dispersion is 50."""
        rb = ring.RingBuilder(8, 3, 0)
        for i in range(2):
            for d in range(3):
                rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.%s.1' % i,
                            'port': 6000, 'weight': 1.0,
                            'device': next(self.devs)})
        for d in range(3):
            rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.2.1',
                        'port': 6000, 'weight': 10.0,
                        'device': next(self.devs)})

        rb.rebalance()
        # each tier should only have 1 replicanth, but the big server has 2
        # replicas of every part and 3 replicas another 1/2 - so our total
        # dispersion is greater than one replicanth, it's 1.5
        self.assertEqual(50.0, rb.dispersion)
        expected = {
            '127.0.0.1': 64,
            '127.0.1.1': 64,
            '127.0.2.1': 640,
        }
        self.assertAlmostPartCount(_partition_counts(rb, 'ip'),
                                   expected)
        report = dict(utils.dispersion_report(
            rb, r'r\d+z\d+-[^/]*$', verbose=True)['graph'])
        counts = {k.split('-')[1]: d['placed_parts']
                  for k, d in report.items()}
        self.assertAlmostPartCount(counts, expected)

    def test_multiple_tier_dispersion(self):
        """Uneven server counts across two regions and three zones still
        converge to near-zero balance with modest dispersion after a few
        rebalances."""
        rb = ring.RingBuilder(10, 8, 0)
        r_z_to_ip_count = {
            (0, 0): 2,
            (1, 1): 1,
            (1, 2): 2,
        }
        ip_index = 0
        for (r, z), ip_count in sorted(r_z_to_ip_count.items()):
            for i in range(ip_count):
                ip_index += 1
                for d in range(3):
                    rb.add_dev({'region': r, 'zone': z,
                                'ip': '127.%s.%s.%s' % (r, z, ip_index),
                                'port': 6000, 'weight': 1.0,
                                'device': next(self.devs)})

        for i in range(3):
            # it might take a few rebalances for all the right part replicas to
            # balance from r1z2 into r1z1
            rb.rebalance()
        self.assertAlmostEqual(15.52734375, rb.dispersion, delta=5.0)
        self.assertAlmostEqual(0.0, rb.get_balance(), delta=0.5)
        expected = {
            '127.0.0.1': 1638,
            '127.0.0.2': 1638,
            '127.1.1.3': 1638,
            '127.1.2.4': 1638,
            '127.1.2.5': 1638,
        }
        delta = 10
        self.assertAlmostPartCount(_partition_counts(rb, 'ip'), expected,
                                   delta=delta)
        report = dict(utils.dispersion_report(
            rb, r'r\d+z\d+-[^/]*$', verbose=True)['graph'])
        counts = {k.split('-')[1]: d['placed_parts']
                  for k, d in report.items()}
        self.assertAlmostPartCount(counts, expected, delta=delta)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
karyon/django | django/contrib/postgres/operations.py | 374 | 1377 | from django.contrib.postgres.signals import register_hstore_handler
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
    """Migration operation that installs a named PostgreSQL extension.

    The operation is a no-op for Django's model state; it only issues DDL.
    """

    reversible = True

    def __init__(self, name):
        # Extension name; it is interpolated directly into the DDL below,
        # so it must come from trusted migration code, never user input.
        self.name = name

    def state_forwards(self, app_label, state):
        # Installing an extension does not change any model state.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Extensions only exist on PostgreSQL; silently skip other backends.
        if schema_editor.connection.vendor != 'postgresql':
            return
        schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Mirror the vendor guard from database_forwards: previously the
        # reverse path issued DROP EXTENSION even on non-PostgreSQL
        # backends, which would fail with a syntax/SQL error there.
        if schema_editor.connection.vendor != 'postgresql':
            return
        schema_editor.execute("DROP EXTENSION %s" % self.name)

    def describe(self):
        return "Creates extension %s" % self.name
class HStoreExtension(CreateExtension):
    """CreateExtension specialized for the PostgreSQL ``hstore`` extension."""

    def __init__(self, name):
        self.name = 'hstore'

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        super(HStoreExtension, self).database_forwards(app_label, schema_editor, from_state, to_state)
        # Register hstore straight away as it cannot be done before the
        # extension is installed, a subsequent data migration would use the
        # same connection
        register_hstore_handler(schema_editor.connection)
class UnaccentExtension(CreateExtension):
    """CreateExtension specialized for the PostgreSQL ``unaccent`` extension."""

    def __init__(self):
        self.name = 'unaccent'
Fale/ansible | lib/ansible/modules/tempfile.py | 13 | 3375 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Krzysztof Magosa <krzysztof@magosa.pl>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: tempfile
version_added: "2.3"
short_description: Creates temporary files and directories
description:
- The C(tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps
to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible
you need to use M(ansible.builtin.file) module.
- For Windows targets, use the M(ansible.windows.win_tempfile) module instead.
options:
state:
description:
- Whether to create file or directory.
type: str
choices: [ directory, file ]
default: file
path:
description:
- Location where temporary file or directory should be created.
- If path is not specified, the default system temporary directory will be used.
type: path
prefix:
description:
- Prefix of file/directory name created by module.
type: str
default: ansible.
suffix:
description:
- Suffix of file/directory name created by module.
type: str
default: ""
seealso:
- module: ansible.builtin.file
- module: ansible.windows.win_tempfile
author:
- Krzysztof Magosa (@krzysztof-magosa)
'''
EXAMPLES = """
- name: Create temporary build directory
ansible.builtin.tempfile:
state: directory
suffix: build
- name: Create temporary file
ansible.builtin.tempfile:
state: file
suffix: temp
register: tempfile_1
- name: Use the registered var and the file module to remove the temporary file
ansible.builtin.file:
path: "{{ tempfile_1.path }}"
state: absent
when: tempfile_1.path is defined
"""
RETURN = '''
path:
description: Path to created file or directory.
returned: success
type: str
sample: "/tmp/ansible.bMlvdk"
'''
from os import close
from tempfile import mkstemp, mkdtemp
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
    """Ansible entry point: create a temporary file or directory.

    Reports ``changed=True`` with the created path on success; any failure
    from the tempfile layer is reported via ``fail_json`` with a traceback.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='file', choices=['file', 'directory']),
            path=dict(type='path'),
            prefix=dict(type='str', default='ansible.'),
            suffix=dict(type='str', default=''),
        ),
    )

    params = module.params
    try:
        if params['state'] == 'file':
            # mkstemp returns an open descriptor; close it right away since
            # only the path is reported back to the caller.
            fd, created_path = mkstemp(
                prefix=params['prefix'],
                suffix=params['suffix'],
                dir=params['path'],
            )
            close(fd)
        elif params['state'] == 'directory':
            created_path = mkdtemp(
                prefix=params['prefix'],
                suffix=params['suffix'],
                dir=params['path'],
            )

        module.exit_json(changed=True, path=created_path)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=format_exc())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
TomasTriska/Robbie | robbie/src/programy/parser/template/evaluator.py | 1 | 1509 | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes import TemplateNode
from programy.bot import Bot
class TemplateEvaluator(object):
    """Evaluates a parsed AIML template tree into its response text."""

    def evaluate(self, bot: Bot, clientid: str, template_node: TemplateNode):
        """Resolve *template_node* for the given bot and client.

        Implements the block's long-standing TODO: resolution is wrapped in
        a try/except so a broken template returns "" instead of crashing
        the response pipeline; the error is logged with its traceback.

        :param bot: the bot on whose behalf the template is evaluated
        :param clientid: identifier of the client being answered
        :param template_node: root of the parsed template tree
        :return: the resolved response text, or "" if resolution raised
        """
        logging.debug("Evaluating node [%s]", template_node.to_string())
        template_node.output()
        try:
            return template_node.resolve(bot, clientid)
        except Exception:
            logging.exception("Failed to resolve template node [%s]",
                              template_node.to_string())
            return ""
| lgpl-3.0 |
pwendell/mesos | third_party/boto-2.0b2/boto/vpc/subnet.py | 10 | 1996 | # Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Subnet
"""
from boto.ec2.ec2object import EC2Object
class Subnet(EC2Object):
    """A VPC subnet, populated from the EC2 API's XML response elements."""

    # XML element name -> attribute name for plain string fields.
    _STRING_FIELDS = {
        'subnetId': 'id',
        'state': 'state',
        'cidrBlock': 'cidr_block',
        'availabilityZone': 'availability_zone',
    }

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.id = None
        self.state = None
        self.cidr_block = None
        self.available_ip_address_count = 0
        self.availability_zone = None

    def __repr__(self):
        return 'Subnet:{0}'.format(self.id)

    def endElement(self, name, value, connection):
        """SAX-style hook: store the closed element's text on this object."""
        if name == 'availableIpAddressCount':
            # the one numeric field - everything else stays a string
            self.available_ip_address_count = int(value)
            return
        # known fields map to snake_case attributes; anything unrecognized
        # is kept verbatim under its original element name
        attr = self._STRING_FIELDS.get(name, name)
        setattr(self, attr, value)
| apache-2.0 |
mensler/ansible | lib/ansible/modules/cloud/centurylink/clc_modify_server.py | 70 | 35324 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_modify_server
short_description: modify servers in CenturyLink Cloud.
description:
- An Ansible module to modify servers in CenturyLink Cloud.
version_added: "2.0"
options:
server_ids:
description:
- A list of server Ids to modify.
required: True
cpu:
description:
- How many CPUs to update on the server
required: False
default: None
memory:
description:
- Memory (in GB) to set to the server.
required: False
default: None
anti_affinity_policy_id:
description:
- The anti affinity policy id to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_name'
required: False
default: None
anti_affinity_policy_name:
description:
- The anti affinity policy name to be set for a hyper scale server.
This is mutually exclusive with 'anti_affinity_policy_id'
required: False
default: None
alert_policy_id:
description:
- The alert policy id to be associated to the server.
This is mutually exclusive with 'alert_policy_name'
required: False
default: None
alert_policy_name:
description:
- The alert policy name to be associated to the server.
This is mutually exclusive with 'alert_policy_id'
required: False
default: None
state:
description:
- The state to insure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [ True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: set the cpu count to 4 on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 4
state: present
- name: set the memory to 8GB on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
memory: 8
state: present
- name: set the anti affinity policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: present
- name: remove the anti affinity policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
anti_affinity_policy_name: 'aa_policy'
state: absent
- name: add the alert policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: present
- name: remove the alert policy on a server
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
alert_policy_name: 'alert_policy'
state: absent
- name: set the memory to 16GB and cpu to 8 core on a lust if servers
clc_modify_server:
server_ids:
- UC1TESTSVR01
- UC1TESTSVR02
cpu: 8
memory: 16
state: present
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects that are changed
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
# '${version}' is substituted with the real release number at packaging time.
from distutils.version import LooseVersion
# Optional dependencies are imported under try/except and only *recorded* as
# missing here; the failure is reported later via module.fail_json(), because
# an Ansible module must not raise ImportError at import time.
try:
    import requests
except ImportError:
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True
#
#  Requires the clc-python-sdk.
#  sudo pip install clc-sdk
#
try:
    import clc as clc_sdk
    from clc import CLCException
    from clc import APIFailedResponse
except ImportError:
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True
class ClcModifyServer:
clc = clc_sdk
    def __init__(self, module):
        """
        Construct the helper and verify runtime prerequisites.
        :param module: the AnsibleModule object driving this run
        """
        self.clc = clc_sdk
        self.module = module
        # Missing optional dependencies were recorded at import time; report
        # them here as clean Ansible failures (fail_json terminates the run).
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        # Older requests releases are known to break the clc sdk.
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')
        self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
p = self.module.params
cpu = p.get('cpu')
memory = p.get('memory')
state = p.get('state')
if state == 'absent' and (cpu or memory):
return self.module.fail_json(
msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to modify: %s' %
server_ids)
(changed, server_dict_array, changed_server_ids) = self._modify_servers(
server_ids=server_ids)
self.module.exit_json(
changed=changed,
server_ids=changed_server_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
state=dict(default='present', choices=['present', 'absent']),
cpu=dict(),
memory=dict(),
anti_affinity_policy_id=dict(),
anti_affinity_policy_name=dict(),
alert_policy_id=dict(),
alert_policy_name=dict(),
wait=dict(type='bool', default=True)
)
mutually_exclusive = [
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name']
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_list, message):
"""
Internal function to fetch list of CLC server objects from a list of server ids
:param server_list: The list of server ids
:param message: the error message to throw in case of any error
:return the list of CLC server objects
"""
try:
return self.clc.v2.Servers(server_list).servers
except CLCException as ex:
return self.module.fail_json(msg=message + ': %s' % ex.message)
    def _modify_servers(self, server_ids):
        """
        Apply the requested cpu/memory and policy changes to every server.
        :param server_ids: list of server ids to modify
        :return: (changed, server_dict_array, result_server_ids) -
            changed: True when at least one server was (or would be) modified
            server_dict_array: refreshed data dicts of the modified servers
            result_server_ids: ids of the modified servers
        """
        p = self.module.params
        state = p.get('state')
        # Requested settings, gathered once and applied to each server.
        server_params = {
            'cpu': p.get('cpu'),
            'memory': p.get('memory'),
            'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
            'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
            'alert_policy_id': p.get('alert_policy_id'),
            'alert_policy_name': p.get('alert_policy_name'),
        }
        changed = False
        server_changed = False
        aa_changed = False
        ap_changed = False
        server_dict_array = []
        result_server_ids = []
        request_list = []
        changed_servers = []
        if not isinstance(server_ids, list) or len(server_ids) < 1:
            return self.module.fail_json(
                msg='server_ids should be a list of servers, aborting')
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        for server in servers:
            if state == 'present':
                # cpu/memory resizes are asynchronous jobs (collected in
                # request_list); policy mappings take effect immediately.
                server_changed, server_result = self._ensure_server_config(
                    server, server_params)
                if server_result:
                    request_list.append(server_result)
                aa_changed = self._ensure_aa_policy_present(
                    server,
                    server_params)
                ap_changed = self._ensure_alert_policy_present(
                    server,
                    server_params)
            elif state == 'absent':
                # 'absent' only detaches policies; resizing was rejected
                # earlier in process_request().
                aa_changed = self._ensure_aa_policy_absent(
                    server,
                    server_params)
                ap_changed = self._ensure_alert_policy_absent(
                    server,
                    server_params)
            if server_changed or aa_changed or ap_changed:
                changed_servers.append(server)
                changed = True
        # Optionally block on the queued resize jobs, then re-read state so
        # the returned server data reflects the changes.
        self._wait_for_requests(self.module, request_list)
        self._refresh_servers(self.module, changed_servers)
        for server in changed_servers:
            server_dict_array.append(server.data)
            result_server_ids.append(server.id)
        return changed, server_dict_array, result_server_ids
def _ensure_server_config(
self, server, server_params):
"""
ensures the server is updated with the provided cpu and memory
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
cpu = server_params.get('cpu')
memory = server_params.get('memory')
changed = False
result = None
if not cpu:
cpu = server.cpu
if not memory:
memory = server.memory
if memory != server.memory or cpu != server.cpu:
if not self.module.check_mode:
result = self._modify_clc_server(
self.clc,
self.module,
server.id,
cpu,
memory)
changed = True
return changed, result
    @staticmethod
    def _modify_clc_server(clc, module, server_id, cpu, memory):
        """
        Modify the memory or CPU of a clc server.
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param server_id: id of the server to modify
        :param cpu: the new cpu value
        :param memory: the new memory value
        :return: the result of CLC API call, wrapped in clc.v2.Requests so
            callers can WaitUntilComplete() on it
        """
        result = None
        acct_alias = clc.v2.Account.GetAlias()
        try:
            # Update the server configuration
            # The PATCH body is a list of "set" operations, one per attribute.
            job_obj = clc.v2.API.Call('PATCH',
                                      'servers/%s/%s' % (acct_alias,
                                                         server_id),
                                      json.dumps([{"op": "set",
                                                   "member": "memory",
                                                   "value": memory},
                                                  {"op": "set",
                                                   "member": "cpu",
                                                   "value": cpu}]))
            result = clc.v2.Requests(job_obj)
        except APIFailedResponse as ex:
            module.fail_json(
                msg='Unable to update the server configuration for server : "{0}". {1}'.format(
                    server_id, str(ex.response_text)))
        return result
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process modify server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
def _ensure_aa_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided anti affinity policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id != current_aa_policy_id:
self._modify_aa_policy(
self.clc,
self.module,
acct_alias,
server.id,
aa_policy_id)
changed = True
return changed
    def _ensure_aa_policy_absent(
            self, server, server_params):
        """
        Ensures the provided anti affinity policy is removed from the server.
        :param server: the CLC server object
        :param server_params: the dictionary of server parameters
        :return: changed: Boolean whether a change was made
        """
        changed = False
        acct_alias = self.clc.v2.Account.GetAlias()
        aa_policy_id = server_params.get('anti_affinity_policy_id')
        aa_policy_name = server_params.get('anti_affinity_policy_name')
        if not aa_policy_id and aa_policy_name:
            aa_policy_id = self._get_aa_policy_id_by_name(
                self.clc,
                self.module,
                acct_alias,
                aa_policy_name)
        current_aa_policy_id = self._get_aa_policy_id_of_server(
            self.clc,
            self.module,
            acct_alias,
            server.id)
        # Only detach when the server's current policy is the requested one.
        if aa_policy_id and aa_policy_id == current_aa_policy_id:
            self._delete_aa_policy(
                self.clc,
                self.module,
                acct_alias,
                server.id)
            changed = True
        return changed
    @staticmethod
    def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
        """
        modifies the anti affinity policy of the CLC server
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param acct_alias: the CLC account alias
        :param server_id: the CLC server id
        :param aa_policy_id: the anti affinity policy id
        :return: result: The result from the CLC API call, or None in check mode
        """
        result = None
        # Respect check mode: report the change without performing it.
        if not module.check_mode:
            try:
                result = clc.v2.API.Call('PUT',
                                         'servers/%s/%s/antiAffinityPolicy' % (
                                             acct_alias,
                                             server_id),
                                         json.dumps({"id": aa_policy_id}))
            except APIFailedResponse as ex:
                module.fail_json(
                    msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
                        server_id, str(ex.response_text)))
        return result
    @staticmethod
    def _delete_aa_policy(clc, module, acct_alias, server_id):
        """
        Delete the anti affinity policy of the CLC server
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param acct_alias: the CLC account alias
        :param server_id: the CLC server id
        :return: result: The result from the CLC API call, or None in check mode
        """
        result = None
        # Respect check mode: report the change without performing it.
        if not module.check_mode:
            try:
                result = clc.v2.API.Call('DELETE',
                                         'servers/%s/%s/antiAffinityPolicy' % (
                                             acct_alias,
                                             server_id),
                                         json.dumps({}))
            except APIFailedResponse as ex:
                module.fail_json(
                    msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
                        server_id, str(ex.response_text)))
        return result
@staticmethod
def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(
msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
alias, str(ex.response_text)))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
    @staticmethod
    def _get_aa_policy_id_of_server(clc, module, alias, server_id):
        """
        retrieves the anti affinity policy id of the server based on the CLC server id
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param alias: the CLC account alias
        :param server_id: the CLC server id
        :return: aa_policy_id: The anti affinity policy id, or None when the
            server has no anti affinity policy mapped
        """
        aa_policy_id = None
        try:
            result = clc.v2.API.Call(
                method='GET', url='servers/%s/%s/antiAffinityPolicy' %
                (alias, server_id))
            aa_policy_id = result.get('id')
        except APIFailedResponse as ex:
            # A 404 means no policy is attached to the server; that is a
            # normal answer, not an error.
            if ex.response_status_code != 404:
                module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
                    server_id, str(ex.response_text)))
        return aa_policy_id
def _ensure_alert_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided alert policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and not self._alert_policy_exists(
server, alert_policy_id):
self._add_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
def _ensure_alert_policy_absent(
self, server, server_params):
"""
ensures the alert policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and self._alert_policy_exists(
server, alert_policy_id):
self._remove_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
    @staticmethod
    def _add_alert_policy_to_server(
            clc, module, acct_alias, server_id, alert_policy_id):
        """
        add the alert policy to CLC server
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param acct_alias: the CLC account alias
        :param server_id: the CLC server id
        :param alert_policy_id: the alert policy id
        :return: result: The result from the CLC API call, or None in check mode
        """
        result = None
        # Respect check mode: report the change without performing it.
        if not module.check_mode:
            try:
                result = clc.v2.API.Call('POST',
                                         'servers/%s/%s/alertPolicies' % (
                                             acct_alias,
                                             server_id),
                                         json.dumps({"id": alert_policy_id}))
            except APIFailedResponse as ex:
                module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
                    server_id, str(ex.response_text)))
        return result
    @staticmethod
    def _remove_alert_policy_to_server(
            clc, module, acct_alias, server_id, alert_policy_id):
        """
        Remove the alert policy from the CLC server.
        NOTE: the name says "to" but the method removes; kept as-is for
        compatibility with existing callers.
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param acct_alias: the CLC account alias
        :param server_id: the CLC server id
        :param alert_policy_id: the alert policy id
        :return: result: The result from the CLC API call, or None in check mode
        """
        result = None
        # Respect check mode: report the change without performing it.
        if not module.check_mode:
            try:
                result = clc.v2.API.Call('DELETE',
                                         'servers/%s/%s/alertPolicies/%s'
                                         % (acct_alias, server_id, alert_policy_id))
            except APIFailedResponse as ex:
                module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
                    server_id, str(ex.response_text)))
        return result
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
retrieves the alert policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
try:
alert_policies = clc.v2.API.Call(method='GET',
url='alertPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
alias, str(ex.response_text)))
for alert_policy in alert_policies.get('items'):
if alert_policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = alert_policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _alert_policy_exists(server, alert_policy_id):
"""
Checks if the alert policy exists for the server
:param server: the clc server object
:param alert_policy_id: the alert policy
:return: True: if the given alert policy id associated to the server, False otherwise
"""
result = False
alert_policies = server.alertPolicies
if alert_policies:
for alert_policy in alert_policies:
if alert_policy.get('id') == alert_policy_id:
result = True
return result
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    argument_dict = ClcModifyServer._define_module_argument_spec()
    module = AnsibleModule(supports_check_mode=True, **argument_dict)
    clc_modify_server = ClcModifyServer(module)
    clc_modify_server.process_request()
# Ansible's legacy convention: the wildcard import at the bottom provides
# AnsibleModule (used above) when the module is executed by Ansible.
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
    main()
| gpl-3.0 |
HydrelioxGitHub/home-assistant | homeassistant/components/eufy/switch.py | 3 | 1793 | """Support for Eufy switches."""
import logging
from homeassistant.components.switch import SwitchDevice
DEPENDENCIES = ['eufy']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up a Eufy switch entity from discovery info, if any."""
    # This platform is discovery-only; without discovery data there is
    # nothing to add.
    if discovery_info is not None:
        add_entities([EufySwitch(discovery_info)], True)
class EufySwitch(SwitchDevice):
    """Representation of a Eufy switch."""

    def __init__(self, device):
        """Create the entity and open a connection to the device."""
        import lakeside
        self._state = None
        self._name = device['name']
        self._address = device['address']
        self._code = device['code']
        self._type = device['type']
        # lakeside talks to the plug directly over the local network.
        self._switch = lakeside.switch(self._address, self._code, self._type)
        self._switch.connect()

    def update(self):
        """Synchronise state from the switch."""
        self._switch.update()
        self._state = self._switch.power

    @property
    def unique_id(self):
        """Return the device address as the stable entity id."""
        return self._address

    @property
    def name(self):
        """Return the configured device name."""
        return self._name

    @property
    def is_on(self):
        """Return true while the switch reports power on."""
        return self._state

    def _set_power(self, power):
        """Send the desired power state, reconnecting once on a dropped link."""
        try:
            self._switch.set_state(power)
        except BrokenPipeError:
            self._switch.connect()
            self._switch.set_state(power)

    def turn_on(self, **kwargs):
        """Turn the specified switch on."""
        self._set_power(True)

    def turn_off(self, **kwargs):
        """Turn the specified switch off."""
        self._set_power(False)
| apache-2.0 |
bikong2/django | tests/check_framework/test_model_field_deprecation.py | 322 | 2584 | from django.core import checks
from django.db import models
from django.test import SimpleTestCase
from .tests import IsolateModelsMixin
class TestDeprecatedField(IsolateModelsMixin, SimpleTestCase):
    """Checks that system_check_deprecated_details on a field yields a Warning."""
    def test_default_details(self):
        """An empty details dict falls back to the generic message and id."""
        class MyField(models.Field):
            system_check_deprecated_details = {}
        class Model(models.Model):
            name = MyField()
        model = Model()
        self.assertEqual(model.check(), [
            checks.Warning(
                msg='MyField has been deprecated.',
                hint=None,
                obj=Model._meta.get_field('name'),
                id='fields.WXXX',
            )
        ])
    def test_user_specified_details(self):
        """Explicit msg/hint/id in the details dict override the defaults."""
        class MyField(models.Field):
            system_check_deprecated_details = {
                'msg': 'This field is deprecated and will be removed soon.',
                'hint': 'Use something else.',
                'id': 'fields.W999',
            }
        class Model(models.Model):
            name = MyField()
        model = Model()
        self.assertEqual(model.check(), [
            checks.Warning(
                msg='This field is deprecated and will be removed soon.',
                hint='Use something else.',
                obj=Model._meta.get_field('name'),
                id='fields.W999',
            )
        ])
class TestRemovedField(IsolateModelsMixin, SimpleTestCase):
    """Checks that system_check_removed_details on a field yields an Error."""
    def test_default_details(self):
        """An empty details dict falls back to the generic message and id."""
        class MyField(models.Field):
            system_check_removed_details = {}
        class Model(models.Model):
            name = MyField()
        model = Model()
        self.assertEqual(model.check(), [
            checks.Error(
                msg='MyField has been removed except for support in historical migrations.',
                hint=None,
                obj=Model._meta.get_field('name'),
                id='fields.EXXX',
            )
        ])
    def test_user_specified_details(self):
        """Explicit msg/hint/id in the details dict override the defaults."""
        class MyField(models.Field):
            system_check_removed_details = {
                'msg': 'Support for this field is gone.',
                'hint': 'Use something else.',
                'id': 'fields.E999',
            }
        class Model(models.Model):
            name = MyField()
        model = Model()
        self.assertEqual(model.check(), [
            checks.Error(
                msg='Support for this field is gone.',
                hint='Use something else.',
                obj=Model._meta.get_field('name'),
                id='fields.E999',
            )
        ])
| bsd-3-clause |
Mega-DatA-Lab/mxnet | tests/python/unittest/test_gluon_data.py | 10 | 3988 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tarfile
import mxnet as mx
import numpy as np
from mxnet import gluon
def test_array_dataset():
    """DataLoader over an ArrayDataset must yield the arrays in batches of 2."""
    features = np.random.uniform(size=(10, 20))
    labels = np.random.uniform(size=(10,))
    dataset = gluon.data.ArrayDataset(features, labels)
    loader = gluon.data.DataLoader(dataset, 2)
    for batch_idx, (x, y) in enumerate(loader):
        lo, hi = batch_idx * 2, (batch_idx + 1) * 2
        assert mx.test_utils.almost_equal(x.asnumpy(), features[lo:hi])
        assert mx.test_utils.almost_equal(y.asnumpy(), labels[lo:hi])
def prepare_record():
    """Download the test images (first run only) and pack them into a
    RecordIO file, returning the path to the generated .rec file.

    Requires network access on first use; later runs reuse the cached
    files under data/.
    """
    if not os.path.isdir("data/test_images"):
        os.makedirs('data/test_images')
    if not os.path.isdir("data/test_images/test_images"):
        gluon.utils.download("http://data.mxnet.io/data/test_images.tar.gz", "data/test_images.tar.gz")
        tarfile.open('data/test_images.tar.gz').extractall('data/test_images/')
    if not os.path.exists('data/test.rec'):
        imgs = os.listdir('data/test_images/test_images')
        record = mx.recordio.MXIndexedRecordIO('data/test.idx', 'data/test.rec', 'w')
        for i, img in enumerate(imgs):
            str_img = open('data/test_images/test_images/'+img, 'rb').read()
            # Header label carries the record index so tests can verify order.
            s = mx.recordio.pack((0, i, i, 0), str_img)
            record.write_idx(i, s)
    return 'data/test.rec'
def test_recordimage_dataset():
    """ImageRecordDataset must yield decoded HWC images with their labels."""
    recfile = prepare_record()
    dataset = gluon.data.vision.ImageRecordDataset(recfile)
    loader = gluon.data.DataLoader(dataset, 1)
    for i, (x, y) in enumerate(loader):
        # Batch of one channel-last RGB image; the label is the record index.
        assert x.shape[0] == 1 and x.shape[3] == 3
        assert y.asscalar() == i
def test_sampler():
    """Sequential, random and batch samplers must cover the expected indices."""
    total = 10
    expected = list(range(total))
    sequential = gluon.data.SequentialSampler(total)
    assert list(sequential) == expected
    shuffled = gluon.data.RandomSampler(total)
    assert sorted(shuffled) == expected
    # 'keep' retains the final short batch, 'discard' drops it.
    kept = gluon.data.BatchSampler(sequential, 3, 'keep')
    assert sum(list(kept), []) == expected
    discarded = gluon.data.BatchSampler(sequential, 3, 'discard')
    assert sum(list(discarded), []) == list(range(9))
    random_kept = gluon.data.BatchSampler(shuffled, 3, 'keep')
    assert sorted(sum(list(random_kept), [])) == expected
def test_datasets():
    """Smoke-check the sizes of the bundled vision datasets.

    Each dataset is downloaded into data/ on first use, so this test
    needs network access the first time it runs.
    """
    assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000
    assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000
    assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist')) == 60000
    assert len(gluon.data.vision.FashionMNIST(root='data/fashion-mnist', train=False)) == 10000
    assert len(gluon.data.vision.CIFAR10(root='data/cifar10')) == 50000
    assert len(gluon.data.vision.CIFAR10(root='data/cifar10', train=False)) == 10000
    assert len(gluon.data.vision.CIFAR100(root='data/cifar100')) == 50000
    assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000
    assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000
def test_image_folder_dataset():
    """ImageFolderDataset must index the extracted test images by folder."""
    prepare_record()
    dataset = gluon.data.vision.ImageFolderDataset('data/test_images')
    # One class folder ('test_images') holding the 16 downloaded images.
    assert dataset.synsets == ['test_images']
    assert len(dataset.items) == 16
# Allow running this test file directly via nose.
if __name__ == '__main__':
    import nose
    nose.runmodule()
| apache-2.0 |
johnkeepmoving/oss-ftp | python27/win32/Lib/test/test_urlparse.py | 35 | 28405 | from test import test_support
import unittest
import urlparse
# Base URLs for the relative-reference (urljoin) tests; the names refer to
# the RFC whose examples they come from.
RFC1808_BASE = "http://a/b/c/d;p?q#f"
RFC2396_BASE = "http://a/b/c/d;p?q"
RFC3986_BASE = 'http://a/b/c/d;p?q'
SIMPLE_BASE = 'http://a/b/c/d'
# A list of test cases. Each test case is a two-tuple that contains
# a string with the query and a list with the expected result.
parse_qsl_test_cases = [
    ("", []),
    ("&", []),
    ("&&", []),
    ("=", [('', '')]),
    ("=a", [('', 'a')]),
    ("a", [('a', '')]),
    ("a=", [('a', '')]),
    # NOTE(review): the "a=" case appears twice -- looks like an accidental
    # duplicate; harmless, but worth confirming.
    ("a=", [('a', '')]),
    ("&a=b", [('a', 'b')]),
    ("a=a+b&b=b+c", [('a', 'a b'), ('b', 'b c')]),
    ("a=1&a=2", [('a', '1'), ('a', '2')]),
    ]
class UrlParseTestCase(unittest.TestCase):
    def checkRoundtrips(self, url, parsed, split):
        """Verify urlparse/urlsplit output, field access, reassembly, and
        the fixpoint property of geturl() for a single URL.

        :param url: URL string under test
        :param parsed: expected 6-tuple from urlparse()
        :param split: expected 5-tuple from urlsplit()
        """
        result = urlparse.urlparse(url)
        self.assertEqual(result, parsed)
        # Named-attribute access must agree with the tuple form.
        t = (result.scheme, result.netloc, result.path,
             result.params, result.query, result.fragment)
        self.assertEqual(t, parsed)
        # put it back together and it should be the same
        result2 = urlparse.urlunparse(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())
        # the result of geturl() is a fixpoint; we can always parse it
        # again to get the same result:
        result3 = urlparse.urlparse(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3,          result)
        self.assertEqual(result3.scheme,   result.scheme)
        self.assertEqual(result3.netloc,   result.netloc)
        self.assertEqual(result3.path,     result.path)
        self.assertEqual(result3.params,   result.params)
        self.assertEqual(result3.query,    result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port,     result.port)
        # check the roundtrip using urlsplit() as well
        result = urlparse.urlsplit(url)
        self.assertEqual(result, split)
        t = (result.scheme, result.netloc, result.path,
             result.query, result.fragment)
        self.assertEqual(t, split)
        result2 = urlparse.urlunsplit(result)
        self.assertEqual(result2, url)
        self.assertEqual(result2, result.geturl())
        # check the fixpoint property of re-parsing the result of geturl()
        result3 = urlparse.urlsplit(result.geturl())
        self.assertEqual(result3.geturl(), result.geturl())
        self.assertEqual(result3,          result)
        self.assertEqual(result3.scheme,   result.scheme)
        self.assertEqual(result3.netloc,   result.netloc)
        self.assertEqual(result3.path,     result.path)
        self.assertEqual(result3.query,    result.query)
        self.assertEqual(result3.fragment, result.fragment)
        self.assertEqual(result3.username, result.username)
        self.assertEqual(result3.password, result.password)
        self.assertEqual(result3.hostname, result.hostname)
        self.assertEqual(result3.port,     result.port)
def test_qsl(self):
for orig, expect in parse_qsl_test_cases:
result = urlparse.parse_qsl(orig, keep_blank_values=True)
self.assertEqual(result, expect, "Error parsing %r" % orig)
expect_without_blanks = [v for v in expect if len(v[1])]
result = urlparse.parse_qsl(orig, keep_blank_values=False)
self.assertEqual(result, expect_without_blanks,
"Error parsing %r" % orig)
    def test_roundtrips(self):
        # Each case: URL, expected urlparse() 6-tuple, expected urlsplit()
        # 5-tuple; checkRoundtrips verifies parsing and reassembly.
        testcases = [
            ('file:///tmp/junk.txt',
             ('file', '', '/tmp/junk.txt', '', '', ''),
             ('file', '', '/tmp/junk.txt', '', '')),
            ('imap://mail.python.org/mbox1',
             ('imap', 'mail.python.org', '/mbox1', '', '', ''),
             ('imap', 'mail.python.org', '/mbox1', '', '')),
            ('mms://wms.sys.hinet.net/cts/Drama/09006251100.asf',
             ('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
              '', '', ''),
             ('mms', 'wms.sys.hinet.net', '/cts/Drama/09006251100.asf',
              '', '')),
            ('nfs://server/path/to/file.txt',
             ('nfs', 'server', '/path/to/file.txt', '', '', ''),
             ('nfs', 'server', '/path/to/file.txt', '', '')),
            ('svn+ssh://svn.zope.org/repos/main/ZConfig/trunk/',
             ('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
              '', '', ''),
             ('svn+ssh', 'svn.zope.org', '/repos/main/ZConfig/trunk/',
              '', '')),
            ('git+ssh://git@github.com/user/project.git',
            ('git+ssh', 'git@github.com','/user/project.git',
             '','',''),
            ('git+ssh', 'git@github.com','/user/project.git',
             '', ''))
            ]
        for url, parsed, split in testcases:
            self.checkRoundtrips(url, parsed, split)
    def test_http_roundtrips(self):
        # urlparse.urlsplit treats 'http:' as an optimized special case,
        # so we test both 'http:' and 'https:' in all the following.
        # Three cheers for white box knowledge!
        # The scheme is prepended to each case below, so the URLs start
        # with bare '://'.
        testcases = [
            ('://www.python.org',
             ('www.python.org', '', '', '', ''),
             ('www.python.org', '', '', '')),
            ('://www.python.org#abc',
             ('www.python.org', '', '', '', 'abc'),
             ('www.python.org', '', '', 'abc')),
            ('://www.python.org?q=abc',
             ('www.python.org', '', '', 'q=abc', ''),
             ('www.python.org', '', 'q=abc', '')),
            ('://www.python.org/#abc',
             ('www.python.org', '/', '', '', 'abc'),
             ('www.python.org', '/', '', 'abc')),
            ('://a/b/c/d;p?q#f',
             ('a', '/b/c/d', 'p', 'q', 'f'),
             ('a', '/b/c/d;p', 'q', 'f')),
            ]
        for scheme in ('http', 'https'):
            for url, parsed, split in testcases:
                url = scheme + url
                parsed = (scheme,) + parsed
                split = (scheme,) + split
                self.checkRoundtrips(url, parsed, split)
def checkJoin(self, base, relurl, expected):
self.assertEqual(urlparse.urljoin(base, relurl), expected,
(base, relurl, expected))
def test_unparse_parse(self):
for u in ['Python', './Python','x-newscheme://foo.com/stuff','x://y','x:/y','x:/','/',]:
self.assertEqual(urlparse.urlunsplit(urlparse.urlsplit(u)), u)
self.assertEqual(urlparse.urlunparse(urlparse.urlparse(u)), u)
    def test_RFC1808(self):
        """Relative-reference resolution examples from RFC 1808."""
        # "normal" cases from RFC 1808:
        self.checkJoin(RFC1808_BASE, 'g:h', 'g:h')
        self.checkJoin(RFC1808_BASE, 'g', 'http://a/b/c/g')
        self.checkJoin(RFC1808_BASE, './g', 'http://a/b/c/g')
        self.checkJoin(RFC1808_BASE, 'g/', 'http://a/b/c/g/')
        self.checkJoin(RFC1808_BASE, '/g', 'http://a/g')
        self.checkJoin(RFC1808_BASE, '//g', 'http://g')
        self.checkJoin(RFC1808_BASE, 'g?y', 'http://a/b/c/g?y')
        self.checkJoin(RFC1808_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
        self.checkJoin(RFC1808_BASE, '#s', 'http://a/b/c/d;p?q#s')
        self.checkJoin(RFC1808_BASE, 'g#s', 'http://a/b/c/g#s')
        self.checkJoin(RFC1808_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
        self.checkJoin(RFC1808_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
        self.checkJoin(RFC1808_BASE, 'g;x', 'http://a/b/c/g;x')
        self.checkJoin(RFC1808_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
        self.checkJoin(RFC1808_BASE, '.', 'http://a/b/c/')
        self.checkJoin(RFC1808_BASE, './', 'http://a/b/c/')
        self.checkJoin(RFC1808_BASE, '..', 'http://a/b/')
        self.checkJoin(RFC1808_BASE, '../', 'http://a/b/')
        self.checkJoin(RFC1808_BASE, '../g', 'http://a/b/g')
        self.checkJoin(RFC1808_BASE, '../..', 'http://a/')
        self.checkJoin(RFC1808_BASE, '../../', 'http://a/')
        self.checkJoin(RFC1808_BASE, '../../g', 'http://a/g')
        # "abnormal" cases from RFC 1808:
        self.checkJoin(RFC1808_BASE, '', 'http://a/b/c/d;p?q#f')
        self.checkJoin(RFC1808_BASE, '../../../g', 'http://a/../g')
        self.checkJoin(RFC1808_BASE, '../../../../g', 'http://a/../../g')
        self.checkJoin(RFC1808_BASE, '/./g', 'http://a/./g')
        self.checkJoin(RFC1808_BASE, '/../g', 'http://a/../g')
        self.checkJoin(RFC1808_BASE, 'g.', 'http://a/b/c/g.')
        self.checkJoin(RFC1808_BASE, '.g', 'http://a/b/c/.g')
        self.checkJoin(RFC1808_BASE, 'g..', 'http://a/b/c/g..')
        self.checkJoin(RFC1808_BASE, '..g', 'http://a/b/c/..g')
        self.checkJoin(RFC1808_BASE, './../g', 'http://a/b/g')
        self.checkJoin(RFC1808_BASE, './g/.', 'http://a/b/c/g/')
        self.checkJoin(RFC1808_BASE, 'g/./h', 'http://a/b/c/g/h')
        self.checkJoin(RFC1808_BASE, 'g/../h', 'http://a/b/c/h')
        # RFC 1808 and RFC 1630 disagree on these (according to RFC 1808),
        # so we'll not actually run these tests (which expect 1808 behavior).
        #self.checkJoin(RFC1808_BASE, 'http:g', 'http:g')
        #self.checkJoin(RFC1808_BASE, 'http:', 'http:')
def test_RFC2368(self):
# Issue 11467: path that starts with a number is not parsed correctly
self.assertEqual(urlparse.urlparse('mailto:1337@example.org'),
('mailto', '', '1337@example.org', '', '', ''))
    def test_RFC2396(self):
        """Relative-reference resolution examples from RFC 2396."""
        # cases from RFC 2396
        self.checkJoin(RFC2396_BASE, 'g:h', 'g:h')
        self.checkJoin(RFC2396_BASE, 'g', 'http://a/b/c/g')
        self.checkJoin(RFC2396_BASE, './g', 'http://a/b/c/g')
        self.checkJoin(RFC2396_BASE, 'g/', 'http://a/b/c/g/')
        self.checkJoin(RFC2396_BASE, '/g', 'http://a/g')
        self.checkJoin(RFC2396_BASE, '//g', 'http://g')
        self.checkJoin(RFC2396_BASE, 'g?y', 'http://a/b/c/g?y')
        self.checkJoin(RFC2396_BASE, '#s', 'http://a/b/c/d;p?q#s')
        self.checkJoin(RFC2396_BASE, 'g#s', 'http://a/b/c/g#s')
        self.checkJoin(RFC2396_BASE, 'g?y#s', 'http://a/b/c/g?y#s')
        self.checkJoin(RFC2396_BASE, 'g;x', 'http://a/b/c/g;x')
        self.checkJoin(RFC2396_BASE, 'g;x?y#s', 'http://a/b/c/g;x?y#s')
        self.checkJoin(RFC2396_BASE, '.', 'http://a/b/c/')
        self.checkJoin(RFC2396_BASE, './', 'http://a/b/c/')
        self.checkJoin(RFC2396_BASE, '..', 'http://a/b/')
        self.checkJoin(RFC2396_BASE, '../', 'http://a/b/')
        self.checkJoin(RFC2396_BASE, '../g', 'http://a/b/g')
        self.checkJoin(RFC2396_BASE, '../..', 'http://a/')
        self.checkJoin(RFC2396_BASE, '../../', 'http://a/')
        self.checkJoin(RFC2396_BASE, '../../g', 'http://a/g')
        self.checkJoin(RFC2396_BASE, '', RFC2396_BASE)
        self.checkJoin(RFC2396_BASE, '../../../g', 'http://a/../g')
        self.checkJoin(RFC2396_BASE, '../../../../g', 'http://a/../../g')
        self.checkJoin(RFC2396_BASE, '/./g', 'http://a/./g')
        self.checkJoin(RFC2396_BASE, '/../g', 'http://a/../g')
        self.checkJoin(RFC2396_BASE, 'g.', 'http://a/b/c/g.')
        self.checkJoin(RFC2396_BASE, '.g', 'http://a/b/c/.g')
        self.checkJoin(RFC2396_BASE, 'g..', 'http://a/b/c/g..')
        self.checkJoin(RFC2396_BASE, '..g', 'http://a/b/c/..g')
        self.checkJoin(RFC2396_BASE, './../g', 'http://a/b/g')
        self.checkJoin(RFC2396_BASE, './g/.', 'http://a/b/c/g/')
        self.checkJoin(RFC2396_BASE, 'g/./h', 'http://a/b/c/g/h')
        self.checkJoin(RFC2396_BASE, 'g/../h', 'http://a/b/c/h')
        self.checkJoin(RFC2396_BASE, 'g;x=1/./y', 'http://a/b/c/g;x=1/y')
        self.checkJoin(RFC2396_BASE, 'g;x=1/../y', 'http://a/b/c/y')
        self.checkJoin(RFC2396_BASE, 'g?y/./x', 'http://a/b/c/g?y/./x')
        self.checkJoin(RFC2396_BASE, 'g?y/../x', 'http://a/b/c/g?y/../x')
        self.checkJoin(RFC2396_BASE, 'g#s/./x', 'http://a/b/c/g#s/./x')
        self.checkJoin(RFC2396_BASE, 'g#s/../x', 'http://a/b/c/g#s/../x')
    def test_RFC3986(self):
        """Reference-resolution examples from RFC 3986, section 5.4."""
        # Test cases from RFC3986
        self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
        self.checkJoin(RFC2396_BASE, ';x', 'http://a/b/c/;x')
        self.checkJoin(RFC3986_BASE, 'g:h','g:h')
        self.checkJoin(RFC3986_BASE, 'g','http://a/b/c/g')
        self.checkJoin(RFC3986_BASE, './g','http://a/b/c/g')
        self.checkJoin(RFC3986_BASE, 'g/','http://a/b/c/g/')
        self.checkJoin(RFC3986_BASE, '/g','http://a/g')
        self.checkJoin(RFC3986_BASE, '//g','http://g')
        self.checkJoin(RFC3986_BASE, '?y','http://a/b/c/d;p?y')
        self.checkJoin(RFC3986_BASE, 'g?y','http://a/b/c/g?y')
        self.checkJoin(RFC3986_BASE, '#s','http://a/b/c/d;p?q#s')
        self.checkJoin(RFC3986_BASE, 'g#s','http://a/b/c/g#s')
        self.checkJoin(RFC3986_BASE, 'g?y#s','http://a/b/c/g?y#s')
        self.checkJoin(RFC3986_BASE, ';x','http://a/b/c/;x')
        self.checkJoin(RFC3986_BASE, 'g;x','http://a/b/c/g;x')
        self.checkJoin(RFC3986_BASE, 'g;x?y#s','http://a/b/c/g;x?y#s')
        self.checkJoin(RFC3986_BASE, '','http://a/b/c/d;p?q')
        self.checkJoin(RFC3986_BASE, '.','http://a/b/c/')
        self.checkJoin(RFC3986_BASE, './','http://a/b/c/')
        self.checkJoin(RFC3986_BASE, '..','http://a/b/')
        self.checkJoin(RFC3986_BASE, '../','http://a/b/')
        self.checkJoin(RFC3986_BASE, '../g','http://a/b/g')
        self.checkJoin(RFC3986_BASE, '../..','http://a/')
        self.checkJoin(RFC3986_BASE, '../../','http://a/')
        self.checkJoin(RFC3986_BASE, '../../g','http://a/g')
        #Abnormal Examples
        # The 'abnormal scenarios' are incompatible with RFC2986 parsing
        # Tests are here for reference.
        #self.checkJoin(RFC3986_BASE, '../../../g','http://a/g')
        #self.checkJoin(RFC3986_BASE, '../../../../g','http://a/g')
        #self.checkJoin(RFC3986_BASE, '/./g','http://a/g')
        #self.checkJoin(RFC3986_BASE, '/../g','http://a/g')
        self.checkJoin(RFC3986_BASE, 'g.','http://a/b/c/g.')
        self.checkJoin(RFC3986_BASE, '.g','http://a/b/c/.g')
        self.checkJoin(RFC3986_BASE, 'g..','http://a/b/c/g..')
        self.checkJoin(RFC3986_BASE, '..g','http://a/b/c/..g')
        self.checkJoin(RFC3986_BASE, './../g','http://a/b/g')
        self.checkJoin(RFC3986_BASE, './g/.','http://a/b/c/g/')
        self.checkJoin(RFC3986_BASE, 'g/./h','http://a/b/c/g/h')
        self.checkJoin(RFC3986_BASE, 'g/../h','http://a/b/c/h')
        self.checkJoin(RFC3986_BASE, 'g;x=1/./y','http://a/b/c/g;x=1/y')
        self.checkJoin(RFC3986_BASE, 'g;x=1/../y','http://a/b/c/y')
        self.checkJoin(RFC3986_BASE, 'g?y/./x','http://a/b/c/g?y/./x')
        self.checkJoin(RFC3986_BASE, 'g?y/../x','http://a/b/c/g?y/../x')
        self.checkJoin(RFC3986_BASE, 'g#s/./x','http://a/b/c/g#s/./x')
        self.checkJoin(RFC3986_BASE, 'g#s/../x','http://a/b/c/g#s/../x')
        #self.checkJoin(RFC3986_BASE, 'http:g','http:g') # strict parser
        self.checkJoin(RFC3986_BASE, 'http:g','http://a/b/c/g') # relaxed parser
        # Test for issue9721
        self.checkJoin('http://a/b/c/de', ';x','http://a/b/c/;x')
    def test_urljoins(self):
        """Miscellaneous urljoin() cases against SIMPLE_BASE, including the
        'http:' same-scheme relative forms and non-http schemes."""
        self.checkJoin(SIMPLE_BASE, 'g:h','g:h')
        self.checkJoin(SIMPLE_BASE, 'http:g','http://a/b/c/g')
        self.checkJoin(SIMPLE_BASE, 'http:','http://a/b/c/d')
        self.checkJoin(SIMPLE_BASE, 'g','http://a/b/c/g')
        self.checkJoin(SIMPLE_BASE, './g','http://a/b/c/g')
        self.checkJoin(SIMPLE_BASE, 'g/','http://a/b/c/g/')
        self.checkJoin(SIMPLE_BASE, '/g','http://a/g')
        self.checkJoin(SIMPLE_BASE, '//g','http://g')
        self.checkJoin(SIMPLE_BASE, '?y','http://a/b/c/d?y')
        self.checkJoin(SIMPLE_BASE, 'g?y','http://a/b/c/g?y')
        self.checkJoin(SIMPLE_BASE, 'g?y/./x','http://a/b/c/g?y/./x')
        self.checkJoin(SIMPLE_BASE, '.','http://a/b/c/')
        self.checkJoin(SIMPLE_BASE, './','http://a/b/c/')
        self.checkJoin(SIMPLE_BASE, '..','http://a/b/')
        self.checkJoin(SIMPLE_BASE, '../','http://a/b/')
        self.checkJoin(SIMPLE_BASE, '../g','http://a/b/g')
        self.checkJoin(SIMPLE_BASE, '../..','http://a/')
        self.checkJoin(SIMPLE_BASE, '../../g','http://a/g')
        self.checkJoin(SIMPLE_BASE, '../../../g','http://a/../g')
        self.checkJoin(SIMPLE_BASE, './../g','http://a/b/g')
        self.checkJoin(SIMPLE_BASE, './g/.','http://a/b/c/g/')
        self.checkJoin(SIMPLE_BASE, '/./g','http://a/./g')
        self.checkJoin(SIMPLE_BASE, 'g/./h','http://a/b/c/g/h')
        self.checkJoin(SIMPLE_BASE, 'g/../h','http://a/b/c/h')
        self.checkJoin(SIMPLE_BASE, 'http:g','http://a/b/c/g')
        self.checkJoin(SIMPLE_BASE, 'http:','http://a/b/c/d')
        self.checkJoin(SIMPLE_BASE, 'http:?y','http://a/b/c/d?y')
        self.checkJoin(SIMPLE_BASE, 'http:g?y','http://a/b/c/g?y')
        self.checkJoin(SIMPLE_BASE, 'http:g?y/./x','http://a/b/c/g?y/./x')
        self.checkJoin('http:///', '..','http:///')
        self.checkJoin('', 'http://a/b/c/g?y/./x','http://a/b/c/g?y/./x')
        self.checkJoin('', 'http://a/./g', 'http://a/./g')
        self.checkJoin('svn://pathtorepo/dir1','dir2','svn://pathtorepo/dir2')
        self.checkJoin('svn+ssh://pathtorepo/dir1','dir2','svn+ssh://pathtorepo/dir2')
    def test_RFC2732(self):
        """IPv6 literal addresses in netloc (RFC 2732): hostname is
        lowercased and unbracketed, port is an int or None."""
        for url, hostname, port in [
            ('http://Test.python.org:5432/foo/', 'test.python.org', 5432),
            ('http://12.34.56.78:5432/foo/', '12.34.56.78', 5432),
            ('http://[::1]:5432/foo/', '::1', 5432),
            ('http://[dead:beef::1]:5432/foo/', 'dead:beef::1', 5432),
            ('http://[dead:beef::]:5432/foo/', 'dead:beef::', 5432),
            ('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:5432/foo/',
             'dead:beef:cafe:5417:affe:8fa3:deaf:feed', 5432),
            ('http://[::12.34.56.78]:5432/foo/', '::12.34.56.78', 5432),
            ('http://[::ffff:12.34.56.78]:5432/foo/',
             '::ffff:12.34.56.78', 5432),
            ('http://Test.python.org/foo/', 'test.python.org', None),
            ('http://12.34.56.78/foo/', '12.34.56.78', None),
            ('http://[::1]/foo/', '::1', None),
            ('http://[dead:beef::1]/foo/', 'dead:beef::1', None),
            ('http://[dead:beef::]/foo/', 'dead:beef::', None),
            ('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]/foo/',
             'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
            ('http://[::12.34.56.78]/foo/', '::12.34.56.78', None),
            ('http://[::ffff:12.34.56.78]/foo/',
             '::ffff:12.34.56.78', None),
            ('http://Test.python.org:/foo/', 'test.python.org', None),
            ('http://12.34.56.78:/foo/', '12.34.56.78', None),
            ('http://[::1]:/foo/', '::1', None),
            ('http://[dead:beef::1]:/foo/', 'dead:beef::1', None),
            ('http://[dead:beef::]:/foo/', 'dead:beef::', None),
            ('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:/foo/',
             'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
            ('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
            ('http://[::ffff:12.34.56.78]:/foo/',
             '::ffff:12.34.56.78', None),
            ]:
            urlparsed = urlparse.urlparse(url)
            self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
        # Unterminated or misplaced brackets must raise ValueError.
        for invalid_url in [
                'http://::12.34.56.78]/',
                'http://[::1/foo/',
                'ftp://[::1/foo/bad]/bad',
                'http://[::1/foo/bad]/bad',
                'http://[::ffff:12.34.56.78']:
            self.assertRaises(ValueError, urlparse.urlparse, invalid_url)
    def test_urldefrag(self):
        """urldefrag() returns (URL without fragment, fragment)."""
        for url, defrag, frag in [
            ('http://python.org#frag', 'http://python.org', 'frag'),
            ('http://python.org', 'http://python.org', ''),
            ('http://python.org/#frag', 'http://python.org/', 'frag'),
            ('http://python.org/', 'http://python.org/', ''),
            ('http://python.org/?q#frag', 'http://python.org/?q', 'frag'),
            ('http://python.org/?q', 'http://python.org/?q', ''),
            ('http://python.org/p#frag', 'http://python.org/p', 'frag'),
            ('http://python.org/p?q', 'http://python.org/p?q', ''),
            (RFC1808_BASE, 'http://a/b/c/d;p?q', 'f'),
            (RFC2396_BASE, 'http://a/b/c/d;p?q', ''),
            ]:
            self.assertEqual(urlparse.urldefrag(url), (defrag, frag))
    def test_urlsplit_attributes(self):
        """SplitResult attributes: scheme/netloc/path/query/fragment plus
        the derived username/password/hostname/port and geturl()."""
        url = "HTTP://WWW.PYTHON.ORG/doc/#frag"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "WWW.PYTHON.ORG")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, None)
        self.assertEqual(p.password, None)
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, None)
        # geturl() won't return exactly the original URL in this case
        # since the scheme is always case-normalized
        #self.assertEqual(p.geturl(), url)
        url = "http://User:Pass@www.python.org:080/doc/?query=yes#frag"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "User:Pass@www.python.org:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "User")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)
        # Addressing issue1698, which suggests Username can contain
        # "@" characters. Though not RFC compliant, many ftp sites allow
        # and request email addresses as usernames.
        url = "http://User@example.com:Pass@www.python.org:080/doc/?query=yes#frag"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.scheme, "http")
        self.assertEqual(p.netloc, "User@example.com:Pass@www.python.org:080")
        self.assertEqual(p.path, "/doc/")
        self.assertEqual(p.query, "query=yes")
        self.assertEqual(p.fragment, "frag")
        self.assertEqual(p.username, "User@example.com")
        self.assertEqual(p.password, "Pass")
        self.assertEqual(p.hostname, "www.python.org")
        self.assertEqual(p.port, 80)
        self.assertEqual(p.geturl(), url)
        # Verify an illegal port of value greater than 65535 is set as None
        url = "http://www.python.org:65536"
        p = urlparse.urlsplit(url)
        self.assertEqual(p.port, None)
def test_issue14072(self):
p1 = urlparse.urlsplit('tel:+31-641044153')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+31-641044153')
p2 = urlparse.urlsplit('tel:+31641044153')
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
# Assert for urlparse
p1 = urlparse.urlparse('tel:+31-641044153')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+31-641044153')
p2 = urlparse.urlparse('tel:+31641044153')
self.assertEqual(p2.scheme, 'tel')
self.assertEqual(p2.path, '+31641044153')
def test_telurl_params(self):
p1 = urlparse.urlparse('tel:123-4;phone-context=+1-650-516')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '123-4')
self.assertEqual(p1.params, 'phone-context=+1-650-516')
p1 = urlparse.urlparse('tel:+1-201-555-0123')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '+1-201-555-0123')
self.assertEqual(p1.params, '')
p1 = urlparse.urlparse('tel:7042;phone-context=example.com')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '7042')
self.assertEqual(p1.params, 'phone-context=example.com')
p1 = urlparse.urlparse('tel:863-1234;phone-context=+1-914-555')
self.assertEqual(p1.scheme, 'tel')
self.assertEqual(p1.path, '863-1234')
self.assertEqual(p1.params, 'phone-context=+1-914-555')
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
p = urlparse.urlsplit("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
p = urlparse.urlparse("http://www.example.net:foo")
self.assertEqual(p.netloc, "www.example.net:foo")
self.assertRaises(ValueError, lambda: p.port)
def test_attributes_without_netloc(self):
# This example is straight from RFC 3261. It looks like it
# should allow the username, hostname, and port to be filled
# in, but doesn't. Since it's a URI and doesn't use the
# scheme://netloc syntax, the netloc and related attributes
# should be left empty.
uri = "sip:alice@atlanta.com;maddr=239.255.255.1;ttl=15"
p = urlparse.urlsplit(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
p = urlparse.urlparse(uri)
self.assertEqual(p.netloc, "")
self.assertEqual(p.username, None)
self.assertEqual(p.password, None)
self.assertEqual(p.hostname, None)
self.assertEqual(p.port, None)
self.assertEqual(p.geturl(), uri)
def test_caching(self):
# Test case for bug #1313119
uri = "http://example.com/doc/"
unicode_uri = unicode(uri)
urlparse.urlparse(unicode_uri)
p = urlparse.urlparse(uri)
self.assertEqual(type(p.scheme), type(uri))
self.assertEqual(type(p.hostname), type(uri))
self.assertEqual(type(p.path), type(uri))
def test_noslash(self):
# Issue 1637: http://foo.com?query is legal
self.assertEqual(urlparse.urlparse("http://example.com?blahblah=/foo"),
('http', 'example.com', '', '', 'blahblah=/foo', ''))
def test_anyscheme(self):
# Issue 7904: s3://foo.com/stuff has netloc "foo.com".
self.assertEqual(urlparse.urlparse("s3://foo.com/stuff"),
('s3','foo.com','/stuff','','',''))
self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff"),
('x-newscheme','foo.com','/stuff','','',''))
self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff?query#fragment"),
('x-newscheme','foo.com','/stuff','','query','fragment'))
self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff?query"),
('x-newscheme','foo.com','/stuff','','query',''))
def test_withoutscheme(self):
# Test urlparse without scheme
# Issue 754016: urlparse goes wrong with IP:port without scheme
# RFC 1808 specifies that netloc should start with //, urlparse expects
# the same, otherwise it classifies the portion of url as path.
self.assertEqual(urlparse.urlparse("path"),
('','','path','','',''))
self.assertEqual(urlparse.urlparse("//www.python.org:80"),
('','www.python.org:80','','','',''))
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
def test_portseparator(self):
# Issue 754016 makes changes for port separator ':' from scheme separator
self.assertEqual(urlparse.urlparse("path:80"),
('','','path:80','','',''))
self.assertEqual(urlparse.urlparse("http:"),('http','','','','',''))
self.assertEqual(urlparse.urlparse("https:"),('https','','','','',''))
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
def test_main():
    """Entry point used by the regression-test harness."""
    test_support.run_unittest(UrlParseTestCase)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_main()
| mit |
dflazaro/Orion2GoogleSpreadsheet | clientcreds.py | 2 | 2761 | """
This module is part of Orion2GoogleSpreadsheet project.
Contains functionality used by Orion2GoogleSpreadsheet to Obtain credentials
and Authenticate clients for communication with Google Data API.
"""
import yaml
import logs
import httplib2
import gdata.spreadsheet.service
from googleapiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client import tools
# Load Credentials and Properties #
def get_properties():
    """Load configuration from ``credentials.yaml``.

    :return: parsed properties on success; ``None`` when the file cannot be
        read or parsed (the error is logged, preserving the original
        best-effort behaviour).
    """
    logs.logger.info("Loading properties")
    try:
        # Context manager guarantees the handle is closed (the original
        # leaked it and shadowed the ``file`` builtin).  safe_load refuses
        # to instantiate arbitrary Python objects from YAML tags; a plain
        # key/value credentials file parses identically.
        with open("credentials.yaml") as config_file:
            properties = yaml.safe_load(config_file)
        logs.logger.info("Properties loaded")
        return properties
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        logs.logger.error("Error loading properties")
# Clients Auth and Credentials
def get_client_credentials(client):
"""
Makes Google Oauth flow and store credentials in file.
Creates an authenticated Drive client/ Spreadsheets client
depending on the client param introduced.
param client: "drive" for Drive client / "sheets" for Spreadsheets client
type client: string
return: Authenticated Drive/Spreadsheet client Object
"""
try:
logs.logger.info("Creating storage for credentials")
storage = Storage("creds.dat")
credentials = storage.get()
if credentials is None or credentials.invalid:
logs.logger.info("Obtaining credentials")
flags = tools.argparser.parse_args(args=[])
properties = get_properties()
flow = OAuth2WebServerFlow(properties['CLIENT_ID'], properties['CLIENT_SECRET'],
properties['OAUTH_SCOPE'], properties['REDIRECT_URI'])
credentials = tools.run_flow(flow, storage, flags)
if credentials.access_token_expired:
credentials.refresh(httplib2.Http())
if client == "drive":
try:
logs.logger.info("Creating Drive client")
http = credentials.authorize(httplib2.Http())
dr_client = build('drive', 'v2', http=http)
return dr_client
except:
logs.logger.warn("An error occurred while creating Drive client")
elif client == "sheets":
try:
logs.logger.info("Creating Spreadsheets client")
sp_client = gdata.spreadsheet.service.SpreadsheetsService(
additional_headers={'Authorization': 'Bearer %s' % credentials.access_token})
return sp_client
except:
logs.logger.warn("An error occurred while creating Spreadsheets client")
except:
logs.logger.warn("An error occurred while obtaining credentials") | gpl-3.0 |
cdrttn/samba-regedit | python/samba/__init__.py | 15 | 11893 | # Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba 4."""
__docformat__ = "restructuredText"
import os
import sys
import samba.param
def source_tree_topdir():
    """Return the top level source directory."""
    # Probe the two known depths of this module relative to the tree root;
    # a root is recognised by containing a 'source4' directory.
    here = os.path.dirname(__file__)
    for relative in ("../../..", "../../../.."):
        candidate = os.path.normpath(os.path.join(here, relative))
        if os.path.exists(os.path.join(candidate, 'source4')):
            return candidate
    raise RuntimeError("unable to find top level source directory")
def in_source_tree():
    """Return True if we are running from within the samba source tree"""
    try:
        # Only success/failure of the lookup matters; the original bound
        # the result to an unused local variable.
        source_tree_topdir()
    except RuntimeError:
        return False
    return True
import ldb
from samba._ldb import Ldb as _Ldb
class Ldb(_Ldb):
    """Simple Samba-specific LDB subclass that takes care
    of setting up the modules dir, credentials pointers, etc.
    Please note that this is intended to be for all Samba LDB files,
    not necessarily the Sam database. For Sam-specific helper
    functions see samdb.py.
    """
    def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
                 credentials=None, flags=0, options=None):
        """Opens a Samba Ldb file.
        :param url: Optional LDB URL to open
        :param lp: Optional loadparm object
        :param modules_dir: Optional modules directory
        :param session_info: Optional session information
        :param credentials: Optional credentials, defaults to anonymous.
        :param flags: Optional LDB flags
        :param options: Additional options (optional)
        This is different from a regular Ldb file in that the Samba-specific
        modules-dir is used by default and that credentials and session_info
        can be passed through (required by some modules).
        """
        if modules_dir is not None:
            self.set_modules_dir(modules_dir)
        else:
            self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb"))
        if session_info is not None:
            self.set_session_info(session_info)
        if credentials is not None:
            self.set_credentials(credentials)
        if lp is not None:
            self.set_loadparm(lp)
        # This must be done before we load the schema, as these handlers for
        # objectSid and objectGUID etc must take precedence over the 'binary
        # attribute' declaration in the schema
        self.register_samba_handlers()
        # TODO set debug
        # Placeholder debug callback; only used by the commented-out
        # set_debug() call below.
        def msg(l, text):
            print text
        #self.set_debug(msg)
        self.set_utf8_casefold()
        # Allow admins to force non-sync ldb for all databases
        if lp is not None:
            nosync_p = lp.get("nosync", "ldb")
            if nosync_p is not None and nosync_p:
                flags |= ldb.FLG_NOSYNC
        # 0600 (Python 2 octal literal): database files are private to the
        # owning user.
        self.set_create_perms(0600)
        if url is not None:
            self.connect(url, flags, options)
    def searchone(self, attribute, basedn=None, expression=None,
                  scope=ldb.SCOPE_BASE):
        """Search for one attribute as a string.
        :param basedn: BaseDN for the search.
        :param attribute: Name of the attribute
        :param expression: Optional search expression.
        :param scope: Search scope (defaults to base).
        :return: Value of attribute as a string or None if it wasn't found.
        """
        res = self.search(basedn, scope, expression, [attribute])
        if len(res) != 1 or res[0][attribute] is None:
            return None
        values = set(res[0][attribute])
        # A single-valued attribute is expected here.
        assert len(values) == 1
        return self.schema_format_value(attribute, values.pop())
    def erase_users_computers(self, dn):
        """Erases user and computer objects from our AD.
        This is needed since the 'samldb' module denies the deletion of primary
        groups. Therefore all groups shouldn't be primary somewhere anymore.
        """
        try:
            res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[],
                              expression="(|(objectclass=user)(objectclass=computer))")
        except ldb.LdbError, (errno, _):
            if errno == ldb.ERR_NO_SUCH_OBJECT:
                # Ignore no such object errors
                return
            else:
                raise
        try:
            for msg in res:
                # "relax:0" control bypasses the usual modification checks.
                self.delete(msg.dn, ["relax:0"])
        except ldb.LdbError, (errno, _):
            if errno != ldb.ERR_NO_SUCH_OBJECT:
                # Ignore no such object errors
                raise
    def erase_except_schema_controlled(self):
        """Erase this ldb.
        :note: Removes all records, except those that are controlled by
            Samba4's schema.
        """
        basedn = ""
        # Try to delete user/computer accounts to allow deletion of groups
        self.erase_users_computers(basedn)
        # Delete the 'visible' records, and the invisble 'deleted' records (if
        # this DB supports it)
        for msg in self.search(basedn, ldb.SCOPE_SUBTREE,
                               "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
                               [], controls=["show_deleted:0", "show_recycled:0"]):
            try:
                self.delete(msg.dn, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore no such object errors
                    raise
        # Re-run the search to verify the database really is empty now.
        res = self.search(basedn, ldb.SCOPE_SUBTREE,
                          "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
                          [], controls=["show_deleted:0", "show_recycled:0"])
        assert len(res) == 0
        # delete the specials
        for attr in ["@SUBCLASSES", "@MODULES",
                     "@OPTIONS", "@PARTITION", "@KLUDGEACL"]:
            try:
                self.delete(attr, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore missing dn errors
                    raise
    def erase(self):
        """Erase this ldb, removing all records."""
        self.erase_except_schema_controlled()
        # delete the specials
        for attr in ["@INDEXLIST", "@ATTRIBUTES"]:
            try:
                self.delete(attr, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore missing dn errors
                    raise
    def load_ldif_file_add(self, ldif_path):
        """Load a LDIF file.
        :param ldif_path: Path to LDIF file.
        """
        # NOTE(review): the file handle is left for the garbage collector;
        # consider closing it explicitly.
        self.add_ldif(open(ldif_path, 'r').read())
    def add_ldif(self, ldif, controls=None):
        """Add data based on a LDIF string.
        :param ldif: LDIF text.
        """
        for changetype, msg in self.parse_ldif(ldif):
            # Only plain records are accepted here, not changetype records.
            assert changetype == ldb.CHANGETYPE_NONE
            self.add(msg, controls)
    def modify_ldif(self, ldif, controls=None):
        """Modify database based on a LDIF string.
        :param ldif: LDIF text.
        """
        for changetype, msg in self.parse_ldif(ldif):
            if changetype == ldb.CHANGETYPE_ADD:
                self.add(msg, controls)
            else:
                self.modify(msg, controls)
def substitute_var(text, values):
    """Expand every ``${NAME}`` placeholder in *text*.
    :param text: Text in which to substitute.
    :param values: Dictionary mapping variable names to replacement strings.
    :return: The text with all known placeholders replaced.
    """
    result = text
    for name in values:
        value = values[name]
        assert isinstance(name, str), "%r is not a string" % name
        assert isinstance(value, str), "Value %r for %s is not a string" % (value, name)
        result = result.replace("${" + name + "}", value)
    return result
def check_all_substituted(text):
    """Check that all substitution variables in a string have been replaced.
    If not, raise an exception.
    :param text: The text to search for substitution variables
    :raises Exception: naming the first unsubstituted variable found.
    """
    var_start = text.find("${")
    if var_start == -1:
        return
    var_end = text.find("}", var_start)
    if var_end == -1:
        # Unterminated "${": the original sliced text[var_start:0], which
        # produced an empty (useless) message.  Show the rest of the text.
        unsubstituted = text[var_start:]
    else:
        unsubstituted = text[var_start:var_end + 1]
    raise Exception("Not all variables substituted: %s" % unsubstituted)
def read_and_sub_file(file_name, subst_vars):
    """Read a file and sub in variables found in it
    :param file_name: File to be read (typically from setup directory)
    :param subst_vars: Optional variables to substitute in the file.
    :return: File contents with all substitutions applied.
    """
    # Context manager closes the handle promptly; the original left it to
    # the garbage collector.
    with open(file_name, 'r') as f:
        data = f.read()
    if subst_vars is not None:
        data = substitute_var(data, subst_vars)
        check_all_substituted(data)
    return data
def setup_file(template, fname, subst_vars=None):
    """Setup a file in the private dir.
    :param template: Path of the template file.
    :param fname: Path of the file to create.
    :param subst_vars: Substitution variables.
    """
    # Remove any stale copy first so we always write a fresh file.
    if os.path.exists(fname):
        os.unlink(fname)
    data = read_and_sub_file(template, subst_vars)
    # ``with`` replaces the original try/finally close.
    with open(fname, 'w') as f:
        f.write(data)
# Maximum length of a NetBIOS name (enforced by valid_netbios_name below).
MAX_NETBIOS_NAME_LEN = 15
def is_valid_netbios_char(c):
    """Return True if character *c* may appear in a NetBIOS name."""
    if c.isalnum():
        return True
    # Punctuation characters permitted in NetBIOS names.
    return c in " !#$%&'()-.@^_{}~"
def valid_netbios_name(name):
    """Check whether a name is valid as a NetBIOS name. """
    # See crh's book (1.4.1.1): at most MAX_NETBIOS_NAME_LEN characters,
    # each drawn from the set accepted by is_valid_netbios_char().
    if len(name) > MAX_NETBIOS_NAME_LEN:
        return False
    return all(is_valid_netbios_char(ch) for ch in name)
def import_bundled_package(modulename, location):
    """Import the bundled version of a package.
    :note: This should only be called if the system version of the package
        is not adequate.
    :param modulename: Module name to import
    :param location: Location to add to sys.path (can be relative to
        ${srcdir}/lib)
    """
    # In a source tree the bundled copy lives under ${srcdir}/lib/<location>;
    # in an installed system it is importable as samba.external.<modulename>.
    # Either way the result is registered in sys.modules under the plain
    # module name.
    if in_source_tree():
        sys.path.insert(0, os.path.join(source_tree_topdir(), "lib", location))
        sys.modules[modulename] = __import__(modulename)
    else:
        sys.modules[modulename] = __import__(
            "samba.external.%s" % modulename, fromlist=["samba.external"])
def ensure_external_module(modulename, location):
    """Add a location to sys.path if an external dependency can't be found.
    :param modulename: Module name to import
    :param location: Location to add to sys.path (can be relative to
        ${srcdir}/lib)
    """
    # EAFP: prefer the system copy of the module; fall back to the bundled
    # copy only when the import fails.
    try:
        __import__(modulename)
    except ImportError:
        import_bundled_package(modulename, location)
def dn_from_dns_name(dnsdomain):
    """return a DN from a DNS name domain/forest root"""
    # Each dotted label becomes one DC= component of the DN.
    labels = dnsdomain.split(".")
    return ",".join("DC=%s" % label for label in labels)
# Re-export the C helpers from the _glue extension module at package level.
# (The duplicate ``unix2nttime`` assignment from the original was removed.)
import _glue
version = _glue.version
interface_ips = _glue.interface_ips
set_debug_level = _glue.set_debug_level
get_debug_level = _glue.get_debug_level
unix2nttime = _glue.unix2nttime
nttime2string = _glue.nttime2string
nttime2unix = _glue.nttime2unix
generate_random_password = _glue.generate_random_password
strcasecmp_m = _glue.strcasecmp_m
strstr_m = _glue.strstr_m
| gpl-3.0 |
johnbachman/rasmodel | src/REM/RAF_module/BRAF_module.py | 6 | 3777 | """ Detailed mechanistic model of BRAF based on Neal Rossen paper. """
from pysb import *
from pysb.util import alias_model_components
def monomers():
Monomer('BRAF', ['ras', 'd', 'vem', 'erk'])
Monomer('Vem', ['raf'])
# IC values
# --------
Parameter('BRAF_0', 1e5)
Parameter('Vem_0', 1000)
alias_model_components()
# Initial conditions
# ------------------
Initial(BRAF(d=None, ras=None, erk=None, vem=None), BRAF_0)
Initial(Vem(raf=None), Vem_0)
def BRAF_dynamics():
# Parameters
# -----------
Parameter('kaf', 1e-6)
Parameter('kar', 1)
Parameter('kbf', 0.5) # 1)
Parameter('kbr', 1e-11)
Parameter('kcf', 1)
Parameter('kcr', 0.0001)
Parameter('kdf', 1)
Parameter('kdr', 0.1)
Parameter('kef', 1e-2)
Parameter('ker', 0.1)
Parameter('kff', 1e-5)
Parameter('kfr', 1)
Parameter('kgf', 1e-11)
Parameter('kgr', 1)
Parameter('khf', 1e-2) # 100)
Parameter('khr', 1) # 1)
Parameter('koff', 1)
alias_model_components()
# Rules
# -----
# BRAF dimerization
Rule('BRAF_dimerization',
BRAF(d=None, ras=None) + BRAF(d=None, ras=None, vem=None) <>
BRAF(d=1, ras=None) % BRAF(d=1, ras=None, vem=None), kaf, kar)
# KRAS binding BRAF monomers
Rule('KRAS_binding_BRAF_monomers',
BRAF(ras=None, d=None) + KRAS(raf=None, state='gtp') <>
BRAF(ras=1, d=None) % KRAS(raf=1, state='gtp'), kdf, kdr)
# KRAS binding BRAF dimers
Rule('KRAS_binding_BRAF_dimers',
BRAF(ras=None, d=1) % BRAF(d=1) +
KRAS(raf=None, state='gtp') <>
BRAF(ras=2, d=1) % BRAF(d=1) %
KRAS(raf=2, state='gtp'), kbf, kbr)
# KRAS:BRAF dimerization
Rule('KRASBRAF_dimerization',
BRAF(d=None, ras=ANY) + BRAF(d=None, ras=ANY, vem=None) <>
BRAF(d=1, ras=ANY) % BRAF(d=1, ras=ANY, vem=None), kcf, kcr)
# BRAF:Vem dimerization to give 2(BRAF:Vem) g = a * f
Rule('BRAF_Vem_dimerization',
BRAF(d=None, ras=None, vem=ANY) + BRAF(d=None, ras=None, vem=ANY) <>
BRAF(d=1, ras=None, vem=ANY) % BRAF(d=1, ras=None, vem=ANY), kgf, kgr)
# KRAS:BRAF:Vem dimerization to give 2( KRAS:BRAF:Vem) h = c * a
Rule('KRAS_BRAF_Vem_dimerization',
BRAF(d=None, ras=ANY, vem=ANY) + BRAF(d=None, ras=ANY, vem=ANY) <>
BRAF(d=1, ras=ANY, vem=ANY) % BRAF(d=1, ras=ANY, vem=ANY), khf, khr)
# 1st Vemurafenib binds
Rule('First_binding_Vemurafenib',
BRAF(vem=None) % BRAF(vem=None) + Vem(raf=None) <>
BRAF(vem=1) % BRAF(vem=None) % Vem(raf=1), kef, ker)
# 2nd Vemurafenib binding
Rule('Second_binding_vemurafenib',
BRAF(vem=None) % BRAF(vem=ANY) + Vem(raf=None) <>
BRAF(vem=1) % BRAF(vem=ANY) % Vem(raf=1), kff, kfr)
# Vemurafenib binds BRAF monomer
Rule('Vemurafenib_binds_BRAF_monomer',
BRAF(vem=None, d=None) + Vem(raf=None) <>
BRAF(vem=1, d=None) % Vem(raf=1), kef, ker)
# Release KRAS:GDP from BRAF
Rule('KRAS_GDP_dissoc_BRAF',
KRAS(state='gdp', raf=1) % BRAF(ras=1) >>
KRAS(state='gdp', raf=None) + BRAF(ras=None), koff)
def observables():
    """Declare the model observables.

    ``BRAF_WT_active`` counts BRAF that is dimerized (``d=ANY``) and free
    of vemurafenib; ``BRAF_V600E_active`` counts any vemurafenib-free
    BRAF regardless of dimerization state.
    """
    # Observables
    # ----------
    Observable('BRAF_WT_active',
               BRAF(d=ANY, vem=None))
    Observable('BRAF_V600E_active',
               BRAF(vem=None))
# if __name__ == '__main__':
# from pysb.integrate import Solver
# import matplotlib.pyplot as plt
# import numpy as np
# ts = np.linspace(0, 100, 100)
# solver = Solver(model, ts)
# solver.run()
# plt.figure()
# plt.plot(ts, solver.yobs['BRAF_WT_active'], label='WT')
# plt.plot(ts, solver.yobs['BRAF_V600E_active'], label='V600E')
# plt.legend()
# plt.show()
| mit |
Kussie/HTPC-Manager | libs/sqlobject/tests/test_sqlite.py | 6 | 4221 | import threading
from sqlobject import *
from sqlobject.tests.dbtest import *
from sqlobject.tests.dbtest import setSQLiteConnectionFactory
from test_basic import TestSO1
class SQLiteFactoryTest(SQLObject):
    """Minimal table used by the connection-factory/aggregate tests below."""
    # Single text column; rows are created in test_sqlite_aggregate().
    name = StringCol()
def test_sqlite_factory():
    """A callable passed to setSQLiteConnectionFactory() must be used to
    build every new low-level DB-API connection."""
    setupClass(SQLiteFactoryTest)
    if SQLiteFactoryTest._connection.dbName == "sqlite":
        if not SQLiteFactoryTest._connection.using_sqlite2:
            # The factory hook only exists for pysqlite2/sqlite3.
            return
    factory = [None]  # closure cell: records the class the factory produced
    def SQLiteConnectionFactory(sqlite):
        # *sqlite* is the driver module; subclass its Connection so we
        # can later recognize connections built through this factory.
        class MyConnection(sqlite.Connection):
            pass
        factory[0] = MyConnection
        return MyConnection
    setSQLiteConnectionFactory(SQLiteFactoryTest, SQLiteConnectionFactory)
    conn = SQLiteFactoryTest._connection.makeConnection()
    assert factory[0]
    assert isinstance(conn, factory[0])
def test_sqlite_factory_str():
    """The connection factory may also be given by *name*: a string that
    is looked up as an attribute of the sqliteconnection module."""
    setupClass(SQLiteFactoryTest)
    if SQLiteFactoryTest._connection.dbName == "sqlite":
        if not SQLiteFactoryTest._connection.using_sqlite2:
            # The factory hook only exists for pysqlite2/sqlite3.
            return
    factory = [None]  # closure cell: records the class the factory produced
    def SQLiteConnectionFactory(sqlite):
        class MyConnection(sqlite.Connection):
            pass
        factory[0] = MyConnection
        return MyConnection
    from sqlobject.sqlite import sqliteconnection
    # Install the factory under its name so the string lookup can find it.
    sqliteconnection.SQLiteConnectionFactory = SQLiteConnectionFactory
    setSQLiteConnectionFactory(SQLiteFactoryTest, "SQLiteConnectionFactory")
    conn = SQLiteFactoryTest._connection.makeConnection()
    assert factory[0]
    assert isinstance(conn, factory[0])
    # Undo the module-level monkey-patch so other tests are unaffected.
    del sqliteconnection.SQLiteConnectionFactory
def test_sqlite_aggregate():
    """A custom SQL aggregate registered through the connection factory
    must be usable via SelectResults.accumulateOne()."""
    setupClass(SQLiteFactoryTest)
    if SQLiteFactoryTest._connection.dbName == "sqlite":
        if not SQLiteFactoryTest._connection.using_sqlite2:
            # create_aggregate() only exists for pysqlite2/sqlite3.
            return
    def SQLiteConnectionFactory(sqlite):
        class MyConnection(sqlite.Connection):
            def __init__(self, *args, **kwargs):
                super(MyConnection, self).__init__(*args, **kwargs)
                # Register "group_concat" taking one argument per row.
                self.create_aggregate("group_concat", 1, self.group_concat)
            class group_concat:
                # SQLite aggregate protocol: collect values in step(),
                # return the sorted, comma-joined result in finalize().
                def __init__(self):
                    self.acc = []
                def step(self, value):
                    if isinstance(value, basestring):
                        self.acc.append(value)
                    else:
                        self.acc.append(str(value))
                def finalize(self):
                    self.acc.sort()
                    return ", ".join(self.acc)
        return MyConnection
    setSQLiteConnectionFactory(SQLiteFactoryTest, SQLiteConnectionFactory)
    SQLiteFactoryTest(name='sqlobject')
    SQLiteFactoryTest(name='sqlbuilder')
    assert SQLiteFactoryTest.select(orderBy="name").accumulateOne("group_concat", "name") == \
        "sqlbuilder, sqlobject"
def do_select():
    """Run (and fully consume) a simple query; used as the worker-thread
    target in test_sqlite_threaded()."""
    list(TestSO1.select())
def test_sqlite_threaded():
    """A query issued from another thread must not poison subsequent
    queries from the main thread (SQLite ties connections to threads)."""
    setupClass(TestSO1)
    t = threading.Thread(target=do_select)
    t.start()
    t.join()
    # This should reuse the same connection as the connection
    # made above (at least will with most database drivers, but
    # this will cause an error in SQLite):
    do_select()
def test_empty_string():
    """An empty string column value must round-trip as '' and must not
    be collapsed to NULL (which would read back as None)."""
    setupClass(TestSO1)
    test = TestSO1(name=None, passwd='')
    assert test.name is None
    assert test.passwd == ''
def test_memorydb():
    """With an in-memory SQLite database, closing the connection wipes
    the database, so tables must be recreatable afterwards."""
    if not supports("memorydb"):
        return
    connection = getConnection()
    if connection.dbName != "sqlite":
        return
    if not connection._memory:
        return
    setupClass(TestSO1)
    connection.close() # create a new connection to an in-memory database
    TestSO1.setConnection(connection)
    TestSO1.createTable()
def test_list_databases():
    """listDatabases() on SQLite reports the single 'main' database."""
    connection = getConnection()
    if connection.dbName != "sqlite":
        return
    assert connection.listDatabases() == ['main']
def test_list_tables():
    """A table created by setupClass() must appear in listTables()."""
    connection = getConnection()
    if connection.dbName != "sqlite":
        return
    setupClass(TestSO1)
    assert TestSO1.sqlmeta.table in connection.listTables()
| mit |
alizamus/pox_controller | pox/misc/gephi_topo.py | 40 | 5214 | # Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Detects topology and streams it to Gephi
Gephi is a pretty awesome graph visualization/manipulation package. It has
a plugin for streaming graphs back and forth between it and something else.
We use that (by opening a listening socket -- port 8282 by default) and
sending detected switches, links, and (optionally) hosts.
Based on POXDesk's tinytopo module.
Requires discovery. host_tracker is optional.
pox.py openflow.discovery misc.gephi_topo host_tracker forwarding.l2_learning
"""
from pox.core import core
from pox.lib.util import dpid_to_str
from pox.lib.ioworker.workers import *
from pox.lib.ioworker import *
import json
log = core.getLogger()
class ServerWorker (TCPServerWorker, RecocoIOWorker):
  """Listening TCP worker; spawns one child worker per Gephi client."""
  pass
clients = set()
class GephiWorker (RecocoIOWorker):
  """Per-client connection worker.

  On connect it pushes the full current topology and registers itself in
  the module-level *clients* set so GephiTopo can broadcast incremental
  updates; incoming data from the client is read but discarded.
  """
  def __init__ (self, *args, **kw):
    super(GephiWorker, self).__init__(*args, **kw)
    self._connecting = True
    self.data = b''  # buffer of not-yet-consumed input from the client
  def _handle_close (self):
    log.info("Client disconnect")
    super(GephiWorker, self)._handle_close()
    clients.discard(self)
  def _handle_connect (self):
    log.info("Client connect")
    super(GephiWorker, self)._handle_connect()
    # Send the complete topology first, then start receiving broadcasts.
    core.GephiTopo.send_full(self)
    clients.add(self)
  def _handle_rx (self):
    self.data += self.read()
    while '\n' in self.data:
      # We don't currently do anything with this
      msg,self.data = self.data.split('\n',1)
      # This SHOULD be an HTTP request.
      #print msg
      pass
def an(n, **kw):
  """Build a Gephi graph-streaming "add node" message for node *n*.

  Extra keyword arguments become node attributes; the node's label is
  always set to the stringified node id.
  """
  node_id = str(n)
  attrs = dict(kw, label=node_id)
  return {'an': {node_id: attrs}}
def ae(a, b):
  """Build an "add edge" message for an undirected edge between *a* and *b*.

  Endpoints are stringified and ordered, so the edge id is the same
  regardless of argument order.
  """
  lo, hi = sorted((str(a), str(b)))
  edge_id = lo + "_" + hi
  return {'ae': {edge_id: {'source': lo, 'target': hi, 'directed': False}}}
def de(a, b):
  """Build a "delete edge" message matching the edge id produced by ae()."""
  lo, hi = sorted((str(a), str(b)))
  return {'de': {lo + "_" + hi: {}}}
def dn(n):
  """Build a "delete node" message for node *n* (id is stringified)."""
  node_id = str(n)
  return {'dn': {node_id: {}}}
def clear():
  """Build a message that removes every node (and thus edge) from the graph."""
  return dict(dn={'filter': 'ALL'})
class GephiTopo (object):
  """Tracks switches, links, and (optionally) hosts, and streams Gephi
  add/delete node/edge messages to every connected client."""
  def __init__ (self):
    core.listen_to_dependencies(self)
    self.switches = set()  # dpid strings of known switches
    self.links = set()     # (dpid1, dpid2) pairs, always ordered dpid1 < dpid2
    self.hosts = {} # mac -> dpid
  def _handle_core_ComponentRegistered (self, event):
    # host_tracker is optional; hook its HostEvent if/when it registers.
    if event.name == "host_tracker":
      event.component.addListenerByName("HostEvent",
          self.__handle_host_tracker_HostEvent)
  def send (self, data):
    """Broadcast one JSON message to every connected client."""
    for c in clients:
      c.send(json.dumps(data) + '\r\n')
  def send_full (self, client):
    """Send the entire current topology to a single (newly connected) client."""
    out = []
    out.append(clear())  # start the client from a blank graph
    for s in self.switches:
      out.append(an(s, kind='switch'))
    for e in self.links:
      out.append(ae(e[0],e[1]))
    for h,s in self.hosts.iteritems():
      out.append(an(h, kind='host'))
      # Only draw the host edge if its switch is still known.
      if s in self.switches:
        out.append(ae(h,s))
    out = '\r\n'.join(json.dumps(o) for o in out)
    client.send(out + '\r\n')
  def __handle_host_tracker_HostEvent (self, event):
    # Name is intentionally mangled to keep listen_to_dependencies away
    h = str(event.entry.macaddr)
    s = dpid_to_str(event.entry.dpid)
    if event.leave:
      if h in self.hosts:
        if s in self.switches:
          self.send(de(h,s))
        self.send(dn(h))
        del self.hosts[h]
    else:
      if h not in self.hosts:
        self.hosts[h] = s
        self.send(an(h, kind='host'))
        if s in self.switches:
          self.send(ae(h, s))
        else:
          log.warn("Missing switch")
  def _handle_openflow_ConnectionUp (self, event):
    s = dpid_to_str(event.dpid)
    if s not in self.switches:
      self.send(an(s))
      self.switches.add(s)
  def _handle_openflow_ConnectionDown (self, event):
    s = dpid_to_str(event.dpid)
    if s in self.switches:
      self.send(dn(s))
      self.switches.remove(s)
  def _handle_openflow_discovery_LinkEvent (self, event):
    s1 = event.link.dpid1
    s2 = event.link.dpid2
    s1 = dpid_to_str(s1)
    s2 = dpid_to_str(s2)
    # Normalize ordering so each undirected link has one canonical key.
    if s1 > s2: s1,s2 = s2,s1
    assert s1 in self.switches
    assert s2 in self.switches
    if event.added and (s1,s2) not in self.links:
      self.links.add((s1,s2))
      self.send(ae(s1,s2))
      # Do we have abandoned hosts?
      for h,s in self.hosts.iteritems():
        if s == s1: self.send(ae(h,s1))
        elif s == s2: self.send(ae(h,s2))
    elif event.removed and (s1,s2) in self.links:
      self.links.remove((s1,s2))
      self.send(de(s1,s2))
def launch (port = 8282):
  """POX component entry point: register GephiTopo and start listening
  for Gephi streaming clients on *port* (default 8282)."""
  core.registerNew(GephiTopo)
  # In theory, we're supposed to be running a web service, but instead
  # we just spew Gephi graph streaming junk at everyone who connects. :)
  global loop
  loop = RecocoIOLoop()
  #loop.more_debugging = True
  loop.start()
  w = ServerWorker(child_worker_type=GephiWorker, port = int(port))
  loop.register_worker(w)
| apache-2.0 |
denverfoundation/storybase | apps/storybase_geo/tests.py | 1 | 16382 | from geopy.geocoders.base import Geocoder
from django.http import HttpRequest
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from tastypie.test import ResourceTestCase, TestApiClient
from storybase.tests.base import (SettingsChangingTestCase,
SloppyComparisonTestMixin)
from storybase_geo.api import GeocoderResource
from storybase_geo.models import Location, Place
from storybase_geo.utils import get_geocoder
from storybase_story.models import create_story
class MockGeocoder(Geocoder):
    """Mock geocoder class
    This allows us to test against the geopy geocoder interface without
    being dependent on uptime of upstream geocoding services
    """
    # Canned results: query string -> (place name, (lat, lng)).
    PLACES = {
        "370 17th St, Denver, CO 80202": ("", (39.7438167, -104.9884953)),
        "370 17th St Denver CO 80202": ("", (39.7438167, -104.9884953)),
        "800 S. Halsted St. Chicago IL 60607": ("", (41.8716782, -87.6474517)),
        "colfax and chambers, aurora, co": ("", (39.7399986, -104.8099387)),
        "golden, co": ("", (39.756655, -105.224949)),
        "80202": ("", (39.7541032, -105.000224)),
        "Denver": ("", (39.737567, -104.9847179)),
    }
    def geocode(self, string, exactly_one=True):
        # Return a one-element result list on an exact-string hit, an
        # empty list otherwise.  *exactly_one* is accepted only for
        # interface compatibility with geopy and is ignored.
        if string in self.PLACES:
            return[self.PLACES[string]]
        return []
class MockGeocoderTestMixin(object):
    """Mixin that sets geocoder to mock geocoder if a real one is not specified
    Must be used with SettingsChangingTestCase
    """
    def _select_geocoder(self):
        from django.conf import settings as django_settings
        settings = self.get_settings_module()
        # Only fall back to the mock when no real geocoder is configured
        # in the Django settings; stash the old app-level values so the
        # test case can restore them on teardown.
        if not hasattr(django_settings, 'STORYBASE_GEOCODER'):
            self._old_settings['STORYBASE_GEOCODER'] = getattr(settings, 'STORYBASE_GEOCODER', None)
            self._old_settings['STORYBASE_GEOCODER_ARGS'] = getattr(settings, 'STORYBASE_GEOCODER_ARGS', None)
            self.set_setting('STORYBASE_GEOCODER', "storybase_geo.tests.MockGeocoder")
class OpenMapQuestGeocoderTestMixin(object):
    """Mixin that sets geocoder to OpenMapQuest"""
    def _select_geocoder(self):
        settings = self.get_settings_module()
        # Stash the old values so SettingsChangingTestCase can restore
        # them on teardown, then force the OpenMapQuest geocoder.
        self._old_settings['STORYBASE_GEOCODER'] = getattr(settings, 'STORYBASE_GEOCODER', None)
        self._old_settings['STORYBASE_GEOCODER_ARGS'] = getattr(settings, 'STORYBASE_GEOCODER_ARGS', None)
        self.set_setting('STORYBASE_GEOCODER', 'geopy.geocoders.OpenMapQuest')
class LocationModelTest(MockGeocoderTestMixin, SloppyComparisonTestMixin,
                        SettingsChangingTestCase):
    """Tests for geocoding behavior of the Location model (geocode on
    save and re-geocode on address change)."""
    def get_settings_module(self):
        from storybase_geo import settings
        return settings
    def test_geocode(self):
        """Test internal geocoding method"""
        self._select_geocoder()
        loc = Location()
        latlng = loc._geocode("370 17th St Denver CO 80202")
        self.assertApxEqual(latlng[0], 39.7438167)
        self.assertApxEqual(latlng[1], -104.9884953)
    def test_geocode_on_save(self):
        """
        Tests that address information in a Location is geocoded when the
        Location is saved
        """
        self._select_geocoder()
        loc = Location(name="The Piton Foundation",
                       address="370 17th St",
                       address2="#5300",
                       city="Denver",
                       state="CO",
                       postcode="80202")
        loc.save()
        self.assertApxEqual(loc.lat, 39.7438167)
        self.assertApxEqual(loc.lng, -104.9884953)
        # Note: the point field is (x, y) == (lng, lat).
        self.assertApxEqual(loc.point.x, -104.9884953)
        self.assertApxEqual(loc.point.y, 39.7438167)
    def test_geocode_on_change(self):
        """
        Tests that address information in a Location is re-geocoded when
        the address is changed.
        """
        self._select_geocoder()
        loc = Location(name="The Piton Foundation",
                       address="370 17th St",
                       address2="#5300",
                       city="Denver",
                       state="CO",
                       postcode="80202")
        loc.save()
        self.assertApxEqual(loc.lat, 39.7438167)
        self.assertApxEqual(loc.lng, -104.9884953)
        loc.name = "The Hull House"
        loc.address = "800 S. Halsted St."
        loc.city = "Chicago"
        loc.state = "IL"
        loc.postcode = "60607"
        loc.save()
        self.assertApxEqual(loc.lat, 41.8716782)
        self.assertApxEqual(loc.lng, -87.6474517)
        self.assertApxEqual(loc.point.x, -87.6474517)
        self.assertApxEqual(loc.point.y, 41.8716782)
class DefaultGeocoderTest(OpenMapQuestGeocoderTestMixin,
                          SettingsChangingTestCase):
    """Test geocoding with the default geocoder, currently OpenMapQuest
    This essentially warns us if the default geocoding service is down,
    breaking this for non-modified installs.
    Users are likely to use some other geocoder in production
    """
    def get_settings_module(self):
        from storybase_geo import settings
        return settings
    def test_get_geocoder(self):
        """Test that the get_geocoder function returns a geocoder"""
        geocoder = get_geocoder()
        self.assertTrue(isinstance(geocoder, Geocoder))
    def test_geocode_with_default_geocoder(self):
        """Test geocoding with default geocoder"""
        # NOTE(review): hits the live OpenMapQuest service and asserts
        # exact coordinates -- brittle if upstream data shifts.
        self._select_geocoder()
        geocoder = get_geocoder()
        address = "370 17th St, Denver"
        results = list(geocoder.geocode(address, exactly_one=False))
        self.assertTrue(len(results) > 0)
        place, (lat, lng) = results[0]
        self.assertEqual(lat, 39.7434926)
        self.assertEqual(lng, -104.9886368)
class GeocoderResourceTest(MockGeocoderTestMixin, SloppyComparisonTestMixin,
                           SettingsChangingTestCase):
    """Tests for geocoding endpoint"""
    def get_settings_module(self):
        from storybase_geo import settings
        return settings
    def test_geocode_address(self):
        """Test geocoding a street address"""
        self._select_geocoder()
        resource = GeocoderResource()
        # Build a bare GET request carrying the query string ?q=...
        req = HttpRequest()
        req.method = 'GET'
        req.GET['q'] = "370 17th St, Denver, CO 80202"
        bundle = resource.build_bundle(request=req)
        results = resource.obj_get_list(bundle)
        self.assertApxEqual(results[0].lat, 39.7434926)
        self.assertApxEqual(results[0].lng, -104.9886368)
    def test_geocode_intersection(self):
        """Test geocoding an intersection"""
        self._select_geocoder()
        resource = GeocoderResource()
        req = HttpRequest()
        req.method = 'GET'
        req.GET['q'] = "colfax and chambers, aurora, co"
        bundle = resource.build_bundle(request=req)
        results = resource.obj_get_list(bundle)
        self.assertApxEqual(results[0].lat, 39.7399986)
        self.assertApxEqual(results[0].lng, -104.8099387)
    def test_geocode_city_state(self):
        """Test geocoding a city and state"""
        self._select_geocoder()
        resource = GeocoderResource()
        req = HttpRequest()
        req.method = 'GET'
        req.GET['q'] = "golden, co"
        bundle = resource.build_bundle(request=req)
        results = resource.obj_get_list(bundle)
        self.assertApxEqual(results[0].lat, 39.756655, .001)
        self.assertApxEqual(results[0].lng, -105.224949, .001)
    def test_geocode_zip(self):
        """Test geocoding a zip code with Yahoo geocoder"""
        self._select_geocoder()
        resource = GeocoderResource()
        req = HttpRequest()
        req.method = 'GET'
        req.GET['q'] = "80202"
        bundle = resource.build_bundle(request=req)
        results = resource.obj_get_list(bundle)
        self.assertApxEqual(results[0].lat, 39.7541032, .01)
        self.assertApxEqual(results[0].lng, -105.000224, .01)
    def test_geocode_city(self):
        """Test geocoding a city with Yahoo geocoder"""
        self._select_geocoder()
        resource = GeocoderResource()
        req = HttpRequest()
        req.method = 'GET'
        req.GET['q'] = "Denver"
        bundle = resource.build_bundle(request=req)
        results = resource.obj_get_list(bundle)
        self.assertApxEqual(results[0].lat, 39.737567, .01)
        self.assertApxEqual(results[0].lng, -104.9847179, .01)
    def test_geocode_failure(self):
        """Test that results list is empty if no match is found"""
        self._select_geocoder()
        resource = GeocoderResource()
        req = HttpRequest()
        req.method = 'GET'
        req.GET['q'] = "11zzzzzzzzzz1234asfdasdasgw"
        bundle = resource.build_bundle(request=req)
        results = resource.obj_get_list(bundle)
        self.assertEqual(len(results), 0)
class LocationResourceTest(ResourceTestCase):
    """Tests for the Location API resource: listing per story, creating
    via POST (with authentication/authorization), and deleting."""
    def setUp(self):
        super(LocationResourceTest, self).setUp()
        self.ap_client = TestApiClient()
        self.username = 'test'
        self.password = 'test'
        self.user = User.objects.create_user(self.username,
            'test@example.com', self.password)
        # Second user for the "can't touch someone else's data" tests.
        self.user2 = User.objects.create_user("test2", "test2@example.com",
            "test2")
        self.story = create_story(title="Test Story", summary="Test Summary",
            byline="Test Byline", status="published", language="en",
            author=self.user)
        # Fixture data used by the list/creation tests below.
        self.location_attrs = [
            {
                "name": "The Piton Foundation",
                "address": "370 17th St",
                "address2": "#5300",
                "city": "Denver",
                "state": "CO",
                "postcode": "80202",
            },
            {
                'name': "The Hull House",
                'address': "800 S. Halsted St.",
                "city": "Chicago",
                "state": "IL",
                "postcode": "60607",
            },
            {
                'name': "Bucktown-Wicker Park Library",
                'address': "1701 North Milwaukee Ave.",
                'city': "Chicago",
                'state': "IL",
                'postcode': "60647",
            }
        ]
    def test_get_list_with_story(self):
        """Only locations attached to the story appear in its list endpoint."""
        for attrs in self.location_attrs:
            Location.objects.create(**attrs)
        self.assertEqual(Location.objects.count(), 3)
        self.story.locations.add(*list(Location.objects.filter(name__in=("The Hull House", "The Piton Foundation"))))
        self.story.save()
        self.assertEqual(self.story.locations.count(), 2)
        uri = '/api/0.1/locations/stories/%s/' % (self.story.story_id)
        resp = self.api_client.get(uri)
        self.assertValidJSONResponse(resp)
        self.assertEqual(len(self.deserialize(resp)['objects']), 2)
        for retrieved_attrs in self.deserialize(resp)['objects']:
            self.assertIn(retrieved_attrs['name'], ("The Hull House", "The Piton Foundation"))
    def test_post_list_with_story(self):
        """POSTing as the story's author creates the location and links it."""
        post_data = {
            'name': "Mo Betta Green Market",
            'lat': 39.7533324751841,
            'lng': -104.979961178185
        }
        self.assertEqual(Location.objects.count(), 0)
        self.assertEqual(self.story.locations.count(), 0)
        self.api_client.client.login(username=self.username,
                                     password=self.password)
        uri = '/api/0.1/locations/stories/%s/' % (self.story.story_id)
        resp = self.api_client.post(uri, format='json', data=post_data)
        self.assertHttpCreated(resp)
        # Pull the new location_id out of the Location header URI.
        returned_id = resp['location'].split('/')[-2]
        # Confirm that a location object was created
        self.assertEqual(Location.objects.count(), 1)
        # Compare the response data with the post_data
        self.assertEqual(self.deserialize(resp)['name'],
                         post_data['name'])
        self.assertEqual(self.deserialize(resp)['lat'],
                         post_data['lat'])
        self.assertEqual(self.deserialize(resp)['lng'],
                         post_data['lng'])
        created_obj = Location.objects.get()
        # Compare the id from the resource URI with the created object
        self.assertEqual(created_obj.location_id, returned_id)
        # Compare the created model instance with the post data
        self.assertEqual(created_obj.name, post_data['name'])
        self.assertEqual(created_obj.lat, post_data['lat'])
        self.assertEqual(created_obj.lng, post_data['lng'])
        # Test that the created object is associated with the story
        self.assertEqual(self.story.locations.count(), 1)
        self.assertIn(created_obj, self.story.locations.all())
    def test_post_list_with_story_unauthenticated(self):
        """Test that an unauthenticated user can't create a location"""
        post_data = {
            'name': "Mo Betta Green Market",
            'lat': 39.7533324751841,
            'lng': -104.979961178185
        }
        self.assertEqual(Location.objects.count(), 0)
        self.assertEqual(self.story.locations.count(), 0)
        uri = '/api/0.1/locations/stories/%s/' % (self.story.story_id)
        resp = self.api_client.post(uri, format='json', data=post_data)
        self.assertHttpUnauthorized(resp)
        self.assertEqual(Location.objects.count(), 0)
        self.assertEqual(self.story.locations.count(), 0)
    def test_post_list_with_story_unauthorized(self):
        """
        Test that an authenticated user can't create a location
        associated with another user's story
        """
        self.story.author = self.user2
        self.story.save()
        post_data = {
            'name': "Mo Betta Green Market",
            'lat': 39.7533324751841,
            'lng': -104.979961178185
        }
        self.assertEqual(Location.objects.count(), 0)
        self.assertEqual(self.story.locations.count(), 0)
        self.api_client.client.login(username=self.username,
                                     password=self.password)
        uri = '/api/0.1/locations/stories/%s/' % (self.story.story_id)
        resp = self.api_client.post(uri, format='json', data=post_data)
        self.assertHttpUnauthorized(resp)
        self.assertEqual(Location.objects.count(), 0)
        self.assertEqual(self.story.locations.count(), 0)
    def test_delete_detail(self):
        """The owner of a location can delete it."""
        obj = Location.objects.create(**self.location_attrs[0])
        obj.owner = self.user
        obj.save()
        self.assertEqual(Location.objects.count(), 1)
        self.assertEqual(self.user.locations.count(), 1)
        self.api_client.client.login(username=self.username,
                                     password=self.password)
        uri = '/api/0.1/locations/%s/' % (obj.location_id)
        resp = self.api_client.delete(uri, format='json')
        self.assertHttpAccepted(resp)
        self.assertEqual(Location.objects.count(), 0)
        self.assertEqual(self.user.locations.count(), 0)
    def test_delete_detail_unauthenticated(self):
        """Tests that an unauthenticated user cannot delete a location"""
        obj = Location.objects.create(**self.location_attrs[0])
        obj.owner = self.user
        obj.save()
        self.assertEqual(Location.objects.count(), 1)
        self.assertEqual(self.user.locations.count(), 1)
        uri = '/api/0.1/locations/%s/' % (obj.location_id)
        resp = self.api_client.delete(uri, format='json')
        self.assertHttpUnauthorized(resp)
        self.assertEqual(Location.objects.count(), 1)
        self.assertEqual(self.user.locations.count(), 1)
    def test_delete_detail_unauthorized(self):
        """Tests that an unauthorized user cannot delete a location"""
        obj = Location.objects.create(**self.location_attrs[0])
        obj.owner = self.user2
        obj.save()
        self.assertEqual(Location.objects.count(), 1)
        self.assertEqual(self.user2.locations.count(), 1)
        self.api_client.client.login(username=self.username,
                                     password=self.password)
        uri = '/api/0.1/locations/%s/' % (obj.location_id)
        resp = self.api_client.delete(uri, format='json')
        self.assertHttpUnauthorized(resp)
        self.assertEqual(Location.objects.count(), 1)
        self.assertEqual(self.user2.locations.count(), 1)
| mit |
dongjoon-hyun/tensorflow | tensorflow/contrib/keras/api/keras/applications/xception/__init__.py | 39 | 1106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.applications.xception import decode_predictions
from tensorflow.python.keras.applications.xception import preprocess_input
from tensorflow.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
| apache-2.0 |
pradiptad/zulip | bots/zulip_git_config.py | 125 | 1688 | # Zulip, Inc's internal git plugin configuration.
# The plugin and example config are under api/integrations/
# Leaving all the instructions out of this file to avoid having to
# sync them as we update the comments.
ZULIP_USER = "commit-bot@zulip.com"
ZULIP_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# commit_notice_destination() lets you customize where commit notices
# are sent to.
#
# It takes the following arguments:
# * repo = the name of the git repository
# * branch = the name of the branch that was pushed to
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification, e.g. for ).
#
# The default code below will send every commit pushed to "master" to
# * stream "commits"
# * topic "master"
# And similarly for branch "test-post-receive" (for use when testing).
def commit_notice_destination(repo, branch, commit):
    """Return where to send the notice for *commit* pushed to *branch*
    of *repo*, as a dict with 'stream' and 'subject' keys, or None to
    send no notification at all."""
    notified_branches = ("master", "prod", "test-post-receive")
    if branch not in notified_branches:
        # All other branches produce no notice.
        return None
    if 'test-' in branch:
        stream = 'test'
    else:
        stream = 'commits'
    return {'stream': stream, 'subject': u"%s" % (branch,)}
# Modify this function to change how commits are displayed; the most
# common customization is to include a link to the commit in your
# graphical repository viewer, e.g.
#
# return '!avatar(%s) [%s](https://example.com/commits/%s)\n' % (author, subject, commit_id)
def format_commit_message(author, subject, commit_id):
return '!avatar(%s) [%s](https://git.zulip.net/eng/zulip/commit/%s)\n' % (author, subject, commit_id)
ZULIP_API_PATH = "/home/zulip/zulip/api"
ZULIP_SITE = "https://zulip.com"
| apache-2.0 |
x111ong/odoo | openerp/tools/view_validation.py | 367 | 2303 | """ View validation code (using assertions, not the RNG schema). """
import logging
_logger = logging.getLogger(__name__)
def valid_page_in_book(arch):
    """A `page` node must be below a `book` node."""
    orphan_pages = arch.xpath('//page[not(ancestor::notebook)]')
    return len(orphan_pages) == 0
def valid_field_in_graph(arch):
    """ Children of ``graph`` can only be ``field`` """
    return all(child.tag == 'field'
               for child in arch.xpath('/graph/child::*'))
def valid_field_in_tree(arch):
    """ Children of ``tree`` view must be ``field`` or ``button``."""
    allowed_tags = ('field', 'button')
    return all(child.tag in allowed_tags
               for child in arch.xpath('/tree/child::*'))
def valid_att_in_field(arch):
    """ ``field`` nodes must all have a ``@name`` """
    nameless_fields = arch.xpath('//field[not(@name)]')
    return len(nameless_fields) == 0
def valid_att_in_label(arch):
    """ ``label`` nodes must have a ``@for`` or a ``@string`` """
    bad_labels = arch.xpath('//label[not(@for or @string)]')
    return len(bad_labels) == 0
def valid_att_in_form(arch):
    """Placeholder check for ``form`` views; currently always passes."""
    return True
def valid_type_in_colspan(arch):
    """A `colspan` attribute must be an `integer` type."""
    for attrib in arch.xpath('//*/@colspan'):
        try:
            int(attrib)
        except (TypeError, ValueError):
            # Non-integer value: the view is invalid.  (A bare ``except``
            # here would also swallow KeyboardInterrupt/SystemExit.)
            return False
    return True
def valid_type_in_col(arch):
    """A `col` attribute must be an `integer` type."""
    for attrib in arch.xpath('//*/@col'):
        try:
            int(attrib)
        except (TypeError, ValueError):
            # Non-integer value: the view is invalid.  (A bare ``except``
            # here would also swallow KeyboardInterrupt/SystemExit.)
            return False
    return True
def valid_view(arch):
    """Validate a view's root node against the predicates relevant for
    its view type.

    Logs each failing predicate's docstring and returns False on the
    first failure; views whose root tag has no registered predicates
    (e.g. calendar, search) pass trivially.
    """
    # Select the predicate list lazily per tag (the names stay lazy so
    # unknown tags never touch them); the single validation loop below
    # replaces three copy-pasted loops.
    if arch.tag == 'form':
        predicates = [valid_page_in_book, valid_att_in_form,
                      valid_type_in_colspan, valid_type_in_col,
                      valid_att_in_field, valid_att_in_label]
    elif arch.tag == 'graph':
        predicates = [valid_field_in_graph, valid_att_in_field]
    elif arch.tag == 'tree':
        predicates = [valid_field_in_tree, valid_att_in_field]
    else:
        predicates = []
    for pred in predicates:
        if not pred(arch):
            _logger.error('Invalid XML: %s', pred.__doc__)
            return False
    return True
| agpl-3.0 |
luzhijun/Optimization | cma-es/batchcompute_python_sdk/examples/worker_package/oss_python_sdk/oss_api.py | 3 | 60735 | #!/usr/bin/env python
#coding=utf-8
# Copyright (c) 2011, Alibaba Cloud Computing
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import httplib
import time
import base64
import urllib
import StringIO
import sys
import socket
try:
from oss.oss_util import *
except:
from oss_util import *
try:
from oss.oss_xml_handler import *
except:
from oss_xml_handler import *
class OssAPI:
'''
A simple OSS API
'''
DefaultContentType = 'application/octet-stream'
provider = PROVIDER
__version__ = '0.3.5'
Version = __version__
AGENT = 'oss-python%s (%s)' % (__version__, sys.platform)
def __init__(self, host='oss.aliyuncs.com', access_id='', secret_access_key='', port=80, is_security=False):
self.SendBufferSize = 8192
self.RecvBufferSize = 1024*1024*10
self.host = get_host_from_list(host)
self.port = port
self.access_id = access_id
self.secret_access_key = secret_access_key
self.show_bar = False
self.is_security = is_security
self.retry_times = 5
self.agent = self.AGENT
self.debug = False
self.timeout = 60
def set_timeout(self, timeout):
self.timeout = timeout
def set_debug(self, is_debug):
if is_debug:
self.debug = True
def set_retry_times(self, retry_times=5):
self.retry_times = retry_times
def set_send_buf_size(self, buf_size):
try:
self.SendBufferSize = (int)(buf_size)
except ValueError:
pass
def set_recv_buf_size(self, buf_size):
try:
self.RecvBufferSize = (int)(buf_size)
except ValueError:
pass
def get_connection(self, tmp_host=None):
host = ''
port = 80
if not tmp_host:
tmp_host = self.host
host_port_list = tmp_host.split(":")
if len(host_port_list) == 1:
host = host_port_list[0].strip()
elif len(host_port_list) == 2:
host = host_port_list[0].strip()
port = int(host_port_list[1].strip())
if self.is_security or port == 443:
self.is_security = True
if sys.version_info >= (2, 6):
return httplib.HTTPSConnection(host=host, port=port, timeout=self.timeout)
else:
#XXX
if not (socket.getdefaulttimeout()):
socket.setdefaulttimeout(120)
return httplib.HTTPSConnection(host=host, port=port)
else:
if sys.version_info >= (2, 6):
return httplib.HTTPConnection(host=host, port=port, timeout=self.timeout)
else:
#XXX
if not (socket.getdefaulttimeout()):
socket.setdefaulttimeout(120)
return httplib.HTTPConnection(host=host, port=port)
def sign_url_auth_with_expire_time(self, method, url, headers=None, resource="/", timeout=60, params=None):
'''
Create the authorization for OSS based on the input method, url, body and headers
:type method: string
:param method: one of PUT, GET, DELETE, HEAD
:type url: string
:param:HTTP address of bucket or object, eg: http://HOST/bucket/object
:type headers: dict
:param: HTTP header
:type resource: string
:param:path of bucket or object, eg: /bucket/ or /bucket/object
:type timeout: int
:param
Returns:
signature url.
'''
if not headers:
headers = {}
if not params:
params = {}
send_time = str(int(time.time()) + timeout)
headers['Date'] = send_time
auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug)
params["OSSAccessKeyId"] = self.access_id
params["Expires"] = str(send_time)
params["Signature"] = auth_value
sign_url = append_param(url, params)
return sign_url
def sign_url(self, method, bucket, object, timeout=60, headers=None, params=None):
'''
Create the authorization for OSS based on the input method, url, body and headers
:type method: string
:param method: one of PUT, GET, DELETE, HEAD
:type bucket: string
:param:
:type object: string
:param:
:type timeout: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
:type resource: string
:param:path of bucket or object, eg: /bucket/ or /bucket/object
Returns:
signature url.
'''
if not headers:
headers = {}
if not params:
params = {}
send_time = str(int(time.time()) + timeout)
headers['Date'] = send_time
object = convert_utf8(object)
resource = "/%s/%s%s" % (bucket, object, get_resource(params))
auth_value = get_assign(self.secret_access_key, method, headers, resource, None, self.debug)
params["OSSAccessKeyId"] = self.access_id
params["Expires"] = str(send_time)
params["Signature"] = auth_value
url = ''
object = oss_quote(object)
http = "http"
if self.is_security:
http = "https"
if is_ip(self.host):
url = "%s://%s/%s/%s" % (http, self.host, bucket, object)
elif is_oss_host(self.host):
if check_bucket_valid(bucket):
url = "%s://%s.%s/%s" % (http, bucket, self.host, object)
else:
url = "%s://%s/%s/%s" % (http, self.host, bucket, object)
else:
url = "%s://%s/%s" % (http, self.host, object)
sign_url = append_param(url, params)
return sign_url
def _create_sign_for_normal_auth(self, method, headers=None, resource="/"):
'''
NOT public API
Create the authorization for OSS based on header input.
it should be put into "Authorization" parameter of header.
:type method: string
:param:one of PUT, GET, DELETE, HEAD
:type headers: dict
:param: HTTP header
:type resource: string
:param:path of bucket or object, eg: /bucket/ or /bucket/object
Returns:
signature string
'''
auth_value = "%s %s:%s" % (self.provider, self.access_id, get_assign(self.secret_access_key, method, headers, resource, None, self.debug))
return auth_value
def bucket_operation(self, method, bucket, headers=None, params=None):
return self.http_request(method, bucket, '', headers, '', params)
def object_operation(self, method, bucket, object, headers=None, body='', params=None):
return self.http_request(method, bucket, object, headers, body, params)
def http_request(self, method, bucket, object, headers=None, body='', params=None):
'''
Send http request of operation
:type method: string
:param method: one of PUT, GET, DELETE, HEAD, POST
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
:type body: string
:param
Returns:
HTTP Response
'''
retry = 5
res = None
while retry > 0:
retry -= 1
tmp_bucket = bucket
tmp_object = object
tmp_headers = {}
if headers and isinstance(headers, dict):
tmp_headers = headers.copy()
tmp_params = {}
if params and isinstance(params, dict):
tmp_params = params.copy()
res = self.http_request_with_redirect(method, tmp_bucket, tmp_object, tmp_headers, body, tmp_params)
if check_redirect(res):
self.host = helper_get_host_from_resp(res, bucket)
else:
return res
return res
    def http_request_with_redirect(self, method, bucket, object, headers=None, body='', params=None):
        '''
        Send one HTTP request to the current endpoint (no redirect handling;
        use http_request for automatic redirect retries).

        Note: mutates *headers* and *params* in place (adds Date,
        Authorization, Host, User-Agent), which is why http_request hands
        it copies.

        :type method: string
        :param method: one of PUT, GET, DELETE, HEAD, POST
        :type bucket: string
        :param bucket: bucket name
        :type object: string
        :param object: object name
        :type headers: dict
        :param headers: HTTP headers
        :type body: string
        :param body: request body
        Returns:
            HTTP Response
        '''
        if not params:
            params = {}
        if not headers:
            headers = {}
        object = convert_utf8(object)
        # The "resource" is the canonical path fed into the signature; it is
        # built from the RAW object name, before URL-quoting.
        if not bucket:
            resource = "/"
            headers['Host'] = self.host
        else:
            headers['Host'] = "%s.%s" % (bucket, self.host)
            if not is_oss_host(self.host):
                # CNAME/custom endpoint: no bucket sub-domain.
                headers['Host'] = self.host
            resource = "/%s/" % bucket
        resource = convert_utf8(resource)
        resource = "%s%s%s" % (resource, object, get_resource(params))
        # The request path, by contrast, uses the URL-quoted object name.
        object = oss_quote(object)
        url = "/%s" % object
        if is_ip(self.host):
            # IP endpoint: path-style addressing, bucket in the path.
            url = "/%s/%s" % (bucket, object)
            if not bucket:
                url = "/%s" % object
            headers['Host'] = self.host
        url = append_param(url, params)
        # RFC 1123 date in GMT, required by the signature scheme.
        date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        headers['Date'] = date
        headers['Authorization'] = self._create_sign_for_normal_auth(method, headers, resource)
        headers['User-Agent'] = self.agent
        # Virtual-host style needs a connection to bucket.host; otherwise
        # fall back to the default endpoint connection.
        if check_bucket_valid(bucket) and not is_ip(self.host):
            conn = self.get_connection(headers['Host'])
        else:
            conn = self.get_connection()
        conn.request(method, url, body, headers)
        return conn.getresponse()
def get_service(self, headers=None, prefix='', marker='', maxKeys=''):
'''
List all buckets of user
'''
return self.list_all_my_buckets(headers, prefix, marker, maxKeys)
def list_all_my_buckets(self, headers=None, prefix='', marker='', maxKeys=''):
'''
List all buckets of user
type headers: dict
:param
Returns:
HTTP Response
'''
method = 'GET'
bucket = ''
object = ''
body = ''
params = {}
if prefix != '':
params['prefix'] = prefix
if marker != '':
params['marker'] = marker
if maxKeys != '':
params['max-keys'] = maxKeys
return self.http_request(method, bucket, object, headers, body, params)
def get_bucket_acl(self, bucket):
'''
Get Access Control Level of bucket
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
headers = {}
body = ''
params = {}
params['acl'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_bucket_location(self, bucket):
'''
Get Location of bucket
'''
method = 'GET'
object = ''
headers = {}
body = ''
params = {}
params['location'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None):
'''
List object that in bucket
'''
return self.list_bucket(bucket, prefix, marker, delimiter, maxkeys, headers)
def list_bucket(self, bucket, prefix='', marker='', delimiter='', maxkeys='', headers=None):
'''
List object that in bucket
:type bucket: string
:param
:type prefix: string
:param
:type marker: string
:param
:type delimiter: string
:param
:type maxkeys: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['prefix'] = prefix
params['marker'] = marker
params['delimiter'] = delimiter
params['max-keys'] = maxkeys
return self.http_request(method, bucket, object, headers, body, params)
def get_website(self, bucket, headers=None):
'''
Get bucket website
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['website'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_lifecycle(self, bucket, headers=None):
'''
Get bucket lifecycle
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['lifecycle'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_logging(self, bucket, headers=None):
'''
Get bucket logging
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['logging'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_cors(self, bucket, headers=None):
'''
Get bucket cors
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['cors'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def create_bucket(self, bucket, acl='', headers=None):
'''
Create bucket
'''
return self.put_bucket(bucket, acl, headers)
def put_bucket(self, bucket, acl='', headers=None):
'''
Create bucket
:type bucket: string
:param
:type acl: string
:param: one of private public-read public-read-write
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if acl != '':
if "AWS" == self.provider:
headers['x-amz-acl'] = acl
else:
headers['x-oss-acl'] = acl
method = 'PUT'
object = ''
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def put_logging(self, sourcebucket, targetbucket, prefix):
'''
Put bucket logging
:type sourcebucket: string
:param
:type targetbucket: string
:param: Specifies the bucket where you want Aliyun OSS to store server access logs
:type prefix: string
:param: This element lets you specify a prefix for the objects that the log files will be stored
Returns:
HTTP Response
'''
body = '<BucketLoggingStatus>'
if targetbucket:
body += '<LoggingEnabled>'
body += '<TargetBucket>%s</TargetBucket>' % targetbucket
if prefix:
body += '<TargetPrefix>%s</TargetPrefix>' % prefix
body += '</LoggingEnabled>'
body += '</BucketLoggingStatus>'
method = 'PUT'
object = ''
params = {}
headers = {}
params['logging'] = ''
return self.http_request(method, sourcebucket, object, headers, body, params)
def put_website(self, bucket, indexfile, errorfile):
'''
Put bucket website
:type bucket: string
:param
:type indexfile: string
:param: the object that contain index page
:type errorfile: string
:param: the object taht contain error page
Returns:
HTTP Response
'''
indexfile = convert_utf8(indexfile)
errorfile = convert_utf8(errorfile)
body = '<WebsiteConfiguration><IndexDocument><Suffix>%s</Suffix></IndexDocument><ErrorDocument><Key>%s</Key></ErrorDocument></WebsiteConfiguration>' % (indexfile, errorfile)
method = 'PUT'
object = ''
headers = {}
params = {}
params['website'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_lifecycle(self, bucket, lifecycle):
'''
Put bucket lifecycle
:type bucket: string
:param
:type lifecycle: string
:param: lifecycle configuration
Returns:
HTTP Response
'''
body = lifecycle
method = 'PUT'
object = ''
headers = {}
params = {}
params['lifecycle'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_cors(self, bucket, cors_xml, headers=None):
'''
Put bucket cors
:type bucket: string
:param
:type cors_xml: string
:param: the xml that contain cors rules
Returns:
HTTP Response
'''
body = cors_xml
method = 'PUT'
object = ''
if not headers:
headers = {}
headers['Content-Length'] = str(len(body))
base64md5 = base64.encodestring(md5.new(body).digest()).strip()
headers['Content-MD5'] = base64md5
params = {}
params['cors'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_bucket_with_location(self, bucket, acl='', location='', headers=None):
'''
Create bucket
:type bucket: string
:param
:type acl: string
:param: one of private public-read public-read-write
:type location: string
:param:
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if acl != '':
if "AWS" == self.provider:
headers['x-amz-acl'] = acl
else:
headers['x-oss-acl'] = acl
params = {}
body = ''
if location != '':
body = r'<CreateBucketConfiguration>'
body += r'<LocationConstraint>'
body += location
body += r'</LocationConstraint>'
body += r'</CreateBucketConfiguration>'
method = 'PUT'
object = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_bucket(self, bucket, headers=None):
'''
Delete bucket
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def delete_website(self, bucket, headers=None):
'''
Delete bucket website
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['website'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_lifecycle(self, bucket, headers=None):
'''
Delete bucket lifecycle
:type bucket: string
:param
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['lifecycle'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_logging(self, bucket, headers=None):
'''
Delete bucket logging
:type bucket: string
:param:
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['logging'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def delete_cors(self, bucket, headers=None):
'''
Delete bucket cors
:type bucket: string
:param:
Returns:
HTTP Response
'''
method = 'DELETE'
object = ''
body = ''
params = {}
params['cors'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def put_object_with_data(self, bucket, object, input_content, content_type='', headers=None, params=None):
'''
Put object into bucket, the content of object is from input_content
'''
return self.put_object_from_string(bucket, object, input_content, content_type, headers, params)
def put_object_from_string(self, bucket, object, input_content, content_type='', headers=None, params=None):
'''
Put object into bucket, the content of object is from input_content
:type bucket: string
:param
:type object: string
:param
:type input_content: string
:param
:type content_type: string
:param: the object content type that supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = "PUT"
return self._put_or_post_object_from_string(method, bucket, object, input_content, content_type, headers, params)
def post_object_from_string(self, bucket, object, input_content, content_type='', headers=None, params=None):
'''
Post object into bucket, the content of object is from input_content
:type bucket: string
:param
:type object: string
:param
:type input_content: string
:param
:type content_type: string
:param: the object content type that supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = "POST"
return self._put_or_post_object_from_string(method, bucket, object, input_content, content_type, headers, params)
def _put_or_post_object_from_string(self, method, bucket, object, input_content, content_type, headers, params):
if not headers:
headers = {}
if not content_type:
content_type = get_content_type_by_filename(object)
if not headers.has_key('Content-Type') and not headers.has_key('content-type'):
headers['Content-Type'] = content_type
headers['Content-Length'] = str(len(input_content))
fp = StringIO.StringIO(input_content)
if "POST" == method:
res = self.post_object_from_fp(bucket, object, fp, content_type, headers, params)
else:
res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params)
fp.close()
return res
    def _open_conn_to_put_object(self, method, bucket, object, filesize, content_type=DefaultContentType, headers=None, params=None):
        '''
        NOT public API
        Open an HTTP connection and send the request line and all headers
        for an upload; the caller then streams the body with conn.send()
        and reads conn.getresponse().

        :type method: string
        :param method: one of PUT, POST
        :type bucket: string
        :param bucket: bucket name
        :type object: string
        :param object: object name
        :type filesize: int
        :param filesize: value for the Content-Length header
        :type content_type: string
        :param content_type: HTTP content type of the object
        :type headers: dict
        :param headers: HTTP headers
        Returns:
            Initialized HTTPConnection
        '''
        if not params:
            params = {}
        if not headers:
            headers = {}
        object = convert_utf8(object)
        # Canonical resource for the signature: raw (unquoted) object name.
        resource = "/%s/" % bucket
        if not bucket:
            resource = "/"
        resource = convert_utf8(resource)
        resource = "%s%s%s" % (resource, object, get_resource(params))
        # Request path, by contrast, uses the URL-quoted object name.
        object = oss_quote(object)
        url = "/%s" % object
        if bucket:
            headers['Host'] = "%s.%s" % (bucket, self.host)
            if not is_oss_host(self.host):
                # CNAME/custom endpoint: no bucket sub-domain.
                headers['Host'] = self.host
        else:
            headers['Host'] = self.host
        if is_ip(self.host):
            # IP endpoint: path-style addressing, bucket in the path.
            url = "/%s/%s" % (bucket, object)
            headers['Host'] = self.host
        url = append_param(url, params)
        date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        if check_bucket_valid(bucket) and not is_ip(self.host):
            conn = self.get_connection(headers['Host'])
        else:
            conn = self.get_connection()
        conn.putrequest(method, url)
        content_type = convert_utf8(content_type)
        if not headers.has_key('Content-Type') and not headers.has_key('content-type'):
            headers['Content-Type'] = content_type
        headers["Content-Length"] = filesize
        headers["Date"] = date
        # Ask the server to acknowledge headers before the body is streamed.
        headers["Expect"] = "100-Continue"
        headers['User-Agent'] = self.agent
        for k in headers.keys():
            conn.putheader(str(k), str(headers[k]))
        # Anonymous access is allowed when no credentials are configured.
        if '' != self.secret_access_key and '' != self.access_id:
            auth = self._create_sign_for_normal_auth(method, headers, resource)
            conn.putheader("Authorization", auth)
        conn.endheaders()
        return conn
def put_object_from_file(self, bucket, object, filename, content_type='', headers=None, params=None):
'''
put object into bucket, the content of object is read from file
:type bucket: string
:param
:type object: string
:param
:type fllename: string
:param: the name of the read file
:type content_type: string
:param: the object content type that supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
fp = open(filename, 'rb')
if not content_type:
content_type = get_content_type_by_filename(filename)
res = self.put_object_from_fp(bucket, object, fp, content_type, headers, params)
fp.close()
return res
def post_object_from_file(self, bucket, object, filename, content_type='', headers=None, params=None):
'''
post object into bucket, the content of object is read from file
:type bucket: string
:param
:type object: string
:param
:type fllename: string
:param: the name of the read file
:type content_type: string
:param: the object content type that supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
fp = open(filename, 'rb')
if not content_type:
content_type = get_content_type_by_filename(filename)
res = self.post_object_from_fp(bucket, object, fp, content_type, headers, params)
fp.close()
return res
def view_bar(self, num=1, sum=100):
rate = float(num) / float(sum)
rate_num = int(rate * 100)
print '\r%d%% ' % (rate_num),
sys.stdout.flush()
def put_object_from_fp(self, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None):
'''
Put object into bucket, the content of object is read from file pointer
:type bucket: string
:param
:type object: string
:param
:type fp: file
:param: the pointer of the read file
:type content_type: string
:param: the object content type that supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'PUT'
return self._put_or_post_object_from_fp(method, bucket, object, fp, content_type, headers, params)
def post_object_from_fp(self, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None):
'''
Post object into bucket, the content of object is read from file pointer
:type bucket: string
:param
:type object: string
:param
:type fp: file
:param: the pointer of the read file
:type content_type: string
:param: the object content type that supported by HTTP
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'POST'
return self._put_or_post_object_from_fp(method, bucket, object, fp, content_type, headers, params)
    def _put_or_post_object_from_fp(self, method, bucket, object, fp, content_type=DefaultContentType, headers=None, params=None):
        '''
        NOT public API
        Stream the content of *fp* as the body of a PUT/POST upload,
        retrying individual send() calls and following one endpoint
        redirect by recursing.

        :type method: string
        :param method: "PUT" or "POST"
        :type fp: file
        :param fp: open, seekable file-like object
        Returns:
            HTTP Response
        '''
        # Keep pristine copies for the redirect retry: the request itself
        # mutates headers/params (Date, Authorization, ...).
        tmp_object = object
        tmp_headers = {}
        tmp_params = {}
        if headers and isinstance(headers, dict):
            tmp_headers = headers.copy()
        if params and isinstance(params, dict):
            tmp_params = params.copy()
        # NOTE(review): the seek arguments look swapped, but since
        # os.SEEK_SET == 0 this is seek(0, SEEK_END), i.e. jump to EOF to
        # measure the size, then rewind.
        fp.seek(os.SEEK_SET, os.SEEK_END)
        filesize = fp.tell()
        fp.seek(os.SEEK_SET)
        conn = self._open_conn_to_put_object(method, bucket, object, filesize, content_type, headers, params)
        totallen = 0
        l = fp.read(self.SendBufferSize)
        retry_times = 0
        while len(l) > 0:
            # Give up after 100 consecutive failed sends of the same chunk.
            if retry_times > 100:
                print "reach max retry times;%s" % retry_times
                raise
            try:
                conn.send(l)
                retry_times = 0
            except:
                # Re-send the same chunk on the next iteration.
                retry_times += 1
                continue
            totallen += len(l)
            if self.show_bar:
                self.view_bar(totallen, filesize)
            l = fp.read(self.SendBufferSize)
        res = conn.getresponse()
        if check_redirect(res):
            # Switch to the endpoint the server named and retry once with
            # the original (uncontaminated) headers/params.
            self.host = helper_get_host_from_resp(res, bucket)
            return self.put_object_from_fp(bucket, tmp_object, fp, content_type, tmp_headers, tmp_params)
        return res
def get_object(self, bucket, object, headers=None, params=None):
'''
Get object
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
body = ''
return self.http_request(method, bucket, object, headers, body, params)
def get_object_to_file(self, bucket, object, filename, headers=None):
'''
Get object and write the content of object into a file
:type bucket: string
:param
:type object: string
:param
:type filename: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
res = self.get_object(bucket, object, headers)
totalread = 0
if res.status / 100 == 2:
header = {}
header = convert_header2map(res.getheaders())
filesize = safe_get_element("content-length", header)
f = file(filename, 'wb')
data = ''
while True:
data = res.read(self.RecvBufferSize)
if data:
f.write(data)
totalread += len(data)
if self.show_bar:
self.view_bar(totalread, filesize)
else:
break
f.close()
# TODO: get object with flow
return res
def delete_object(self, bucket, object, headers=None):
'''
Delete object
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'DELETE'
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def head_object(self, bucket, object, headers=None):
'''
Head object, to get the meta message of object without the content
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'HEAD'
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def create_link_from_list(self, bucket, object, object_list=None, headers=None, params=None):
object_link_msg_xml = create_object_link_msg_xml_by_name(object_list)
return self.create_link(bucket, object, object_link_msg_xml, headers, params)
def create_link(self, bucket, object, object_link_msg_xml, headers=None, params=None):
'''
Create object link, merge all objects in object_link_msg_xml into one object
:type bucket: string
:param
:type object: string
:param
:type object_link_msg_xml: string
:param: xml format string, like
<CreateObjectLink>
<Part>
<PartNumber>N</PartNumber>
<PartName>objectN</PartName>
</Part>
</CreateObjectLink>
:type headers: dict
:param: HTTP header
:type params: dict
:param: parameters
Returns:
HTTP Response
'''
method = 'PUT'
if not headers:
headers = {}
if not params:
params = {}
if not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
body = object_link_msg_xml
params['link'] = ''
headers['Content-Length'] = str(len(body))
return self.http_request(method, bucket, object, headers, body, params)
def get_link_index(self, bucket, object, headers=None, params=None):
'''
Get all objects linked
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
if not headers:
headers = {}
if not params:
params = {}
params['link'] = ''
body = ''
return self.http_request(method, bucket, object, headers, body, params)
def post_object_group(self, bucket, object, object_group_msg_xml, headers=None, params=None):
'''
Post object group, merge all objects in object_group_msg_xml into one object
:type bucket: string
:param
:type object: string
:param
:type object_group_msg_xml: string
:param: xml format string, like
<CreateFileGroup>
<Part>
<PartNumber>N</PartNumber>
<FileName>objectN</FileName>
<Etag>"47BCE5C74F589F4867DBD57E9CA9F808"</Etag>
</Part>
</CreateFileGroup>
:type headers: dict
:param: HTTP header
:type params: dict
:param: parameters
Returns:
HTTP Response
'''
method = 'POST'
if not headers:
headers = {}
if not params:
params = {}
if not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
body = object_group_msg_xml
params['group'] = ''
headers['Content-Length'] = str(len(body))
return self.http_request(method, bucket, object, headers, body, params)
def get_object_group_index(self, bucket, object, headers=None):
'''
Get object group_index
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
headers["x-oss-file-group"] = ''
method = 'GET'
body = ''
params = {}
return self.http_request(method, bucket, object, headers, body, params)
def upload_part_from_file_given_pos(self, bucket, object, filename, offset, partsize, upload_id, part_number, headers=None, params=None):
if not params:
params = {}
params['partNumber'] = part_number
params['uploadId'] = upload_id
content_type = ''
return self.put_object_from_file_given_pos(bucket, object, filename, offset, partsize, content_type, headers, params)
    def put_object_from_file_given_pos(self, bucket, object, filename, offset, partsize, content_type='', headers=None, params=None):
        '''
        PUT an object whose content is a partsize-byte slice of a local
        file starting at the given offset, retrying individual sends and
        following one endpoint redirect by recursing.

        :type bucket: string
        :param bucket: bucket name
        :type object: string
        :param object: object name
        :type filename: string
        :param filename: path of the file to read
        :type offset: int
        :param offset: byte position in the file to start reading from
        :type partsize: int
        :param partsize: number of bytes to read and send
        :type content_type: string
        :param content_type: HTTP content type; guessed from filename if empty
        :type headers: dict
        :param headers: HTTP headers
        Returns:
            HTTP Response
        '''
        # Keep pristine copies for the redirect retry: the request itself
        # mutates headers/params (Date, Authorization, ...).
        tmp_object = object
        tmp_headers = {}
        tmp_params = {}
        if headers and isinstance(headers, dict):
            tmp_headers = headers.copy()
        if params and isinstance(params, dict):
            tmp_params = params.copy()
        fp = open(filename, 'rb')
        if offset > os.path.getsize(filename):
            # Offset past EOF: seek(0, SEEK_END) clamps to the end of file
            # (os.SEEK_SET == 0, so the odd-looking arguments work out).
            fp.seek(os.SEEK_SET, os.SEEK_END)
        else:
            fp.seek(offset)
        if not content_type:
            content_type = get_content_type_by_filename(filename)
        method = 'PUT'
        conn = self._open_conn_to_put_object(method, bucket, object, partsize, content_type, headers, params)
        left_len = partsize
        while 1:
            if left_len <= 0:
                break
            elif left_len < self.SendBufferSize:
                buffer_content = fp.read(left_len)
            else:
                buffer_content = fp.read(self.SendBufferSize)
            if buffer_content:
                retry_times = 0
                # Re-send the same chunk until it succeeds, up to 100 tries.
                while 1:
                    if retry_times > 100:
                        print "reach max retry times;%s" % retry_times
                        fp.close()
                        raise
                    try:
                        conn.send(buffer_content)
                        retry_times = 0
                        break
                    except:
                        retry_times += 1
                        continue
            left_len = left_len - len(buffer_content)
        fp.close()
        res = conn.getresponse()
        if check_redirect(res):
            # Switch to the endpoint the server named and retry once with
            # the original (uncontaminated) headers/params.
            self.host = helper_get_host_from_resp(res, bucket)
            return self.put_object_from_file_given_pos(bucket, tmp_object, filename, offset, partsize
                    , content_type, tmp_headers, tmp_params)
        return res
    def upload_large_file(self, bucket, object, filename, thread_num=10, max_part_num=1000, headers=None):
        '''
        Upload a large local file by splitting it into parts, uploading the
        parts concurrently with worker threads, then merging them into one
        object via an object group.

        :type bucket: string
        :param bucket: bucket name
        :type object: string
        :param object: name of the resulting merged object
        :type filename: string
        :param filename: path of the file to upload
        :type thread_num: int
        :param thread_num: number of concurrent upload threads
        :type max_part_num: int
        :param max_part_num: maximum number of parts to split the file into
        :type headers: dict
        :param headers: HTTP headers
        Returns:
            HTTP Response (or None when every retry failed)
        '''
        #split the large file into 1000 parts or many parts
        #get part_msg_list
        if not headers:
            headers = {}
        filename = convert_utf8(filename)
        part_msg_list = split_large_file(filename, object, max_part_num)
        #make sure all the parts are put into same bucket
        # Never spawn more threads than there are parts to upload.
        if len(part_msg_list) < thread_num and len(part_msg_list) != 0:
            thread_num = len(part_msg_list)
        step = len(part_msg_list) / thread_num
        retry_times = self.retry_times
        # Outer retry loop: one failed worker pass restarts the whole batch.
        while(retry_times >= 0):
            try:
                threadpool = []
                for i in xrange(0, thread_num):
                    # The last thread absorbs the remainder of the division.
                    if i == thread_num - 1:
                        end = len(part_msg_list)
                    else:
                        end = i * step + step
                    begin = i * step
                    # Each worker gets its own client instance; the API
                    # object is not shared across threads.
                    oss = OssAPI(self.host, self.access_id, self.secret_access_key)
                    current = PutObjectGroupWorker(oss, bucket, filename, part_msg_list[begin:end], retry_times)
                    threadpool.append(current)
                    current.start()
                for item in threadpool:
                    item.join()
                break
            except:
                retry_times = retry_times -1
                if -1 >= retry_times:
                    print "after retry %s, failed, upload large file failed!" % retry_times
                    return
        #get xml string that contains msg of object group
        object_group_msg_xml = create_object_group_msg_xml(part_msg_list)
        content_type = get_content_type_by_filename(filename)
        content_type = convert_utf8(content_type)
        if not headers.has_key('Content-Type'):
            headers['Content-Type'] = content_type
        return self.post_object_group(bucket, object, object_group_msg_xml, headers)
    def upload_large_file_by_link(self, bucket, object, filename, thread_num=5, max_part_num=50, headers=None):
        '''
        Upload a large local file by splitting it into parts, uploading the
        parts concurrently with worker threads, then merging them into one
        object via an object link.

        :type bucket: string
        :param bucket: bucket name
        :type object: string
        :param object: name of the resulting link object
        :type filename: string
        :param filename: path of the file to upload
        :type thread_num: int
        :param thread_num: number of concurrent upload threads
        :type max_part_num: int
        :param max_part_num: maximum number of parts to split the file into
        :type headers: dict
        :param headers: HTTP headers
        Returns:
            HTTP Response (or None when every retry failed)
        '''
        #split the large file into 100 parts or many parts
        #get part_msg_list
        if not headers:
            headers = {}
        filename = convert_utf8(filename)
        part_msg_list = split_large_file(filename, object, max_part_num)
        #make sure all the parts are put into same bucket
        # Never spawn more threads than there are parts to upload.
        if len(part_msg_list) < thread_num and len(part_msg_list) != 0:
            thread_num = len(part_msg_list)
        step = len(part_msg_list) / thread_num
        retry_times = self.retry_times
        # Outer retry loop: one failed worker pass restarts the whole batch.
        while(retry_times >= 0):
            try:
                threadpool = []
                for i in xrange(0, thread_num):
                    # The last thread absorbs the remainder of the division.
                    if i == thread_num - 1:
                        end = len(part_msg_list)
                    else:
                        end = i * step + step
                    begin = i * step
                    # Each worker gets its own client instance; the API
                    # object is not shared across threads.
                    oss = OssAPI(self.host, self.access_id, self.secret_access_key)
                    current = PutObjectLinkWorker(oss, bucket, filename, part_msg_list[begin:end], self.retry_times)
                    threadpool.append(current)
                    current.start()
                for item in threadpool:
                    item.join()
                break
            except:
                retry_times = retry_times -1
                if -1 >= retry_times:
                    print "after retry %s, failed, upload large file failed!" % retry_times
                    return
        #get xml string that contains msg of object link
        object_link_msg_xml = create_object_link_msg_xml(part_msg_list)
        content_type = get_content_type_by_filename(filename)
        content_type = convert_utf8(content_type)
        if not headers.has_key('Content-Type'):
            headers['Content-Type'] = content_type
        return self.create_link(bucket, object, object_link_msg_xml, headers)
def copy_object(self, source_bucket, source_object, target_bucket, target_object, headers=None):
'''
Copy object
:type source_bucket: string
:param
:type source_object: string
:param
:type target_bucket: string
:param
:type target_object: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
source_object = convert_utf8(source_object)
source_object = oss_quote(source_object)
headers['x-oss-copy-source'] = "/%s/%s" % (source_bucket, source_object)
method = 'PUT'
body = ''
params = {}
return self.http_request(method, target_bucket, target_object, headers, body, params)
def init_multi_upload(self, bucket, object, headers=None, params=None):
'''
Init multi upload
:type bucket: string
:param
:type object: string
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
if not headers:
headers = {}
method = 'POST'
body = ''
params['uploads'] = ''
if isinstance(headers, dict) and not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
return self.http_request(method, bucket, object, headers, body, params)
def get_all_parts(self, bucket, object, upload_id, max_parts=None, part_number_marker=None):
'''
List all upload parts of given upload_id
:type bucket: string
:param
:type object: string
:param
:type upload_id: string
:param
:type max_parts: int
:param
:type part_number_marker: string
:param
Returns:
HTTP Response
'''
method = 'GET'
headers = {}
body = ''
params = {}
params['uploadId'] = upload_id
if max_parts:
params['max-parts'] = max_parts
if part_number_marker:
params['part-number-marker'] = part_number_marker
return self.http_request(method, bucket, object, headers, body, params)
def get_all_multipart_uploads(self, bucket, delimiter=None, max_uploads=None, key_marker=None, prefix=None, upload_id_marker=None, headers=None):
'''
List all upload_ids and their parts
:type bucket: string
:param
:type delimiter: string
:param
:type max_uploads: string
:param
:type key_marker: string
:param
:type prefix: string
:param
:type upload_id_marker: string
:param
:type headers: dict
:param: HTTP header
Returns:
HTTP Response
'''
method = 'GET'
object = ''
body = ''
params = {}
params['uploads'] = ''
if delimiter:
params['delimiter'] = delimiter
if max_uploads:
params['max-uploads'] = max_uploads
if key_marker:
params['key-marker'] = key_marker
if prefix:
params['prefix'] = prefix
if upload_id_marker:
params['upload-id-marker'] = upload_id_marker
return self.http_request(method, bucket, object, headers, body, params)
def upload_part(self, bucket, object, filename, upload_id, part_number, headers=None, params=None):
'''
Upload the content of filename as one part of given upload_id
:type bucket: string
:param
:type object: string
:param
:type filename: string
:param
:type upload_id: string
:param
:type part_number: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
params['partNumber'] = part_number
params['uploadId'] = upload_id
content_type = ''
return self.put_object_from_file(bucket, object, filename, content_type, headers, params)
def upload_part_from_string(self, bucket, object, data, upload_id, part_number, headers=None, params=None):
'''
Upload the content of string as one part of given upload_id
:type bucket: string
:param
:type object: string
:param
:type data: string
:param
:type upload_id: string
:param
:type part_number: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
params['partNumber'] = part_number
params['uploadId'] = upload_id
content_type = ''
fp = StringIO.StringIO(data)
return self.put_object_from_fp(bucket, object, fp, content_type, headers, params)
def copy_object_as_part(self, source_bucket, source_object, target_bucket,
target_object, upload_id, part_number, headers=None, params=None):
'''
Upload a part with data copy from srouce object in source bucket
:type source_bucket: string
:param
:type source_object: string
:param
:type target_bucket: string
:param
:type target_object: string
:param
:type data: string
:param
:type upload_id: string
:param
:type part_number: int
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
source_object = convert_utf8(source_object)
source_object = oss_quote(source_object)
method = 'PUT'
params['partNumber'] = part_number
params['uploadId'] = upload_id
headers['x-oss-copy-source'] = "/%s/%s" % (source_bucket, source_object)
body = ''
return self.http_request(method, target_bucket, target_object, headers, body, params)
def complete_upload(self, bucket, object, upload_id, part_msg_xml, headers=None, params=None):
'''
Finish multiupload and merge all the parts in part_msg_xml as a object.
:type bucket: string
:param
:type object: string
:param
:type upload_id: string
:param
:type part_msg_xml: string
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'POST'
body = part_msg_xml
headers['Content-Length'] = str(len(body))
params['uploadId'] = upload_id
if not headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(object)
headers['Content-Type'] = content_type
return self.http_request(method, bucket, object, headers, body, params)
def cancel_upload(self, bucket, object, upload_id, headers=None, params=None):
'''
Cancel multiupload and delete all parts of given upload_id
:type bucket: string
:param
:type object: string
:param
:type upload_id: string
:param
:type headers: dict
:param: HTTP header
:type params: dict
:param: HTTP header
Returns:
HTTP Response
'''
if not params:
params = {}
method = 'DELETE'
upload_id = convert_utf8(upload_id)
params['uploadId'] = upload_id
body = ''
return self.http_request(method, bucket, object, headers, body, params)
def multi_upload_file(self, bucket, object, filename, upload_id='', thread_num=10, max_part_num=10000, headers=None, params=None):
'''
Upload large file, the content is read from filename. The large file is splitted into many parts. It will put the many parts into bucket and then merge all the parts into one object.
:type bucket: string
:param
:type object: string
:param
:type fllename: string
:param: the name of the read file
:type upload_id: string
:param
:type thread_num: int
:param
:type max_part_num: int
:param
:type headers: dict
:param
:type params: dict
:param
Returns:
HTTP Response
'''
tmp_headers = {}
if headers and isinstance(headers, dict):
tmp_headers = headers.copy()
if not tmp_headers.has_key('Content-Type'):
content_type = get_content_type_by_filename(filename)
tmp_headers['Content-Type'] = content_type
#get init upload_id
if not upload_id:
res = self.init_multi_upload(bucket, object, tmp_headers, params)
body = res.read()
if res.status == 200:
h = GetInitUploadIdXml(body)
upload_id = h.upload_id
else:
err = ErrorXml(body)
raise Exception("%s, %s" %(res.status, err.msg))
if not upload_id:
raise Exception("-1, Cannot get upload id.")
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
return multi_upload_file2(oss, bucket, object, filename, upload_id, thread_num, max_part_num, self.retry_times, headers, params)
def delete_objects(self, bucket, object_list=None, headers=None, params=None):
'''
Batch delete objects
:type bucket: string
:param:
:type object_list: list
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not object_list:
object_list = []
object_list_xml = create_delete_object_msg_xml(object_list)
return self.batch_delete_object(bucket, object_list_xml, headers, params)
def batch_delete_object(self, bucket, object_list_xml, headers=None, params=None):
'''
Delete the objects in object_list_xml
:type bucket: string
:param:
:type object_list_xml: string
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'POST'
object = ''
body = object_list_xml
headers['Content-Length'] = str(len(body))
params['delete'] = ''
base64md5 = base64.encodestring(md5.new(body).digest()).strip()
headers['Content-MD5'] = base64md5
return self.http_request(method, bucket, object, headers, body, params)
def list_objects(self, bucket, prefix=''):
'''
:type bucket: string
:param:
:type prefix: string
:param:
Returns:
a list that contains the objects in bucket with prefix
'''
get_instance = GetAllObjects()
marker_input = ''
object_list = []
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
(object_list, marker_output) = get_instance.get_object_in_bucket(oss, bucket, marker_input, prefix)
return object_list
def list_objects_dirs(self, bucket, prefix='', delimiter=''):
'''
:type bucket: string
:param:
:type prefix: string
:param:
:type prefix: delimiter
:param:
Returns:
a list that contains the objects in bucket with prefix
'''
get_instance = GetAllObjects()
marker_input = ''
object_list = []
dir_list = []
oss = OssAPI(self.host, self.access_id, self.secret_access_key)
(object_list, dir_list) = get_instance.get_all_object_dir_in_bucket(oss, bucket, marker_input, prefix, delimiter)
return (object_list, dir_list)
def batch_delete_objects(self, bucket, object_list=None):
'''
:type bucket: string
:param:
:type object_list: object name list
:param:
Returns:
True or False
'''
if not object_list:
object_list = []
object_list_xml = create_delete_object_msg_xml(object_list)
try:
res = self.batch_delete_object(bucket, object_list_xml)
if res.status / 100 == 2:
return True
except:
pass
return False
def get_object_info(self, bucket, object, headers=None, params=None):
'''
Get object information
:type bucket: string
:param:
:type object: string
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'GET'
body = ''
params['objectInfo'] = ''
return self.http_request(method, bucket, object, headers, body, params)
def options(self, bucket, object='', headers=None, params=None):
'''
Options object to determine if user can send the actual HTTP request
:type bucket: string
:param:
:type object: string
:param:
:type headers: dict
:param: HTTP header
:type params: dict
:param: the parameters that put in the url address as query string
Returns:
HTTP Response
'''
if not headers:
headers = {}
if not params:
params = {}
method = 'OPTIONS'
body = ''
return self.http_request(method, bucket, object, headers, body, params)
| apache-2.0 |
bigzz/linux-ext4 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ Tree representation of a functions call stack.

    A function with no parent in the kernel (interrupt, syscall, kernel
    thread...) hangs off a virtual parent called ROOT.
    """
    ROOT = None
    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []
    def calls(self, func, calltime):
        """ Record that this function called `func` at `calltime`.
        @return: A reference to the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node
    def getParent(self, func):
        """ Walk up to the nearest ancestor named `func`; when no such
        ancestor exists, create a fresh child of ROOT instead.
        @return: A reference to the parent.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node
    def __repr__(self):
        return self.__toString("", True)
    def __toString(self, branch, lastChild):
        # Render this node, then recurse into the children; the last child
        # of a node drops the vertical bar from its inherited branch prefix.
        if self._time is None:
            rendered = "%s----%s\n" % (branch, self._func)
        else:
            rendered = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            branch = branch[:-1] + " "
        last_index = len(self._children) - 1
        for index, child in enumerate(self._children):
            rendered += child.__toString(branch + " |", index == last_index)
        return rendered
class BrokenLineException(Exception):
    """Raised for an incomplete trace line (e.g. the pipe broke mid-line);
    processing stops at — and ignores — such a line.
    """
class CommentLineException(Exception):
    """Raised for comment lines (as found at the beginning of the trace
    file); such lines are simply ignored.
    """
def parseLine(line):
    """Parse one ftrace line into a (calltime, callee, caller) tuple.

    Raises CommentLineException for header/comment lines and
    BrokenLineException for lines that do not match the expected format.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return match.groups()
def main():
    # All call stacks hang off a single virtual root node.
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Incomplete final line (the pipe was broken): stop here.
            break
        except CommentLineException:
            # Header/comment lines carry no call information; skip them.
            continue
        # Re-anchor at the caller's node, then record the callee beneath it.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # Python 2 print statement: dump the whole drawn tree to stdout.
    print CallTree.ROOT
if __name__ == "__main__":
    main()
| gpl-2.0 |
iero/Kindle-weather-station | raspberry/geticon.py | 1 | 10537 | #!/usr/bin/python
# Published March 2015
# Author : Greg Fabre - http://www.iero.org
# Based on Noah Blon's work : http://codepen.io/noahblon/details/lxukH
# Public domain source code
def getHome() :
    # SVG markup for the "home" (house) glyph shown on the weather display.
    return '<g transform="matrix(6.070005,0,0,5.653153,292.99285,506.46284)"><path d="M 42,48 C 29.995672,48.017555 18.003366,48 6,48 L 6,27 c 0,-0.552 0.447,-1 1,-1 0.553,0 1,0.448 1,1 l 0,19 c 32.142331,0.03306 13.954169,0 32,0 l 0,-18 c 0,-0.552 0.447,-1 1,-1 0.553,0 1,0.448 1,1 z"/><path d="m 47,27 c -0.249,0 -0.497,-0.092 -0.691,-0.277 L 24,5.384 1.691,26.723 C 1.292,27.104 0.659,27.091 0.277,26.692 -0.105,26.293 -0.09,25.66 0.308,25.278 L 24,2.616 47.691,25.277 c 0.398,0.382 0.413,1.015 0.031,1.414 C 47.526,26.896 47.264,27 47,27 Z"/><path d="m 39,15 c -0.553,0 -1,-0.448 -1,-1 L 38,8 32,8 C 31.447,8 31,7.552 31,7 31,6.448 31.447,6 32,6 l 8,0 0,8 c 0,0.552 -0.447,1 -1,1 z" /></g>'
# Forecast.io icons
# clear-day, clear-night, rain, snow, sleet, wind, fog, cloudy, partly-cloudy-day, or partly-cloudy-night.
def getClearDay() :
    # SVG <path> for the forecast.io "clear-day" condition.
    return '<path d="M71.997,51.999h-3.998c-1.105,0-2-0.895-2-1.999s0.895-2,2-2h3.998 c1.105,0,2,0.896,2,2S73.103,51.999,71.997,51.999z M64.142,38.688c-0.781,0.781-2.049,0.781-2.828,0 c-0.781-0.781-0.781-2.047,0-2.828l2.828-2.828c0.779-0.781,2.047-0.781,2.828,0c0.779,0.781,0.779,2.047,0,2.828L64.142,38.688z M50.001,61.998c-6.627,0-12-5.372-12-11.998c0-6.627,5.372-11.999,12-11.999c6.627,0,11.998,5.372,11.998,11.999 C61.999,56.626,56.628,61.998,50.001,61.998z M50.001,42.001c-4.418,0-8,3.581-8,7.999c0,4.417,3.583,7.999,8,7.999 s7.998-3.582,7.998-7.999C57.999,45.582,54.419,42.001,50.001,42.001z M50.001,34.002c-1.105,0-2-0.896-2-2v-3.999 c0-1.104,0.895-2,2-2c1.104,0,2,0.896,2,2v3.999C52.001,33.106,51.104,34.002,50.001,34.002z M35.86,38.688l-2.828-2.828 c-0.781-0.781-0.781-2.047,0-2.828s2.047-0.781,2.828,0l2.828,2.828c0.781,0.781,0.781,2.047,0,2.828S36.641,39.469,35.86,38.688z M34.002,50c0,1.104-0.896,1.999-2,1.999h-4c-1.104,0-1.999-0.895-1.999-1.999s0.896-2,1.999-2h4C33.107,48,34.002,48.896,34.002,50 z M35.86,61.312c0.781-0.78,2.047-0.78,2.828,0c0.781,0.781,0.781,2.048,0,2.828l-2.828,2.828c-0.781,0.781-2.047,0.781-2.828,0 c-0.781-0.78-0.781-2.047,0-2.828L35.86,61.312z M50.001,65.998c1.104,0,2,0.895,2,1.999v4c0,1.104-0.896,2-2,2 c-1.105,0-2-0.896-2-2v-4C48.001,66.893,48.896,65.998,50.001,65.998z M64.142,61.312l2.828,2.828c0.779,0.781,0.779,2.048,0,2.828 c-0.781,0.781-2.049,0.781-2.828,0l-2.828-2.828c-0.781-0.78-0.781-2.047,0-2.828C62.093,60.531,63.36,60.531,64.142,61.312z" />'
def getClearNight() :
    # SVG <path> for the forecast.io "clear-night" condition.
    return '<path d="M50,61.998c-6.627,0-11.999-5.372-11.999-11.998 c0-6.627,5.372-11.999,11.999-11.999c0.755,0,1.491,0.078,2.207,0.212c-0.132,0.576-0.208,1.173-0.208,1.788 c0,4.418,3.582,7.999,8,7.999c0.615,0,1.212-0.076,1.788-0.208c0.133,0.717,0.211,1.452,0.211,2.208 C61.998,56.626,56.626,61.998,50,61.998z M48.212,42.208c-3.556,0.813-6.211,3.989-6.211,7.792c0,4.417,3.581,7.999,7.999,7.999 c3.802,0,6.978-2.655,7.791-6.211C52.937,50.884,49.115,47.062,48.212,42.208z" />'
def getRain() :
    # SVG <path> for the forecast.io "rain" condition.
    return '<path d="m 59.999,65.64 c -0.266,0 -0.614,0 -1,0 0,-1.372 -0.319,-2.742 -0.943,-4 0.777,0 1.451,0 1.943,0 4.418,0 7.999,-3.58 7.999,-7.998 0,-4.418 -3.581,-7.999 -7.999,-7.999 -1.6,0 -3.083,0.481 -4.334,1.29 -1.231,-5.316 -5.973,-9.289 -11.664,-9.289 -6.627,0 -11.998,5.372 -11.998,11.998 0,5.953 4.339,10.879 10.023,11.822 -0.637,1.217 -0.969,2.549 -1.012,3.887 -7.406,-1.399 -13.012,-7.895 -13.012,-15.709 0,-8.835 7.162,-15.998 15.998,-15.998 6.004,0 11.229,3.312 13.965,8.204 0.664,-0.114 1.337,-0.205 2.033,-0.205 6.627,0 11.998,5.372 11.998,11.999 0,6.627 -5.37,11.998 -11.997,11.998 z m -9.998,-7.071 3.535,3.535 c 1.951,1.953 1.951,5.118 0,7.07 -1.953,1.953 -5.119,1.953 -7.07,0 -1.953,-1.952 -1.953,-5.117 0,-7.07 l 3.535,-3.535 z" />'
def getSnow() :
    # SVG <path> for the forecast.io "snow" condition (also reused by getSleet).
    return '<path d="M63.999,64.943v-4.381c2.389-1.385,3.999-3.963,3.999-6.922 c0-4.416-3.581-7.998-7.999-7.998c-1.6,0-3.083,0.48-4.333,1.291c-1.231-5.317-5.974-9.291-11.665-9.291 c-6.627,0-11.998,5.373-11.998,12c0,3.549,1.55,6.729,4,8.924v4.916c-4.777-2.768-8-7.922-8-13.84 c0-8.836,7.163-15.999,15.998-15.999c6.004,0,11.229,3.312,13.965,8.204c0.664-0.113,1.337-0.205,2.033-0.205 c6.627,0,11.999,5.373,11.999,11.998C71.998,58.863,68.655,63.293,63.999,64.943z M42.001,57.641c1.105,0,2,0.896,2,2 c0,1.105-0.895,2-2,2c-1.104,0-1.999-0.895-1.999-2C40.002,58.537,40.897,57.641,42.001,57.641z M42.001,65.641c1.105,0,2,0.895,2,2 c0,1.104-0.895,1.998-2,1.998c-1.104,0-1.999-0.895-1.999-1.998C40.002,66.535,40.897,65.641,42.001,65.641z M50.001,61.641 c1.104,0,2,0.895,2,2c0,1.104-0.896,2-2,2c-1.105,0-2-0.896-2-2C48.001,62.535,48.896,61.641,50.001,61.641z M50.001,69.639 c1.104,0,2,0.896,2,2c0,1.105-0.896,2-2,2c-1.105,0-2-0.895-2-2C48.001,70.535,48.896,69.639,50.001,69.639z M57.999,57.641 c1.105,0,2,0.896,2,2c0,1.105-0.895,2-2,2c-1.104,0-1.999-0.895-1.999-2C56,58.537,56.896,57.641,57.999,57.641z M57.999,65.641 c1.105,0,2,0.895,2,2c0,1.104-0.895,1.998-2,1.998c-1.104,0-1.999-0.895-1.999-1.998C56,66.535,56.896,65.641,57.999,65.641z" />'
def getSleet() :
    # forecast.io's "sleet" condition is rendered with the same glyph as "snow".
    return getSnow()
def getWind() :
    # SVG <path> for the forecast.io "wind" condition.
    return '<path d="m 36.487886,31.712413 -7.4209,5.614747 -1.239742,0 0,-1.686046 -3.613959,0 0,32.148333 3.613959,0 0,-28.954574 1.286522,0 6.438465,4.155668 0.935655,0.04863 c 6.772487,-0.02017 8.174561,5.572594 20.993709,5.571513 4.65253,10e-4 6.520094,-1.29179 9.210331,-1.280746 4.597097,-0.01101 8.812682,2.102152 8.812682,2.102152 l 2.473633,-7.122458 c 0,0 -6.264433,-4.48985 -16.68386,-4.479907 -0.702187,-0.0099 -2.173664,0.189825 -3.070114,0.183735 -8.933613,0.006 -4.236867,-6.314021 -21.736381,-6.301051 z m -0.09357,1.048376 -0.742677,9.408344 -6.286419,-4.112434 7.029096,-5.29591 z" />'
def getFog() :
    # SVG <path> for the forecast.io "fog" condition.
    return '<path d="M29.177,55.641c-0.262-0.646-0.473-1.315-0.648-2h43.47 c0,0.684-0.07,1.348-0.181,2H29.177z M36.263,35.643c2.294-1.271,4.93-1.999,7.738-1.999c2.806,0,5.436,0.73,7.727,1.999H36.263z M28.142,47.642c0.085-0.682,0.218-1.347,0.387-1.999h40.396c0.551,0.613,1.039,1.281,1.455,1.999H28.142z M29.177,43.643 c0.281-0.693,0.613-1.359,0.984-2h27.682c0.04,0.068,0.084,0.135,0.123,0.205c0.664-0.114,1.338-0.205,2.033-0.205 c2.451,0,4.729,0.738,6.627,2H29.177z M31.524,39.643c0.58-0.723,1.225-1.388,1.92-2h21.122c0.69,0.61,1.326,1.28,1.903,2H31.524z M71.817,51.641H28.142c-0.082-0.656-0.139-1.32-0.139-1.999h43.298C71.528,50.285,71.702,50.953,71.817,51.641z M71.301,57.641 c-0.247,0.699-0.555,1.367-0.921,2H31.524c-0.505-0.629-0.957-1.299-1.363-2H71.301z M33.444,61.641h35.48 c-0.68,0.758-1.447,1.434-2.299,1.999H36.263C35.247,63.078,34.309,62.4,33.444,61.641z" />'
def getCloudy() :
    # SVG <path> for the forecast.io "cloudy" condition.
    return '<path d="M43.945,65.639c-8.835,0-15.998-7.162-15.998-15.998 c0-8.836,7.163-15.998,15.998-15.998c6.004,0,11.229,3.312,13.965,8.203c0.664-0.113,1.338-0.205,2.033-0.205 c6.627,0,11.999,5.373,11.999,12c0,6.625-5.372,11.998-11.999,11.998C57.168,65.639,47.143,65.639,43.945,65.639z M59.943,61.639 c4.418,0,8-3.582,8-7.998c0-4.418-3.582-8-8-8c-1.6,0-3.082,0.481-4.333,1.291c-1.231-5.316-5.974-9.29-11.665-9.29 c-6.626,0-11.998,5.372-11.998,11.999c0,6.626,5.372,11.998,11.998,11.998C47.562,61.639,56.924,61.639,59.943,61.639z" />'
def getPartlyCloudyDay() :
    # SVG <path> for the forecast.io "partly-cloudy-day" condition.
    # NOTE(review): this literal appears line-wrapped in the extracted source;
    # it is rejoined here as the single-line string Python requires — confirm
    # against the original file.
    return '<path d="m 70.964271,47.439013 -3.309389,0 c -0.913392,0 -1.654695,-0.740476 -1.654695,-1.654695 0,-0.913391 0.741303,-1.65304 1.654695,-1.65304 l 3.309389,0 c 0.913392,0 1.654695,0.740476 1.654695,1.65304 0,0.914219 -0.741303,1.654695 -1.654695,1.654695 z M 64.463803,36.425365 c -0.646158,0.646158 -1.69358,0.646158 -2.339738,0 -0.646158,-0.645331 -0.646158,-1.69358 0,-2.338911 l 2.339738,-2.339739 c 0.646158,-0.646158 1.69358,-0.646158 2.339738,0 0.646159,0.645331 0.646159,1.69358 0,2.339739 l -2.339738,2.338911 z m -2.438193,12.91241 0,0 c 1.447031,1.725847 2.321537,3.946447 2.321537,6.374711 0,5.481177 -4.44451,9.926514 -9.927341,9.926514 -2.295889,0 -10.590873,0 -13.235903,0 -7.309614,0 -13.235903,-5.925462 -13.235903,-13.235903 0,-7.310441 5.926289,-13.235903 13.235903,-13.235903 1.30059,0 2.556503,0.191944 3.742092,0.541085 1.816028,-2.338911 4.648038,-3.850475 7.839116,-3.850475 5.482831,0 9.927341,4.445338 9.927341,9.926514 -8.27e-4,1.253431 -0.24324,2.449776 -0.666842,3.553457 z m -30.769048,3.065322 c 0,5.482831 4.443683,9.926514 9.926514,9.926514 2.991688,0 10.738141,0 13.235903,0 3.65522,0 6.617951,-2.963559 6.617951,-6.617125 0,-3.65522 -2.962731,-6.618779 -6.617951,-6.618779 -1.323756,0 -2.550712,0.398782 -3.584896,1.068106 -1.018465,-4.398179 -4.942573,-7.68523 -9.651007,-7.68523 -5.482831,0 -9.926514,4.443683 -9.926514,9.926514 z M 52.764284,39.167194 c -1.830092,0 -3.487269,0.742958 -4.684441,1.943439 1.935993,1.188071 3.545184,2.85683 4.657139,4.843291 0.549358,-0.09349 1.106163,-0.169606 1.681997,-0.169606 1.758113,0 3.407844,0.462487 4.839982,1.263359 l 0,0 c 0.07943,-0.408709 0.124102,-0.830656 0.124102,-1.263359 0,-3.653566 -2.963558,-6.617124 -6.618779,-6.617124 z m 0,-6.618779 c -0.913391,0 -1.653867,-0.740476 -1.653867,-1.653867 l 0,-3.308563 c 0,-0.914218 0.741303,-1.654694 1.653867,-1.654694 0.914219,0 1.654695,0.740476 1.654695,1.654694 l 0,3.308563 c 0,0.914218 -0.739649,1.653867 -1.654695,1.653867 z m -11.698692,3.87695 -2.338911,-2.338911 c -0.646158,-0.646159 -0.646158,-1.694408 0,-2.339739 0.645331,-0.646158 1.69358,-0.646158 2.338911,0 l 2.339739,2.339739 c 0.646158,0.645331 0.646158,1.69358 0,2.338911 -0.645331,0.646158 -1.69358,0.646158 -2.339739,0 z" />'
def getPartlyCloudyNight() :
    # SVG <path> for the forecast.io "partly-cloudy-night" condition.
    return '<path d="M69.763,46.758L69.763,46.758c1.368,1.949,2.179,4.318,2.179,6.883 c0,6.625-5.371,11.998-11.998,11.998c-2.775,0-12.801,0-15.998,0c-8.836,0-15.998-7.162-15.998-15.998s7.162-15.998,15.998-15.998 c2.002,0,3.914,0.375,5.68,1.047l0,0c1.635-4.682,6.078-8.047,11.318-8.047c0.755,0,1.491,0.078,2.207,0.212 c-0.131,0.575-0.207,1.173-0.207,1.788c0,4.418,3.581,7.999,7.998,7.999c0.616,0,1.213-0.076,1.789-0.208 c0.133,0.717,0.211,1.453,0.211,2.208C72.941,41.775,71.73,44.621,69.763,46.758z M31.947,49.641 c0,6.627,5.371,11.998,11.998,11.998c3.616,0,12.979,0,15.998,0c4.418,0,7.999-3.582,7.999-7.998c0-4.418-3.581-8-7.999-8 c-1.6,0-3.083,0.482-4.334,1.291c-1.231-5.316-5.973-9.29-11.664-9.29C37.318,37.642,31.947,43.014,31.947,49.641z M51.496,35.545 c0.001,0,0.002,0,0.002,0S51.497,35.545,51.496,35.545z M59.155,30.85c-2.9,0.664-5.175,2.91-5.925,5.775l0,0 c1.918,1.372,3.523,3.152,4.68,5.22c0.664-0.113,1.337-0.205,2.033-0.205c2.618,0,5.033,0.85,7.005,2.271l0,0 c0.858-0.979,1.485-2.168,1.786-3.482C63.881,39.525,60.059,35.706,59.155,30.85z" />'
| mit |
mr-karan/coala | tests/coalaCITest.py | 6 | 3184 | import os
import re
import sys
import unittest
from coalib import coala_ci
from coalib.misc.ContextManagers import prepare_file
from tests.TestUtilities import bear_test_module, execute_coala
class coalaCITest(unittest.TestCase):
    """End-to-end checks for the non-interactive ``coala-ci`` entry point."""

    def setUp(self):
        # Remember argv so each test leaves the interpreter state untouched.
        self.old_argv = sys.argv
        self.unescaped_coafile = os.path.abspath("./.coafile")
        self.coafile = re.escape(self.unescaped_coafile)

    def tearDown(self):
        sys.argv = self.old_argv

    def test_nonexistent(self):
        # Pointing coala-ci at a missing coafile must produce an error line.
        exit_code, stdout = execute_coala(
            coala_ci.main, "coala-ci", "-c", 'nonex', "test")
        self.assertRegex(
            stdout,
            ".*\\[ERROR\\].*The requested coafile '.*' does not exist. .+\n")

    def test_find_no_issues(self):
        # A clean file must yield a zero exit status.
        with bear_test_module(), \
                prepare_file(["#include <a>"], None) as (file_lines, filename):
            exit_code, stdout = execute_coala(coala_ci.main, "coala-ci",
                                              '-c', os.devnull,
                                              '-f', re.escape(filename),
                                              '-b', 'SpaceConsistencyTestBear',
                                              "--settings", "use_spaces=True")
            self.assertIn("Executing section Default", stdout)
            self.assertEqual(exit_code, 0,
                             "coala-ci must return zero when successful")

    def test_find_issues(self):
        # A file with a result must be reported and yield a non-zero status.
        with bear_test_module(), \
                prepare_file(["#fixme"], None) as (file_lines, filename):
            exit_code, stdout = execute_coala(coala_ci.main, "coala-ci",
                                              "-c", os.devnull,
                                              "-b", "LineCountTestBear",
                                              "-f", re.escape(filename))
            self.assertIn("This file has 1 lines.",
                          stdout,
                          "The output should report count as 1 lines")
            self.assertNotEqual(exit_code, 0,
                                "coala-ci was expected to return non-zero")

    def test_fix_patchable_issues(self):
        # Auto-applying a patch action must report it and exit with code 5.
        with bear_test_module(), \
                prepare_file(["\t#include <a>"], None) as (file_lines, filename):
            exit_code, stdout = execute_coala(
                coala_ci.main, "coala-ci",
                "-c", os.devnull,
                "-f", re.escape(filename),
                "-b", "SpaceConsistencyTestBear",
                "--settings", "autoapply=true", "use_spaces=True",
                "default_actions=SpaceConsistencyTestBear:ApplyPatchAction")
            self.assertIn("Applied 'ApplyPatchAction'", stdout)
            self.assertEqual(exit_code, 5,
                             "coala-ci must return exitcode 5 when it "
                             "autofixes the code.")

    def test_fail_acquire_settings(self):
        # Missing mandatory settings cannot be asked for non-interactively.
        with bear_test_module():
            exit_code, stdout = execute_coala(coala_ci.main, "coala-ci",
                                              "-b", 'SpaceConsistencyTestBear',
                                              '-c', os.devnull)
            self.assertIn("During execution, we found that some", stdout)
| agpl-3.0 |
LICEF/edx-platform | common/test/acceptance/setup.py | 206 | 1111 | #!/usr/bin/env python
"""
Install bok-choy page objects for acceptance and end-to-end tests.
"""
import os
from setuptools import setup
# Distribution metadata for the bok-choy page-object package.
VERSION = '0.0.1'
DESCRIPTION = "Bok-choy page objects for edx-platform"
# Pip 1.5 will try to install this package from outside
# the directory containing setup.py, so we need to use an absolute path.
PAGES_PACKAGE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pages')
setup(
    name='edxapp-pages',
    version=VERSION,
    author='edX',
    url='http://github.com/edx/edx-platform',
    description=DESCRIPTION,
    license='AGPL',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU Affero General Public License v3',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Quality Assurance'
    ],
    # Map the importable package name onto the absolute on-disk directory.
    package_dir={'edxapp_pages': PAGES_PACKAGE_DIR},
    packages=['edxapp_pages', 'edxapp_pages.lms', 'edxapp_pages.studio']
)
| agpl-3.0 |
kondalrao/fsm_engine | fsm_engine/fsm_engine_old.py | 1 | 3020 | __author__ = 'Kondal Rao Komaragiri'
import multiprocessing
import select
import logging
import FSM
TIMEOUT = 5000
class FsmEngine(object):
    """select()-based event loop driving a set of registered FSM objects.

    FSM objects are indexed by their file descriptor; the engine waits for
    any of them to become readable and then invokes their dispatch hook.
    """

    def __init__(self, dispatch=None, flags=None):
        self.fsm_object_dict = {}      # fd -> fsm_object
        self.dispatch = dispatch
        self.flags = flags
        self.logger = None             # set by __init_log()
        self.rfd = []                  # read fds handed to select()
        self.__engineStarted = False
        self.__init_log()

    def __init_log(self):
        """Attach a multiprocessing-aware stderr logger (CRITICAL only)."""
        # multiprocessing.log_to_stderr(logging.DEBUG)
        multiprocessing.log_to_stderr()
        self.logger = multiprocessing.get_logger()
        self.logger.setLevel(logging.CRITICAL)

    def __idle(self):
        """Hook invoked when select() times out with nothing ready (no-op)."""
        # self.logger.debug("FsmEngine.__idle")
        pass

    def __collect_stats(self):
        """Hook for per-iteration statistics collection (currently a no-op)."""
        # self.logger.debug("FsmEngine.__collectStats")
        pass

    def generate_fsm_id(self):
        """Return a fresh FSM id: one greater than the largest id in use."""
        self.logger.debug("FsmEngine.generate_fsm_id")
        fsm_id = [0]
        for obj in self.fsm_object_dict.values():
            # BUG FIX: the module is imported as `FSM`; the previous
            # `fsm.FSMQ` raised NameError at runtime.
            if obj.obj_type == FSM.FSMQ:
                fsm_id.append(obj.fsm.getId())
        new_fsm_id = max(fsm_id) + 1
        self.logger.debug("FsmEngine.generate_fsm_id: new_fsm_id: %d" % new_fsm_id)
        # BUG FIX: the id was computed but never returned, so add_fsm()
        # ended up assigning None as the FSM id.
        return new_fsm_id

    def __register(self, fsm_object):
        # Index by fd and add it to the select() read set.
        self.fsm_object_dict[fsm_object.fd] = fsm_object
        self.rfd.append(fsm_object.fd)

    def __unregister(self, fsm_object):
        self.fsm_object_dict.pop(fsm_object.fd)
        self.rfd.remove(fsm_object.fd)

    def add_fsm(self, fsm_inst):
        """Register all objects of `fsm_inst` and fire its initial event."""
        self.logger.info("FsmEngine.addFSM: Adding FSM %s" % fsm_inst.fsm_name)
        # Assign a fresh id when the FSM does not have one yet (-1 sentinel).
        if fsm_inst.get_id() == -1:
            fsm_inst.set_id(self.generate_fsm_id())
        # Explicit loop instead of side-effect map(): clearer, and map() is
        # lazy on Python 3 so the side effects would silently be skipped.
        for fsm_object in fsm_inst.get_fsm_objects():
            self.add_fsm_object(fsm_object)
        fsm_inst.set_fsm_engine(self)
        fsm_inst.generate_initial_event()

    def remove_fsm(self, fsm_inst):
        """Unregister all objects of `fsm_inst`."""
        # BUG FIX: the log message previously said "Adding FSM".
        self.logger.info("FsmEngine.removeFSM: Removing FSM %s" % fsm_inst.fsm_name)
        for fsm_object in fsm_inst.get_fsm_objects():
            self.del_fsm_object(fsm_object)

    def add_fsm_object(self, fsm_object):
        self.__register(fsm_object)

    def del_fsm_object(self, fsm_object):
        self.__unregister(fsm_object)

    def start_engine(self):
        """Run the select() loop forever, dispatching ready FSM objects."""
        self.logger.info("Starting fsm engine")
        self.__engineStarted = True
        while True:
            try:
                rfdl, wfdl, errfdl = select.select(self.rfd, [], [], TIMEOUT)
            except KeyboardInterrupt:
                exit()
            self.__collect_stats()
            if len(rfdl) == 0:
                # Timeout with nothing ready: run the idle hook.
                self.__idle()
                continue
            # TODO: sort the events based on priority
            for fd in rfdl:
                self.logger.debug("fsmEngine.start_engine: fd: %s" % fd)
                fsm_object = self.fsm_object_dict[fd]
                if callable(fsm_object.dispatch_func):
                    fsm_object.dispatch_func(fsm_object)
| gpl-2.0 |
UOMx/edx-platform | openedx/core/djangoapps/site_configuration/tests/test_models.py | 7 | 3462 | """
Tests for site configuration's django models.
"""
from django.test import TestCase
from django.db import IntegrityError, transaction
from django.contrib.sites.models import Site
from openedx.core.djangoapps.site_configuration.models import SiteConfigurationHistory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteConfigurationFactory
class SiteConfigurationTests(TestCase):
    """
    Tests for SiteConfiguration and its signals/receivers.
    """
    domain = 'site_configuration_post_save_receiver_example.com'
    name = 'site_configuration_post_save_receiver_example'

    @classmethod
    def setUpClass(cls):
        super(SiteConfigurationTests, cls).setUpClass()
        cls.site, _ = Site.objects.get_or_create(domain=cls.domain, name=cls.domain)

    def test_site_configuration_post_save_receiver(self):
        """
        Saving a new SiteConfiguration must record exactly one
        SiteConfigurationHistory entry.
        """
        configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        history_entries = SiteConfigurationHistory.objects.filter(
            site=configuration.site,
        ).all()
        # One save -> one history entry.
        self.assertEqual(len(history_entries), 1)

    def test_site_configuration_post_update_receiver(self):
        """
        Updating an existing SiteConfiguration must record a second
        SiteConfigurationHistory entry (one for create, one for update).
        """
        configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        configuration.values = {'test': 'test'}
        configuration.save()
        history_entries = SiteConfigurationHistory.objects.filter(
            site=configuration.site,
        ).all()
        # Initial save plus the update -> two history entries.
        self.assertEqual(len(history_entries), 2)

    def test_no_entry_is_saved_for_errors(self):
        """
        A save that fails with an error must not record any
        SiteConfigurationHistory entry.
        """
        configuration = SiteConfigurationFactory.create(
            site=self.site,
        )
        history_entries = SiteConfigurationHistory.objects.filter(
            site=configuration.site,
        ).all()
        # The successful save recorded one entry.
        self.assertEqual(len(history_entries), 1)
        with transaction.atomic():
            with self.assertRaises(IntegrityError):
                # Creating a duplicate configuration for the same site fails.
                configuration = SiteConfigurationFactory.create(
                    site=self.site,
                )
        history_entries = SiteConfigurationHistory.objects.filter(
            site=configuration.site,
        ).all()
        # The failed save must not have added an entry.
        self.assertEqual(len(history_entries), 1)
| agpl-3.0 |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/twisted/conch/test/test_keys.py | 12 | 54777 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.keys}.
"""
from __future__ import absolute_import, division
try:
import cryptography
except ImportError:
cryptography = None
skipCryptography = 'Cannot run without cryptography.'
try:
import Crypto.Cipher.DES3
import Crypto.PublicKey.RSA
import Crypto.PublicKey.DSA
except ImportError:
# we'll have to skip some tests without PyCypto
Crypto = None
skipPyCrypto = 'Cannot run without PyCrypto.'
try:
import pyasn1
except ImportError:
pyasn1 = None
if cryptography and pyasn1:
from twisted.conch.ssh import keys, common, sexpy
import base64
import os
from twisted.conch.test import keydata
from twisted.python import randbytes
from twisted.trial import unittest
from twisted.python.compat import long, _PY3
from incremental import Version
from twisted.python.filepath import FilePath
class ObjectTypeTests(unittest.TestCase):
    """
    Unit tests for the objectType method.
    """
    if cryptography is None:
        skip = skipCryptography
    if Crypto is None:
        skip = "Cannot run without PyCrypto."
    if _PY3:
        skip = "objectType is deprecated and is not being ported to Python 3."

    def getRSAKey(self):
        """
        Build the PyCrypto RSA key these tests operate on.

        @return: The RSA key to support the tests.
        @rtype: C{Crypto.PublicKey.RSA}
        """
        # Use lazy import as PyCrypto will be deprecated.
        from Crypto.PublicKey import RSA
        components = (
            keydata.RSAData['n'],
            keydata.RSAData['e'],
            keydata.RSAData['d'],
        )
        return RSA.construct(components)

    def getDSAKey(self):
        """
        Build the PyCrypto DSA key these tests operate on.

        @return: The DSA key to support the tests.
        @rtype: C{Crypto.PublicKey.DSA}
        """
        # Use lazy import as PyCrypto will be deprecated.
        from Crypto.PublicKey import DSA
        components = (
            keydata.DSAData['y'],
            keydata.DSAData['g'],
            keydata.DSAData['p'],
            keydata.DSAData['q'],
            keydata.DSAData['x'],
        )
        return DSA.construct(components)

    def checkDeprecation(self):
        """
        Assert that exactly one deprecation warning for C{objectType} was
        emitted since the last flush.
        """
        warnings = self.flushWarnings()
        self.assertEqual(1, len(warnings))
        self.assertIs(DeprecationWarning, warnings[0]['category'])
        self.assertEqual(
            'twisted.conch.ssh.keys.objectType was deprecated in '
            'Twisted 15.5.0',
            warnings[0]['message'])

    def test_objectType_rsa(self):
        """
        C{ssh-rsa} is the type of the RSA keys.
        """
        self.assertEqual(keys.objectType(self.getRSAKey()), b'ssh-rsa')
        self.checkDeprecation()

    def test_objectType_dsa(self):
        """
        C{ssh-dss} is the type of the DSA keys.
        """
        self.assertEqual(keys.objectType(self.getDSAKey()), b'ssh-dss')
        self.checkDeprecation()

    def test_objectKey_none(self):
        """
        A BadKeyError is raised when getting the type of L{None}.
        """
        self.assertRaises(keys.BadKeyError, keys.objectType, None)
        self.checkDeprecation()

    def test_deprecation(self):
        """
        Calling C{objectType} at all emits the deprecation warning.
        """
        keys.objectType(self.getRSAKey())
        self.checkDeprecation()
class KeyTests(unittest.TestCase):
if cryptography is None:
skip = skipCryptography
if pyasn1 is None:
skip = "Cannot run without PyASN1"
    def setUp(self):
        """
        Build the raw RSA, DSA and EC (nistp256/384/521) key objects used by
        the tests from the shared component dictionaries in
        L{twisted.conch.test.keydata}, store known-good wire-format
        signatures, and write a temporary key file to disk.
        """
        self.rsaObj = keys.Key._fromRSAComponents(
            n=keydata.RSAData['n'],
            e=keydata.RSAData['e'],
            d=keydata.RSAData['d'],
            p=keydata.RSAData['p'],
            q=keydata.RSAData['q'],
            u=keydata.RSAData['u'],
            )._keyObject
        self.dsaObj = keys.Key._fromDSAComponents(
            y=keydata.DSAData['y'],
            p=keydata.DSAData['p'],
            q=keydata.DSAData['q'],
            g=keydata.DSAData['g'],
            x=keydata.DSAData['x'],
            )._keyObject
        self.ecObj = keys.Key._fromECComponents(
            x=keydata.ECDatanistp256['x'],
            y=keydata.ECDatanistp256['y'],
            privateValue=keydata.ECDatanistp256['privateValue'],
            curve=keydata.ECDatanistp256['curve']
            )._keyObject
        self.ecObj384 = keys.Key._fromECComponents(
            x=keydata.ECDatanistp384['x'],
            y=keydata.ECDatanistp384['y'],
            privateValue=keydata.ECDatanistp384['privateValue'],
            curve=keydata.ECDatanistp384['curve']
            )._keyObject
        self.ecObj521 = keys.Key._fromECComponents(
            x=keydata.ECDatanistp521['x'],
            y=keydata.ECDatanistp521['y'],
            privateValue=keydata.ECDatanistp521['privateValue'],
            curve=keydata.ECDatanistp521['curve']
            )._keyObject
        # Precomputed wire-format signatures matching the keys above, used
        # by the verify tests.
        self.rsaSignature = (b'\x00\x00\x00\x07ssh-rsa\x00'
            b'\x00\x00`N\xac\xb4@qK\xa0(\xc3\xf2h \xd3\xdd\xee6Np\x9d_'
            b'\xb0>\xe3\x0c(L\x9d{\txUd|!\xf6m\x9c\xd3\x93\x842\x7fU'
            b'\x05\xf4\xf7\xfaD\xda\xce\x81\x8ea\x7f=Y\xed*\xb7\xba\x81'
            b'\xf2\xad\xda\xeb(\x97\x03S\x08\x81\xc7\xb1\xb7\xe6\xe3'
            b'\xcd*\xd4\xbd\xc0wt\xf7y\xcd\xf0\xb7\x7f\xfb\x1e>\xf9r'
            b'\x8c\xba')
        self.dsaSignature = (
            b'\x00\x00\x00\x07ssh-dss\x00\x00\x00(?\xc7\xeb\x86;\xd5TFA\xb4'
            b'\xdf\x0c\xc4E@4,d\xbc\t\xd9\xae\xdd[\xed-\x82nQ\x8cf\x9b\xe8\xe1'
            b'jrg\x84p<'
            )
        # Make "random" bytes deterministic so signing output is stable.
        self.patch(randbytes, 'secureRandom', lambda x: b'\xff' * x)
        self.keyFile = self.mktemp()
        with open(self.keyFile, 'wb') as f:
            f.write(keydata.privateRSA_lsh)
def tearDown(self):
os.unlink(self.keyFile)
def test_size(self):
"""
The L{keys.Key.size} method returns the size of key object in bits.
"""
self.assertEqual(keys.Key(self.rsaObj).size(), 768)
self.assertEqual(keys.Key(self.dsaObj).size(), 1024)
self.assertEqual(keys.Key(self.ecObj).size(), 256)
self.assertEqual(keys.Key(self.ecObj384).size(), 384)
self.assertEqual(keys.Key(self.ecObj521).size(), 521)
def test__guessStringType(self):
"""
Test that the _guessStringType method guesses string types
correctly.
"""
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicECDSA_openssh),
'public_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateRSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateDSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateECDSA_openssh), 'private_openssh')
self.assertEqual(keys.Key._guessStringType(keydata.publicRSA_lsh),
'public_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.publicDSA_lsh),
'public_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.privateRSA_lsh),
'private_lsh')
self.assertEqual(keys.Key._guessStringType(keydata.privateDSA_lsh),
'private_lsh')
self.assertEqual(keys.Key._guessStringType(
keydata.privateRSA_agentv3), 'agentv3')
self.assertEqual(keys.Key._guessStringType(
keydata.privateDSA_agentv3), 'agentv3')
self.assertEqual(keys.Key._guessStringType(
b'\x00\x00\x00\x07ssh-rsa\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType(
b'\x00\x00\x00\x07ssh-dss\x00\x00\x00\x01\x01'),
'blob')
self.assertEqual(keys.Key._guessStringType(b'not a key'),
None)
def test_isPublic(self):
"""
The L{keys.Key.isPublic} method returns True for public keys
otherwise False.
"""
rsaKey = keys.Key.fromString(keydata.privateRSA_openssh)
dsaKey = keys.Key.fromString(keydata.privateDSA_openssh)
ecdsaKey = keys.Key.fromString(keydata.privateECDSA_openssh)
self.assertTrue(rsaKey.public().isPublic())
self.assertFalse(rsaKey.isPublic())
self.assertTrue(dsaKey.public().isPublic())
self.assertFalse(dsaKey.isPublic())
self.assertTrue(ecdsaKey.public().isPublic())
self.assertFalse(ecdsaKey.isPublic())
    def _testPublicPrivateFromString(self, public, private, type, data):
        # Exercise both serializations of the same key: the public half and
        # the full private key must each parse to the expected type and
        # component values.
        self._testPublicFromString(public, type, data)
        self._testPrivateFromString(private, type, data)
def _testPublicFromString(self, public, type, data):
publicKey = keys.Key.fromString(public)
self.assertTrue(publicKey.isPublic())
self.assertEqual(publicKey.type(), type)
for k, v in publicKey.data().items():
self.assertEqual(data[k], v)
def _testPrivateFromString(self, private, type, data):
privateKey = keys.Key.fromString(private)
self.assertFalse(privateKey.isPublic())
self.assertEqual(privateKey.type(), type)
for k, v in data.items():
self.assertEqual(privateKey.data()[k], v)
    def test_fromOpenSSH(self):
        """
        Test that keys are correctly generated from OpenSSH strings.
        """
        # Each public/private pair must parse to matching components.
        self._testPublicPrivateFromString(keydata.publicECDSA_openssh,
            keydata.privateECDSA_openssh, 'EC', keydata.ECDatanistp256)
        self._testPublicPrivateFromString(keydata.publicRSA_openssh,
            keydata.privateRSA_openssh, 'RSA', keydata.RSAData)
        # Decrypting with the right passphrase yields a key equal to the
        # unencrypted serialization.
        self.assertEqual(keys.Key.fromString(
            keydata.privateRSA_openssh_encrypted,
            passphrase=b'encrypted'),
            keys.Key.fromString(keydata.privateRSA_openssh))
        # An alternate-but-equivalent OpenSSH serialization parses to an
        # equal key.
        self.assertEqual(keys.Key.fromString(
            keydata.privateRSA_openssh_alternate),
            keys.Key.fromString(keydata.privateRSA_openssh))
        self._testPublicPrivateFromString(keydata.publicDSA_openssh,
            keydata.privateDSA_openssh, 'DSA', keydata.DSAData)
    def test_fromOpenSSHErrors(self):
        """
        Tests for invalid key types.
        """
        # The BEGIN marker names an unknown "FOO" key type (and deliberately
        # mismatches the END marker), so parsing must fail.
        badKey = b"""-----BEGIN FOO PRIVATE KEY-----
MIGkAgEBBDAtAi7I8j73WCX20qUM5hhHwHuFzYWYYILs2Sh8UZ+awNkARZ/Fu2LU
LLl5RtOQpbWgBwYFK4EEACKhZANiAATU17sA9P5FRwSknKcFsjjsk0+E3CeXPYX0
Tk/M0HK3PpWQWgrO8JdRHP9eFE9O/23P8BumwFt7F/AvPlCzVd35VfraFT0o4cCW
G0RqpQ+np31aKmeJshkcYALEchnU+tQ=
-----END EC PRIVATE KEY-----"""
        self.assertRaises(keys.BadKeyError,
            keys.Key._fromString_PRIVATE_OPENSSH, badKey, None)
    def test_fromOpenSSH_with_whitespace(self):
        """
        If key strings have trailing whitespace, it should be ignored.
        """
        # from bug #3391, since our test key data doesn't have
        # an issue with appended newlines
        privateDSAData = b"""-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
        # The same data with a trailing newline must parse to an equal key.
        self.assertEqual(keys.Key.fromString(privateDSAData),
                         keys.Key.fromString(privateDSAData + b'\n'))
def test_fromNewerOpenSSH(self):
"""
Newer versions of OpenSSH generate encrypted keys which have a longer
IV than the older versions. These newer keys are also loaded.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh_encrypted_aes,
passphrase=b'testxp')
self.assertEqual(key.type(), 'RSA')
key2 = keys.Key.fromString(
keydata.privateRSA_openssh_encrypted_aes + b'\n',
passphrase=b'testxp')
self.assertEqual(key, key2)
    def test_fromOpenSSH_windows_line_endings(self):
        """
        Test that keys are correctly generated from OpenSSH strings with Windows
        line endings.
        """
        privateDSAData = b"""-----BEGIN DSA PRIVATE KEY-----
MIIBuwIBAAKBgQDylESNuc61jq2yatCzZbenlr9llG+p9LhIpOLUbXhhHcwC6hrh
EZIdCKqTO0USLrGoP5uS9UHAUoeN62Z0KXXWTwOWGEQn/syyPzNJtnBorHpNUT9D
Qzwl1yUa53NNgEctpo4NoEFOx8PuU6iFLyvgHCjNn2MsuGuzkZm7sI9ZpQIVAJiR
9dPc08KLdpJyRxz8T74b4FQRAoGAGBc4Z5Y6R/HZi7AYM/iNOM8su6hrk8ypkBwR
a3Dbhzk97fuV3SF1SDrcQu4zF7c4CtH609N5nfZs2SUjLLGPWln83Ysb8qhh55Em
AcHXuROrHS/sDsnqu8FQp86MaudrqMExCOYyVPE7jaBWW+/JWFbKCxmgOCSdViUJ
esJpBFsCgYEA7+jtVvSt9yrwsS/YU1QGP5wRAiDYB+T5cK4HytzAqJKRdC5qS4zf
C7R0eKcDHHLMYO39aPnCwXjscisnInEhYGNblTDyPyiyNxAOXuC8x7luTmwzMbNJ
/ow0IqSj0VF72VJN9uSoPpFd4lLT0zN8v42RWja0M8ohWNf+YNJluPgCFE0PT4Vm
SUrCyZXsNh6VXwjs3gKQ
-----END DSA PRIVATE KEY-----"""
        # Rewriting every LF as CRLF must not change the parsed key.
        self.assertEqual(
            keys.Key.fromString(privateDSAData),
            keys.Key.fromString(privateDSAData.replace(b'\n', b'\r\n')))
def test_fromLSHPublicUnsupportedType(self):
"""
C{BadKeyError} exception is raised when public key has an unknown
type.
"""
sexp = sexpy.pack([[b'public-key', [b'bad-key', [b'p', b'2']]]])
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString, data=b'{' + base64.encodestring(sexp) + b'}',
)
def test_fromLSHPrivateUnsupportedType(self):
"""
C{BadKeyError} exception is raised when private key has an unknown
type.
"""
sexp = sexpy.pack([[b'private-key', [b'bad-key', [b'p', b'2']]]])
self.assertRaises(
keys.BadKeyError,
keys.Key.fromString, sexp,
)
def test_fromLSHRSA(self):
"""
RSA public and private keys can be generated from a LSH strings.
"""
self._testPublicPrivateFromString(
keydata.publicRSA_lsh,
keydata.privateRSA_lsh,
'RSA',
keydata.RSAData,
)
def test_fromLSHDSA(self):
"""
DSA public and private key can be generated from LSHs.
"""
self._testPublicPrivateFromString(
keydata.publicDSA_lsh,
keydata.privateDSA_lsh,
'DSA',
keydata.DSAData,
)
def test_fromAgentv3(self):
"""
Test that keys are correctly generated from Agent v3 strings.
"""
self._testPrivateFromString(keydata.privateRSA_agentv3, 'RSA',
keydata.RSAData)
self._testPrivateFromString(keydata.privateDSA_agentv3, 'DSA',
keydata.DSAData)
self.assertRaises(keys.BadKeyError, keys.Key.fromString,
b'\x00\x00\x00\x07ssh-foo'+ b'\x00\x00\x00\x01\x01'*5)
    def test_fromStringErrors(self):
        """
        keys.Key.fromString should raise BadKeyError when the key is invalid.
        """
        self.assertRaises(keys.BadKeyError, keys.Key.fromString, b'')
        # no key data with a bad key type
        self.assertRaises(keys.BadKeyError, keys.Key.fromString, b'',
                'bad_type')
        # trying to decrypt a key which doesn't support encryption
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                keydata.publicRSA_lsh, passphrase = b'unencrypted')
        # trying to decrypt a key with the wrong passphrase
        self.assertRaises(keys.EncryptedKeyError, keys.Key.fromString,
                keys.Key(self.rsaObj).toString('openssh', b'encrypted'))
        # key with no key data
        self.assertRaises(keys.BadKeyError, keys.Key.fromString,
                b'-----BEGIN RSA KEY-----\nwA==\n')
        # key with invalid DEK Info: the DEK-Info header below is not in the
        # "ALGORITHM,IV" form.
        self.assertRaises(
            keys.BadKeyError, keys.Key.fromString,
            b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: weird type
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
        # key with invalid encryption type: "FOO-123-BAR" is not a known
        # cipher name.
        self.assertRaises(
            keys.BadKeyError, keys.Key.fromString,
            b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: FOO-123-BAR,01234567
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
        # key with bad IV (AES): the IV hex string is too short.
        self.assertRaises(
            keys.BadKeyError, keys.Key.fromString,
            b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
        # key with bad IV (DES3): the IV hex string is too short.
        self.assertRaises(
            keys.BadKeyError, keys.Key.fromString,
            b"""-----BEGIN ENCRYPTED RSA KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,01234
4Ed/a9OgJWHJsne7yOGWeWMzHYKsxuP9w1v0aYcp+puS75wvhHLiUnNwxz0KDi6n
T3YkKLBsoCWS68ApR2J9yeQ6R+EyS+UQDrO9nwqo3DB5BT3Ggt8S1wE7vjNLQD0H
g/SJnlqwsECNhh8aAx+Ag0m3ZKOZiRD5mCkcDQsZET7URSmFytDKOjhFn3u6ZFVB
sXrfpYc6TJtOQlHd/52JB6aAbjt6afSv955Z7enIi+5yEJ5y7oYQTaE5zrFMP7N5
9LbfJFlKXxEddy/DErRLxEjmC+t4svHesoJKc2jjjyNPiOoGGF3kJXea62vsjdNV
gMK5Eged3TBVIk2dv8rtJUvyFeCUtjQ1UJZIebScRR47KrbsIpCmU8I4/uHWm5hW
0mOwvdx1L/mqx/BHqVU9Dw2COhOdLbFxlFI92chkovkmNk4P48ziyVnpm7ME22sE
vfCMsyirdqB1mrL4CSM7FXONv+CgfBfeYVkYW8RfJac9U1L/O+JNn7yee414O/rS
hRYw4UdWnH6Gg6niklVKWNY0ZwUZC8zgm2iqy8YCYuneS37jC+OEKP+/s6HSKuqk
2bzcl3/TcZXNSM815hnFRpz0anuyAsvwPNRyvxG2/DacJHL1f6luV4B0o6W410yf
qXQx01DLo7nuyhJqoH3UGCyyXB+/QUs0mbG2PAEn3f5dVs31JMdbt+PrxURXXjKk
4cexpUcIpqqlfpIRe3RD0sDVbH4OXsGhi2kiTfPZu7mgyFxKopRbn1KwU1qKinfY
EU9O4PoTak/tPT+5jFNhaP+HrURoi/pU8EAUNSktl7xAkHYwkN/9Cm7DeBghgf3n
8+tyCGYDsB5utPD0/Xe9yx0Qhc/kMm4xIyQDyA937dk3mUvLC9vulnAP8I+Izim0
fZ182+D1bWwykoD0997mUHG/AUChWR01V1OLwRyPv2wUtiS8VNG76Y2aqKlgqP1P
V+IvIEqR4ERvSBVFzXNF8Y6j/sVxo8+aZw+d0L1Ns/R55deErGg3B8i/2EqGd3r+
0jps9BqFHHWW87n3VyEB3jWCMj8Vi2EJIfa/7pSaViFIQn8LiBLf+zxG5LTOToK5
xkN42fReDcqi3UNfKNGnv4dsplyTR2hyx65lsj4bRKDGLKOuB1y7iB0AGb0LtcAI
dcsVlcCeUquDXtqKvRnwfIMg+ZunyjqHBhj3qgRgbXbT6zjaSdNnih569aTg0Vup
VykzZ7+n/KVcGLmvX0NesdoI7TKbq4TnEIOynuG5Sf+2GpARO5bjcWKSZeN/Ybgk
gccf8Cqf6XWqiwlWd0B7BR3SymeHIaSymC45wmbgdstrbk7Ppa2Tp9AZku8M2Y7c
8mY9b+onK075/ypiwBm4L4GRNTFLnoNQJXx0OSl4FNRWsn6ztbD+jZhu8Seu10Jw
SEJVJ+gmTKdRLYORJKyqhDet6g7kAxs4EoJ25WsOnX5nNr00rit+NkMPA7xbJT+7
CfI51GQLw7pUPeO2WNt6yZO/YkzZrqvTj5FEwybkUyBv7L0gkqu9wjfDdUw0fVHE
xEm4DxjEoaIp8dW/JOzXQ2EF+WaSOgdYsw3Ac+rnnjnNptCdOEDGP6QBkt+oXj4P
-----END RSA PRIVATE KEY-----""", passphrase='encrypted')
def test_fromFile(self):
"""
Test that fromFile works correctly.
"""
self.assertEqual(keys.Key.fromFile(self.keyFile),
keys.Key.fromString(keydata.privateRSA_lsh))
self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
self.keyFile, 'bad_type')
self.assertRaises(keys.BadKeyError, keys.Key.fromFile,
self.keyFile, passphrase='unencrypted')
def test_init(self):
"""
Test that the PublicKey object is initialized correctly.
"""
obj = keys.Key._fromRSAComponents(n=long(5), e=long(3))._keyObject
key = keys.Key(obj)
self.assertEqual(key._keyObject, obj)
def test_equal(self):
"""
Test that Key objects are compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(
keys.Key._fromRSAComponents(n=long(5), e=long(3))._keyObject)
dsa = keys.Key(self.dsaObj)
self.assertTrue(rsa1 == rsa2)
self.assertFalse(rsa1 == rsa3)
self.assertFalse(rsa1 == dsa)
self.assertFalse(rsa1 == object)
self.assertFalse(rsa1 == None)
def test_notEqual(self):
"""
Test that Key objects are not-compared correctly.
"""
rsa1 = keys.Key(self.rsaObj)
rsa2 = keys.Key(self.rsaObj)
rsa3 = keys.Key(
keys.Key._fromRSAComponents(n=long(5), e=long(3))._keyObject)
dsa = keys.Key(self.dsaObj)
self.assertFalse(rsa1 != rsa2)
self.assertTrue(rsa1 != rsa3)
self.assertTrue(rsa1 != dsa)
self.assertTrue(rsa1 != object)
self.assertTrue(rsa1 != None)
def test_dataError(self):
"""
The L{keys.Key.data} method raises RuntimeError for bad keys.
"""
badKey = keys.Key(b'')
self.assertRaises(RuntimeError, badKey.data)
def test_fingerprintdefault(self):
"""
Test that the fingerprint method returns fingerprint in
L{FingerprintFormats.MD5-HEX} format by default.
"""
self.assertEqual(keys.Key(self.rsaObj).fingerprint(),
'3d:13:5f:cb:c9:79:8a:93:06:27:65:bc:3d:0b:8f:af')
self.assertEqual(keys.Key(self.dsaObj).fingerprint(),
'63:15:b3:0e:e6:4f:50:de:91:48:3d:01:6b:b3:13:c1')
def test_fingerprint_md5_hex(self):
"""
fingerprint method generates key fingerprint in
L{FingerprintFormats.MD5-HEX} format if explicitly specified.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(
keys.FingerprintFormats.MD5_HEX),
'3d:13:5f:cb:c9:79:8a:93:06:27:65:bc:3d:0b:8f:af')
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(
keys.FingerprintFormats.MD5_HEX),
'63:15:b3:0e:e6:4f:50:de:91:48:3d:01:6b:b3:13:c1')
def test_fingerprintsha256(self):
"""
fingerprint method generates key fingerprint in
L{FingerprintFormats.SHA256-BASE64} format if explicitly specified.
"""
self.assertEqual(
keys.Key(self.rsaObj).fingerprint(
keys.FingerprintFormats.SHA256_BASE64),
'ryaugIFT0B8ItuszldMEU7q14rG/wj9HkRosMeBWkts=')
self.assertEqual(
keys.Key(self.dsaObj).fingerprint(
keys.FingerprintFormats.SHA256_BASE64),
'Wz5o2YbKyxOEcJn1au/UaALSVruUzfz0vaLI1xiIGyY=')
def test_fingerprintBadFormat(self):
"""
A C{BadFingerPrintFormat} error is raised when unsupported
formats are requested.
"""
with self.assertRaises(keys.BadFingerPrintFormat) as em:
keys.Key(self.rsaObj).fingerprint('sha256-base')
self.assertEqual('Unsupported fingerprint format: sha256-base',
em.exception.args[0])
def test_type(self):
"""
Test that the type method returns the correct type for an object.
"""
self.assertEqual(keys.Key(self.rsaObj).type(), 'RSA')
self.assertEqual(keys.Key(self.rsaObj).sshType(), b'ssh-rsa')
self.assertEqual(keys.Key(self.dsaObj).type(), 'DSA')
self.assertEqual(keys.Key(self.dsaObj).sshType(), b'ssh-dss')
self.assertEqual(keys.Key(self.ecObj).type(), 'EC')
self.assertEqual(keys.Key(self.ecObj).sshType(),
keydata.ECDatanistp256['curve'])
self.assertRaises(RuntimeError, keys.Key(None).type)
self.assertRaises(RuntimeError, keys.Key(None).sshType)
self.assertRaises(RuntimeError, keys.Key(self).type)
self.assertRaises(RuntimeError, keys.Key(self).sshType)
def test_fromBlobUnsupportedType(self):
"""
A C{BadKeyError} error is raised whey the blob has an unsupported
key type.
"""
badBlob = common.NS(b'ssh-bad')
self.assertRaises(keys.BadKeyError,
keys.Key.fromString, badBlob)
def test_fromBlobRSA(self):
"""
A public RSA key is correctly generated from a public key blob.
"""
rsaPublicData = {
'n': keydata.RSAData['n'],
'e': keydata.RSAData['e'],
}
rsaBlob = (
common.NS(b'ssh-rsa') +
common.MP(rsaPublicData['e']) +
common.MP(rsaPublicData['n'])
)
rsaKey = keys.Key.fromString(rsaBlob)
self.assertTrue(rsaKey.isPublic())
self.assertEqual(rsaPublicData, rsaKey.data())
def test_fromBlobDSA(self):
"""
A public DSA key is correctly generated from a public key blob.
"""
dsaPublicData = {
'p': keydata.DSAData['p'],
'q': keydata.DSAData['q'],
'g': keydata.DSAData['g'],
'y': keydata.DSAData['y'],
}
dsaBlob = (
common.NS(b'ssh-dss') +
common.MP(dsaPublicData['p']) +
common.MP(dsaPublicData['q']) +
common.MP(dsaPublicData['g']) +
common.MP(dsaPublicData['y'])
)
dsaKey = keys.Key.fromString(dsaBlob)
self.assertTrue(dsaKey.isPublic())
self.assertEqual(dsaPublicData, dsaKey.data())
    def test_fromBlobECDSA(self):
        """
        Key.fromString generates ECDSA keys from blobs.
        """
        from cryptography import utils
        ecPublicData = {
            'x': keydata.ECDatanistp256['x'],
            'y': keydata.ECDatanistp256['y'],
            'curve': keydata.ECDatanistp256['curve']
        }
        # Blob layout: key-type string, curve name (last 8 bytes of the
        # type string), then the point encoded as 0x04 || X || Y with
        # 32-byte coordinates for nistp256.
        ecblob = (common.NS(ecPublicData['curve']) +
                  common.NS(ecPublicData['curve'][-8:]) +
                  common.NS(b'\x04' +
                      utils.int_to_bytes(ecPublicData['x'], 32) +
                      utils.int_to_bytes(ecPublicData['y'], 32))
                  )
        eckey = keys.Key.fromString(ecblob)
        self.assertTrue(eckey.isPublic())
        self.assertEqual(ecPublicData, eckey.data())
def test_fromPrivateBlobUnsupportedType(self):
"""
C{BadKeyError} is raised when loading a private blob with an
unsupported type.
"""
badBlob = common.NS(b'ssh-bad')
self.assertRaises(
keys.BadKeyError, keys.Key._fromString_PRIVATE_BLOB, badBlob)
def test_fromPrivateBlobRSA(self):
"""
A private RSA key is correctly generated from a private key blob.
"""
rsaBlob = (
common.NS(b'ssh-rsa') +
common.MP(keydata.RSAData['n']) +
common.MP(keydata.RSAData['e']) +
common.MP(keydata.RSAData['d']) +
common.MP(keydata.RSAData['u']) +
common.MP(keydata.RSAData['p']) +
common.MP(keydata.RSAData['q'])
)
rsaKey = keys.Key._fromString_PRIVATE_BLOB(rsaBlob)
self.assertFalse(rsaKey.isPublic())
self.assertEqual(keydata.RSAData, rsaKey.data())
def test_fromPrivateBlobDSA(self):
"""
A private DSA key is correctly generated from a private key blob.
"""
dsaBlob = (
common.NS(b'ssh-dss') +
common.MP(keydata.DSAData['p']) +
common.MP(keydata.DSAData['q']) +
common.MP(keydata.DSAData['g']) +
common.MP(keydata.DSAData['y']) +
common.MP(keydata.DSAData['x'])
)
dsaKey = keys.Key._fromString_PRIVATE_BLOB(dsaBlob)
self.assertFalse(dsaKey.isPublic())
self.assertEqual(keydata.DSAData, dsaKey.data())
def test_fromPrivateBlobECDSA(self):
"""
A private EC key is correctly generated from a private key blob.
"""
ecblob = (
common.NS(keydata.ECDatanistp256['curve']) +
common.MP(keydata.ECDatanistp256['x']) +
common.MP(keydata.ECDatanistp256['y']) +
common.MP(keydata.ECDatanistp256['privateValue'])
)
eckey = keys.Key._fromString_PRIVATE_BLOB(ecblob)
self.assertFalse(eckey.isPublic())
self.assertEqual(keydata.ECDatanistp256, eckey.data())
def test_blobRSA(self):
"""
Return the over-the-wire SSH format of the RSA public key.
"""
self.assertEqual(
keys.Key(self.rsaObj).blob(),
common.NS(b'ssh-rsa') +
common.MP(self.rsaObj.private_numbers().public_numbers.e) +
common.MP(self.rsaObj.private_numbers().public_numbers.n)
)
def test_blobDSA(self):
"""
Return the over-the-wire SSH format of the DSA public key.
"""
publicNumbers = self.dsaObj.private_numbers().public_numbers
self.assertEqual(
keys.Key(self.dsaObj).blob(),
common.NS(b'ssh-dss') +
common.MP(publicNumbers.parameter_numbers.p) +
common.MP(publicNumbers.parameter_numbers.q) +
common.MP(publicNumbers.parameter_numbers.g) +
common.MP(publicNumbers.y)
)
    def test_blobEC(self):
        """
        Return the over-the-wire SSH format of the EC public key.
        """
        from cryptography import utils
        # Coordinates are padded to the curve size in whole bytes.
        byteLength = (self.ecObj.curve.key_size + 7) // 8
        # Expected layout: key-type string, curve name (last 8 bytes of the
        # type string), then the point encoded as 0x04 || X || Y.
        self.assertEqual(
            keys.Key(self.ecObj).blob(),
            common.NS(keydata.ECDatanistp256['curve']) +
            common.NS(keydata.ECDatanistp256['curve'][-8:]) +
            common.NS(b'\x04' +
                utils.int_to_bytes(
                    self.ecObj.private_numbers().public_numbers.x, byteLength) +
                utils.int_to_bytes(
                    self.ecObj.private_numbers().public_numbers.y, byteLength))
            )
def test_blobNoKey(self):
"""
C{RuntimeError} is raised when the blob is requested for a Key
which is not wrapping anything.
"""
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.blob)
    def test_privateBlobRSA(self):
        """
        L{keys.Key.privateBlob} returns the SSH protocol-level format of an
        RSA private key.
        """
        from cryptography.hazmat.primitives.asymmetric import rsa
        numbers = self.rsaObj.private_numbers()
        # u is the CRT coefficient (inverse of q mod p), recomputed here
        # from q and p rather than read off the key object.
        u = rsa.rsa_crt_iqmp(numbers.q, numbers.p)
        # Private wire layout: algorithm name then n, e, d, u, p, q.
        self.assertEqual(
            keys.Key(self.rsaObj).privateBlob(),
            common.NS(b'ssh-rsa') +
            common.MP(self.rsaObj.private_numbers().public_numbers.n) +
            common.MP(self.rsaObj.private_numbers().public_numbers.e) +
            common.MP(self.rsaObj.private_numbers().d) +
            common.MP(u) +
            common.MP(self.rsaObj.private_numbers().p) +
            common.MP(self.rsaObj.private_numbers().q)
            )
def test_privateBlobDSA(self):
"""
L{keys.Key.privateBlob} returns the SSH protocol-level format of a DSA
private key.
"""
publicNumbers = self.dsaObj.private_numbers().public_numbers
self.assertEqual(
keys.Key(self.dsaObj).privateBlob(),
common.NS(b'ssh-dss') +
common.MP(publicNumbers.parameter_numbers.p) +
common.MP(publicNumbers.parameter_numbers.q) +
common.MP(publicNumbers.parameter_numbers.g) +
common.MP(publicNumbers.y) +
common.MP(self.dsaObj.private_numbers().x)
)
    def test_privateBlobEC(self):
        """
        L{keys.Key.privateBlob} returns the SSH protocol-level format of EC
        private key.
        """
        # Private wire layout: curve name then x, y, privateValue.
        self.assertEqual(
            keys.Key(self.ecObj).privateBlob(),
            common.NS(keydata.ECDatanistp256['curve']) +
            common.MP(self.ecObj.private_numbers().public_numbers.x) +
            common.MP(self.ecObj.private_numbers().public_numbers.y) +
            common.MP(self.ecObj.private_numbers().private_value)
            )
def test_privateBlobNoKeyObject(self):
"""
Raises L{RuntimeError} if the underlying key object does not exists.
"""
badKey = keys.Key(None)
self.assertRaises(RuntimeError, badKey.privateBlob)
def test_toOpenSSHRSA(self):
"""
L{keys.Key.toString} serializes an RSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateRSA_agentv3)
self.assertEqual(key.toString('openssh'), keydata.privateRSA_openssh)
self.assertEqual(key.toString('openssh', b'encrypted'),
keydata.privateRSA_openssh_encrypted)
self.assertEqual(key.public().toString('openssh'),
keydata.publicRSA_openssh[:-8]) # no comment
self.assertEqual(key.public().toString('openssh', b'comment'),
keydata.publicRSA_openssh)
def test_toOpenSSHDSA(self):
"""
L{keys.Key.toString} serializes a DSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateDSA_lsh)
self.assertEqual(key.toString('openssh'), keydata.privateDSA_openssh)
self.assertEqual(key.public().toString('openssh', b'comment'),
keydata.publicDSA_openssh)
self.assertEqual(key.public().toString('openssh'),
keydata.publicDSA_openssh[:-8]) # no comment
def test_toOpenSSHECDSA(self):
"""
L{keys.Key.toString} serializes a ECDSA key in OpenSSH format.
"""
key = keys.Key.fromString(keydata.privateECDSA_openssh)
self.assertEqual(key.public().toString('openssh', b'comment'),
keydata.publicECDSA_openssh)
self.assertEqual(key.public().toString('openssh'),
keydata.publicECDSA_openssh[:-8]) # no comment
def test_toLSHRSA(self):
"""
L{keys.Key.toString} serializes an RSA key in LSH format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString('lsh'), keydata.privateRSA_lsh)
self.assertEqual(key.public().toString('lsh'),
keydata.publicRSA_lsh)
def test_toLSHDSA(self):
"""
L{keys.Key.toString} serializes a DSA key in LSH format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString('lsh'), keydata.privateDSA_lsh)
self.assertEqual(key.public().toString('lsh'),
keydata.publicDSA_lsh)
def test_toAgentv3RSA(self):
"""
L{keys.Key.toString} serializes an RSA key in Agent v3 format.
"""
key = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(key.toString('agentv3'), keydata.privateRSA_agentv3)
def test_toAgentv3DSA(self):
"""
L{keys.Key.toString} serializes a DSA key in Agent v3 format.
"""
key = keys.Key.fromString(keydata.privateDSA_openssh)
self.assertEqual(key.toString('agentv3'), keydata.privateDSA_agentv3)
def test_toStringErrors(self):
"""
L{keys.Key.toString} raises L{keys.BadKeyError} when passed an invalid
format type.
"""
self.assertRaises(keys.BadKeyError, keys.Key(self.rsaObj).toString,
'bad_type')
def test_signAndVerifyRSA(self):
"""
Signed data can be verified using RSA.
"""
data = b'some-data'
key = keys.Key.fromString(keydata.privateRSA_openssh)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_signAndVerifyDSA(self):
"""
Signed data can be verified using DSA.
"""
data = b'some-data'
key = keys.Key.fromString(keydata.privateDSA_openssh)
signature = key.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
def test_signAndVerifyEC(self):
"""
Signed data can be verified using EC.
"""
data = b'some-data'
key = keys.Key.fromString(keydata.privateECDSA_openssh)
signature = key.sign(data)
key384 = keys.Key.fromString(keydata.privateECDSA_openssh384)
signature384 = key384.sign(data)
key521 = keys.Key.fromString(keydata.privateECDSA_openssh521)
signature521 = key521.sign(data)
self.assertTrue(key.public().verify(signature, data))
self.assertTrue(key.verify(signature, data))
self.assertTrue(key384.public().verify(signature384, data))
self.assertTrue(key384.verify(signature384, data))
self.assertTrue(key521.public().verify(signature521, data))
self.assertTrue(key521.verify(signature521, data))
def test_verifyRSA(self):
"""
A known-good RSA signature verifies successfully.
"""
key = keys.Key.fromString(keydata.publicRSA_openssh)
self.assertTrue(key.verify(self.rsaSignature, b''))
self.assertFalse(key.verify(self.rsaSignature, b'a'))
self.assertFalse(key.verify(self.dsaSignature, b''))
def test_verifyDSA(self):
"""
A known-good DSA signature verifies successfully.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature, b''))
self.assertFalse(key.verify(self.dsaSignature, b'a'))
self.assertFalse(key.verify(self.rsaSignature, b''))
def test_verifyDSANoPrefix(self):
"""
Some commercial SSH servers send DSA keys as 2 20-byte numbers;
they are still verified as valid keys.
"""
key = keys.Key.fromString(keydata.publicDSA_openssh)
self.assertTrue(key.verify(self.dsaSignature[-40:], b''))
    def test_reprPrivateRSA(self):
        """
        The repr of a L{keys.Key} contains all of the RSA components for an RSA
        private key.
        """
        # The expected text is whitespace-sensitive: each component byte line
        # is prefixed with a literal tab, so the literal must not be re-wrapped.
        self.assertEqual(repr(keys.Key(self.rsaObj)),
"""<RSA Private Key (768 bits)
attr d:
\t6e:1f:b5:55:97:eb:ed:67:ed:2b:99:6e:ec:c1:ed:
\ta8:4d:52:d6:f3:d6:65:06:04:df:e5:54:9f:cc:89:
\t00:3c:9b:67:87:ec:65:a0:ab:cd:6f:65:90:8a:97:
\t90:4d:c6:21:8f:a8:8d:d8:59:86:43:b5:81:b1:b4:
\td7:5f:2c:22:0a:61:c1:25:8a:47:12:b4:9a:f8:7a:
\t11:1c:4a:a8:8b:75:c4:91:09:3b:be:04:ca:45:d9:
\t57:8a:0d:27:cb:23
attr e:
\t23
attr n:
\t00:af:32:71:f0:e6:0e:9c:99:b3:7f:8b:5f:04:4b:
\tcb:8b:c0:d5:3e:b2:77:fd:cf:64:d8:8f:c0:cf:ae:
\t1f:c6:31:df:f6:29:b2:44:96:e2:c6:d4:21:94:7f:
\t65:7c:d8:d4:23:1f:b8:2e:6a:c9:1f:94:0d:46:c1:
\t69:a2:b7:07:0c:a3:93:c1:34:d8:2e:1e:4a:99:1a:
\t6c:96:46:07:46:2b:dc:25:29:1b:87:f0:be:05:1d:
\tee:b4:34:b9:e7:99:95
attr p:
\t00:cb:4a:4b:d0:40:47:e8:45:52:f7:c7:af:0c:20:
\t6d:43:0d:b6:39:94:f9:da:a5:e5:03:06:76:83:24:
\teb:88:a1:55:a2:a8:de:12:3b:77:49:92:8a:a9:71:
\td2:02:93:ff
attr q:
\t00:dc:9f:6b:d9:98:21:56:11:8d:e9:5f:03:9d:0a:
\td3:93:6e:13:77:41:3c:85:4f:00:70:fd:05:54:ff:
\tbc:3d:09:bf:83:f6:97:7f:64:10:91:04:fe:a2:67:
\t47:54:42:6b
attr u:
\t00:b4:73:97:4b:50:10:a3:17:b3:a8:47:f1:3a:14:
\t76:52:d1:38:2a:cf:12:14:34:c1:a8:54:4c:29:35:
\t80:a0:38:b8:f0:fa:4c:c4:c2:85:ab:db:87:82:ba:
\tdc:eb:db:2a>""")
    def test_reprPublicRSA(self):
        """
        The repr of a L{keys.Key} contains all of the RSA components for an RSA
        public key.
        """
        # Only the public components (e, n) appear; byte lines are tab-prefixed.
        self.assertEqual(repr(keys.Key(self.rsaObj).public()),
"""<RSA Public Key (768 bits)
attr e:
\t23
attr n:
\t00:af:32:71:f0:e6:0e:9c:99:b3:7f:8b:5f:04:4b:
\tcb:8b:c0:d5:3e:b2:77:fd:cf:64:d8:8f:c0:cf:ae:
\t1f:c6:31:df:f6:29:b2:44:96:e2:c6:d4:21:94:7f:
\t65:7c:d8:d4:23:1f:b8:2e:6a:c9:1f:94:0d:46:c1:
\t69:a2:b7:07:0c:a3:93:c1:34:d8:2e:1e:4a:99:1a:
\t6c:96:46:07:46:2b:dc:25:29:1b:87:f0:be:05:1d:
\tee:b4:34:b9:e7:99:95>""")
    def test_reprPublicECDSA(self):
        """
        The repr of a L{keys.Key} contains all the OpenSSH format for an ECDSA
        public key.
        """
        # The expected value includes a trailing newline; the long y value is
        # split across concatenated literals purely for source-line length.
        self.assertEqual(repr(keys.Key(self.ecObj).public()),
"""<Elliptic Curve Public Key (256 bits)
curve:
\tecdsa-sha2-nistp256
x:
\t76282513020392096317118503144964731774299773481750550543382904345687059013883
y:""" +
            "\n\t8154319786460285263226566476944164753434437589431431968106113715931064" +
            "6683104>\n")
    def test_reprPrivateECDSA(self):
        """
        The repr of a L{keys.Key} contains all the OpenSSH format for an ECDSA
        private key.
        """
        # Includes privateValue in addition to the public components; the
        # expected value ends with a trailing newline.
        self.assertEqual(repr(keys.Key(self.ecObj)),
"""<Elliptic Curve Private Key (256 bits)
curve:
\tecdsa-sha2-nistp256
privateValue:
\t34638743477210341700964008455655698253555655678826059678074967909361042656500
x:
\t76282513020392096317118503144964731774299773481750550543382904345687059013883
y:""" +
            "\n\t8154319786460285263226566476944164753434437589431431968106113715931064" +
            "6683104>\n")
class KeyKeyObjectTests(unittest.TestCase):
    """
    The L{keys.Key.keyObject} property provides deprecated access to a PyCrypto
    key instance of the corresponding type.
    """
    # Both backends are needed: cryptography for keys.Key itself, PyCrypto
    # for the legacy keyObject representation under test.
    if cryptography is None:
        skip = skipCryptography
    if Crypto is None:
        skip = skipPyCrypto

    def test_deprecation(self):
        """
        Accessing the L{keys.Key.keyObject} property emits a deprecation
        warning.
        """
        keys.Key.fromString(keydata.publicRSA_openssh).keyObject
        # flushWarnings is scoped to this method so unrelated warnings from
        # other tests are not picked up.
        [warning] = self.flushWarnings([KeyKeyObjectTests.test_deprecation])
        self.assertIs(warning['category'], DeprecationWarning)

    def test_keyObjectGetRSAPublic(self):
        """
        The PyCrypto key instance for an RSA public key has the same components
        as the internal key.
        """
        key = keys.Key.fromString(keydata.publicRSA_openssh)
        result = key.keyObject
        self.assertIsInstance(result, Crypto.PublicKey.RSA._RSAobj)
        self.assertEqual(keydata.RSAData['e'], result.key.e)
        self.assertEqual(keydata.RSAData['n'], result.key.n)

    def test_keyObjectGetRSAPrivate(self):
        """
        The PyCrypto key instance for an RSA private key has the same
        components as the internal key.
        """
        key = keys.Key.fromString(keydata.privateRSA_openssh)
        result = key.keyObject
        self.assertIsInstance(result, Crypto.PublicKey.RSA._RSAobj)
        self.assertEqual(keydata.RSAData['e'], result.key.e)
        self.assertEqual(keydata.RSAData['n'], result.key.n)
        self.assertEqual(keydata.RSAData['d'], result.key.d)
        self.assertEqual(keydata.RSAData['p'], result.key.p)
        self.assertEqual(keydata.RSAData['q'], result.key.q)
        self.assertEqual(keydata.RSAData['u'], result.key.u)

    def test_keyObjectGetDSAPublic(self):
        """
        The PyCrypto key instance for a DSA public key has the same components
        as the internal key.
        """
        key = keys.Key.fromString(keydata.publicDSA_openssh)
        result = key.keyObject
        self.assertIsInstance(result, Crypto.PublicKey.DSA._DSAobj)
        self.assertEqual(keydata.DSAData['y'], result.key.y)
        self.assertEqual(keydata.DSAData['g'], result.key.g)
        self.assertEqual(keydata.DSAData['p'], result.key.p)
        self.assertEqual(keydata.DSAData['q'], result.key.q)

    def test_keyObjectGetDSAPrivate(self):
        """
        The PyCrypto key instance for a DSA private key has the same components
        as the internal key.
        """
        key = keys.Key.fromString(keydata.privateDSA_openssh)
        result = key.keyObject
        self.assertIsInstance(result, Crypto.PublicKey.DSA._DSAobj)
        self.assertEqual(keydata.DSAData['y'], result.key.y)
        self.assertEqual(keydata.DSAData['g'], result.key.g)
        self.assertEqual(keydata.DSAData['p'], result.key.p)
        self.assertEqual(keydata.DSAData['q'], result.key.q)
        self.assertEqual(keydata.DSAData['x'], result.key.x)

    def test_keyObjectSetRSAPublic(self):
        """
        Setting the L{keys.Key.keyObject} property to a PyCrypto public RSA key
        instance updates the internal key.
        """
        # Start from a DSA key so the type change is observable.
        key = keys.Key.fromString(keydata.publicDSA_openssh)
        newPyCryptoKey = Crypto.PublicKey.RSA.construct((
            keydata.RSAData['n'],
            keydata.RSAData['e'],
            ))
        self.assertEqual('DSA', key.type())

        key.keyObject = newPyCryptoKey
        # Assigning the property is itself deprecated.
        [warning] = self.flushWarnings([
            KeyKeyObjectTests.test_keyObjectSetRSAPublic])
        self.assertIs(warning['category'], DeprecationWarning)

        self.assertEqual('RSA', key.type())
        self.assertEqual({
            'n': keydata.RSAData['n'],
            'e': keydata.RSAData['e'],
            },
            key.data())

    def test_keyObjectSetRSAPrivate(self):
        """
        Setting the L{keys.Key.keyObject} property to a PyCrypto private RSA
        key instance updates the internal key.
        """
        key = keys.Key.fromString(keydata.publicDSA_openssh)
        newPyCryptoKey = Crypto.PublicKey.RSA.construct((
            keydata.RSAData['n'],
            keydata.RSAData['e'],
            keydata.RSAData['d'],
            keydata.RSAData['p'],
            keydata.RSAData['q'],
            keydata.RSAData['u'],
            ))
        self.assertEqual('DSA', key.type())

        key.keyObject = newPyCryptoKey

        self.assertEqual('RSA', key.type())
        self.assertEqual({
            'n': keydata.RSAData['n'],
            'e': keydata.RSAData['e'],
            'd': keydata.RSAData['d'],
            'p': keydata.RSAData['p'],
            'q': keydata.RSAData['q'],
            'u': keydata.RSAData['u'],
            },
            key.data())

    def test_keyObjectSetDSAPublic(self):
        """
        Setting the L{keys.Key.keyObject} property to a PyCrypto public DSA key
        instance updates the internal key.
        """
        key = keys.Key.fromString(keydata.publicRSA_openssh)
        newPyCryptoKey = Crypto.PublicKey.DSA.construct((
            keydata.DSAData['y'],
            keydata.DSAData['g'],
            keydata.DSAData['p'],
            keydata.DSAData['q'],
            ))
        self.assertEqual('RSA', key.type())

        key.keyObject = newPyCryptoKey

        self.assertEqual('DSA', key.type())
        self.assertEqual({
            'y': keydata.DSAData['y'],
            'g': keydata.DSAData['g'],
            'p': keydata.DSAData['p'],
            'q': keydata.DSAData['q'],
            },
            key.data())

    def test_keyObjectSetDSAPrivate(self):
        """
        Setting the L{keys.Key.keyObject} property to a PyCrypto private DSA
        key instance updates the internal key.
        """
        key = keys.Key.fromString(keydata.publicRSA_openssh)
        newPyCryptoKey = Crypto.PublicKey.DSA.construct((
            keydata.DSAData['y'],
            keydata.DSAData['g'],
            keydata.DSAData['p'],
            keydata.DSAData['q'],
            keydata.DSAData['x'],
            ))
        self.assertEqual('RSA', key.type())

        key.keyObject = newPyCryptoKey

        self.assertEqual('DSA', key.type())
        self.assertEqual({
            'y': keydata.DSAData['y'],
            'g': keydata.DSAData['g'],
            'p': keydata.DSAData['p'],
            'q': keydata.DSAData['q'],
            'x': keydata.DSAData['x'],
            },
            key.data())

    def test_constructorPyCrypto(self):
        """
        Passing a PyCrypto key object to L{keys.Key} is deprecated.
        """
        pycryptoKey = Crypto.PublicKey.RSA.construct((
            keydata.RSAData['n'],
            keydata.RSAData['e']))
        # callDeprecated asserts that the constructor emits exactly this
        # deprecation and returns the constructed key.
        key = self.callDeprecated(
            (Version('Twisted', 16, 0, 0),
             'passing a cryptography key object'),
            keys.Key,
            pycryptoKey)
        self.assertEqual('RSA', key.type())
        self.assertEqual({
            'n': keydata.RSAData['n'],
            'e': keydata.RSAData['e'],
            },
            key.data())
class PersistentRSAKeyTests(unittest.TestCase):
    """
    Tests for L{keys._getPersistentRSAKey}.
    """
    if cryptography is None:
        skip = skipCryptography

    def test_providedArguments(self):
        """
        L{keys._getPersistentRSAKey} writes the key to the requested file
        path, honouring the requested key size.
        """
        pemFile = FilePath(self.mktemp()).child("mykey.pem")
        generated = keys._getPersistentRSAKey(pemFile, keySize=512)
        self.assertEqual(generated.size(), 512)
        self.assertTrue(pemFile.exists())

    def test_noRegeneration(self):
        """
        L{keys._getPersistentRSAKey} will not regenerate the key if the key
        already exists.
        """
        pemFile = FilePath(self.mktemp()).child("mykey.pem")
        first = keys._getPersistentRSAKey(pemFile, keySize=512)
        self.assertEqual(first.size(), 512)
        self.assertTrue(pemFile.exists())
        originalBytes = pemFile.getContent()

        # Asking for 1024 bits now must return the existing 512-bit key
        # untouched instead of generating a new one.
        second = keys._getPersistentRSAKey(pemFile, keySize=1024)
        self.assertEqual(second.size(), 512)
        self.assertEqual(pemFile.getContent(), originalBytes)

    def test_keySizeZero(self):
        """
        A key whose underlying key object has been cleared to None reports
        a size of 0.
        """
        pemFile = FilePath(self.mktemp()).child("mykey.pem")
        generated = keys._getPersistentRSAKey(pemFile, keySize=512)
        generated._keyObject = None
        self.assertEqual(generated.size(), 0)
| gpl-2.0 |
bcornwellmott/erpnext | erpnext/utilities/transaction_base.py | 14 | 5360 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.share
from frappe import _
from frappe.utils import cstr, now_datetime, cint, flt
from erpnext.controllers.status_updater import StatusUpdater
class UOMMustBeIntegerError(frappe.ValidationError):
    """Raised when a fractional quantity is used with a whole-number UOM."""
class TransactionBase(StatusUpdater):
    """Shared behaviour for transactional doctypes: follow-up calendar
    events, posting-time defaults, UOM integrality checks and validation
    against previously linked documents."""

    def load_notification_message(self):
        """Attach the configured notification message for this doctype,
        if enabled in the Notification Control single doctype."""
        dt = self.doctype.lower().replace(" ", "_")
        if int(frappe.db.get_value("Notification Control", None, dt) or 0):
            self.set("__notification_message",
                frappe.db.get_value("Notification Control", None, dt + "_message"))

    def validate_posting_time(self):
        """Default posting_date/posting_time to now unless explicitly set."""
        # set Edit Posting Date and Time to 1 while data import
        if frappe.flags.in_import and self.posting_date:
            self.set_posting_time = 1

        if not getattr(self, 'set_posting_time', None):
            now = now_datetime()
            self.posting_date = now.strftime('%Y-%m-%d')
            self.posting_time = now.strftime('%H:%M:%S')

    def add_calendar_event(self, opts, force=False):
        """Recreate the follow-up Event when the contact person or contact
        date changed since the last save (or when *force* is true)."""
        if cstr(self.contact_by) != cstr(self._prev.contact_by) or \
                cstr(self.contact_date) != cstr(self._prev.contact_date) or force:
            self.delete_events()
            self._add_calendar_event(opts)

    def delete_events(self):
        """Delete every Event document that references this document."""
        events = frappe.db.sql_list("""select name from `tabEvent`
            where ref_type=%s and ref_name=%s""", (self.doctype, self.name))
        if events:
            # Build one %s placeholder per event name. The previous template
            # used "in (%s)" with str.format(), which is a no-op (no {} field
            # in the string), leaving a single placeholder regardless of how
            # many names were passed and breaking for len(events) > 1.
            frappe.db.sql("delete from `tabEvent` where name in ({0})"
                .format(", ".join(['%s'] * len(events))), tuple(events))

    def _add_calendar_event(self, opts):
        """Create a private Event on the contact date, shared with the
        assigned user when that user exists."""
        opts = frappe._dict(opts)

        if self.contact_date:
            event = frappe.get_doc({
                "doctype": "Event",
                "owner": opts.owner or self.owner,
                "subject": opts.subject,
                "description": opts.description,
                "starts_on": self.contact_date,
                "event_type": "Private",
                "ref_type": self.doctype,
                "ref_name": self.name
            })

            event.insert(ignore_permissions=True)

            if frappe.db.exists("User", self.contact_by):
                frappe.share.add("Event", event.name, self.contact_by,
                    flags={"ignore_share_permission": True})

    def validate_uom_is_integer(self, uom_field, qty_fields):
        """Delegate to the module-level helper for all child rows."""
        validate_uom_is_integer(self, uom_field, qty_fields)

    def validate_with_previous_doc(self, ref):
        """Validate fields of this document's items against the referenced
        previous documents described by *ref* (a mapping of doctype to
        ref_dn_field / compare_fields / is_child_table settings)."""
        for key, val in ref.items():
            is_child = val.get("is_child_table")
            ref_doc = {}
            item_ref_dn = []
            for d in self.get_all_children(self.doctype + " Item"):
                ref_dn = d.get(val["ref_dn_field"])
                if ref_dn:
                    if is_child:
                        self.compare_values({key: [ref_dn]}, val["compare_fields"], d)
                        if ref_dn not in item_ref_dn:
                            item_ref_dn.append(ref_dn)
                        elif not val.get("allow_duplicate_prev_row_id"):
                            frappe.throw(_("Duplicate row {0} with same {1}").format(d.idx, key))
                    elif ref_dn:
                        ref_doc.setdefault(key, [])
                        if ref_dn not in ref_doc[key]:
                            ref_doc[key].append(ref_dn)
            if ref_doc:
                self.compare_values(ref_doc, val["compare_fields"])

    def compare_values(self, ref_doc, fields, doc=None):
        """Compare each (fieldname, condition) in *fields* against the
        values stored on the referenced documents; throws on mismatch."""
        for reference_doctype, ref_dn_list in ref_doc.items():
            for reference_name in ref_dn_list:
                prevdoc_values = frappe.db.get_value(reference_doctype, reference_name,
                    [d[0] for d in fields], as_dict=1)

                if not prevdoc_values:
                    frappe.throw(_("Invalid reference {0} {1}").format(reference_doctype, reference_name))

                for field, condition in fields:
                    if prevdoc_values[field] is not None:
                        self.validate_value(field, condition, prevdoc_values[field], doc)

    def validate_rate_with_reference_doc(self, ref_details):
        """Ensure each item's rate matches the rate on its referenced
        document row, within one cent."""
        for ref_dt, ref_dn_field, ref_link_field in ref_details:
            for d in self.get("items"):
                if d.get(ref_link_field):
                    ref_rate = frappe.db.get_value(ref_dt + " Item", d.get(ref_link_field), "rate")

                    if abs(flt(d.rate - ref_rate, d.precision("rate"))) >= .01:
                        frappe.throw(_("Row #{0}: Rate must be same as {1}: {2} ({3} / {4}) ")
                            .format(d.idx, ref_dt, d.get(ref_dn_field), d.rate, ref_rate))

    def get_link_filters(self, for_doctype):
        """Build a name-in-list filter for *for_doctype* from this
        document's items, using the configured prev_link_mapper; returns
        None when no mapping or no values exist."""
        if hasattr(self, "prev_link_mapper") and self.prev_link_mapper.get(for_doctype):
            fieldname = self.prev_link_mapper[for_doctype]["fieldname"]

            values = filter(None, tuple([item.as_dict()[fieldname] for item in self.items]))

            if values:
                ret = {
                    for_doctype: {
                        "filters": [[for_doctype, "name", "in", values]]
                    }
                }
            else:
                ret = None
        else:
            ret = None

        return ret
def delete_events(ref_type, ref_name):
    # Module-level variant used during doctype reload: drop every Event
    # that references the given document, bypassing document hooks.
    frappe.delete_doc("Event", frappe.db.sql_list("""select name from `tabEvent`
        where ref_type=%s and ref_name=%s""", (ref_type, ref_name)), for_reload=True)
def validate_uom_is_integer(doc, uom_field, qty_fields, child_dt=None):
    """Throw UOMMustBeIntegerError when any child row of *doc* pairs a
    whole-number-only UOM with a fractional quantity."""
    if isinstance(qty_fields, basestring):
        qty_fields = [qty_fields]

    # Of the UOMs actually used on child rows, keep those flagged as
    # whole-number-only in the UOM master.
    uoms_in_use = set(row.get(uom_field) for row in doc.get_all_children())
    whole_number_uoms = [uom for uom in uoms_in_use
        if frappe.db.get_value("UOM", uom, "must_be_whole_number")]

    if not whole_number_uoms:
        return

    for row in doc.get_all_children(parenttype=child_dt):
        if row.get(uom_field) not in whole_number_uoms:
            continue
        for fieldname in qty_fields:
            qty = row.get(fieldname)
            # Tolerate float noise: more than 1e-7 away from the integer
            # part counts as fractional.
            if qty and abs(cint(qty) - flt(qty)) > 0.0000001:
                frappe.throw(_("Quantity ({0}) cannot be a fraction in row {1}").format(qty, row.idx), UOMMustBeIntegerError)
| gpl-3.0 |
vmindru/ansible | lib/ansible/modules/windows/win_file.py | 14 | 2212 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_file
version_added: "1.9.2"
short_description: Creates, touches or removes files or directories
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
- Unlike M(file), does not modify ownership, permissions or manipulate links.
- For non-Windows targets, use the M(file) module instead.
options:
path:
description:
- Path to the file being managed.
required: yes
type: path
aliases: [ dest, name ]
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
- If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
- If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
choices: [ absent, directory, file, touch ]
seealso:
- module: assemble
- module: copy
- module: file
- module: template
- module: win_copy
- module: win_stat
- module: win_template
author:
- Jon Hawkesworth (@jhawkesworth)
'''
EXAMPLES = r'''
- name: Touch a file (creates if not present, updates modification time if present)
win_file:
path: C:\Temp\foo.conf
state: touch
- name: Remove a file, if present
win_file:
path: C:\Temp\foo.conf
state: absent
- name: Create directory structure
win_file:
path: C:\Temp\folder\subfolder
state: directory
- name: Remove directory structure
win_file:
path: C:\Temp
state: absent
'''
| gpl-3.0 |
programadorjc/django | django/contrib/gis/shortcuts.py | 388 | 1209 | import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
def compress_kml(kml):
    "Returns compressed KMZ from the given KML string."
    buf = BytesIO()
    # A KMZ archive is simply a deflated zip containing doc.kml.
    archive = zipfile.ZipFile(buf, 'a', zipfile.ZIP_DEFLATED)
    archive.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
    archive.close()
    return buf.getvalue()
def render_to_kml(*args, **kwargs):
    "Renders the response as KML (using the correct MIME type)."
    rendered = loader.render_to_string(*args, **kwargs)
    return HttpResponse(rendered,
        content_type='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
    """
    Compresses the KML content and returns as KMZ (using the correct
    MIME type).
    """
    rendered = loader.render_to_string(*args, **kwargs)
    return HttpResponse(compress_kml(rendered),
        content_type='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
    "Renders the response using the MIME type for plain text."
    rendered = loader.render_to_string(*args, **kwargs)
    return HttpResponse(rendered, content_type='text/plain')
| bsd-3-clause |
fzimmermann89/pyload | module/remote/ClickAndLoadBackend.py | 42 | 5237 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
import re
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from cgi import FieldStorage
from urllib import unquote
from base64 import standard_b64decode
from binascii import unhexlify
try:
from Crypto.Cipher import AES
except:
pass
from RemoteManager import BackendBase
core = None
js = None
class ClickAndLoadBackend(BackendBase):
    """Remote backend speaking the Click'n'Load HTTP protocol."""

    def setup(self, host, port):
        self.httpd = HTTPServer((host, port), CNLHandler)
        # Publish the core and its JS engine module-wide so the request
        # handler class can reach them.
        global core, js
        core = self.m.core
        js = core.js

    def serve(self):
        # Process one request at a time until the backend is disabled.
        while self.enabled:
            self.httpd.handle_request()
class CNLHandler(BaseHTTPRequestHandler):
    """HTTP request handler implementing the Click'n'Load / FlashGot
    link-submission protocol spoken by browser extensions."""

    def add_package(self, name, urls, queue=0):
        # Debug stub: prints the package; actual package creation is wired
        # up elsewhere.
        print "name", name
        print "urls", urls
        print "queue", queue

    def get_post(self, name, default=""):
        # Fetch a POST field, falling back to *default* when absent.
        if name in self.post:
            return self.post[name]
        else:
            return default

    def start_response(self, string):
        # Emit a 200 header block sized for the response body *string*.
        self.send_response(200)

        self.send_header("Content-Length", len(string))
        self.send_header("Content-Language", "de")
        self.send_header("Vary", "Accept-Language, Cookie")
        self.send_header("Cache-Control", "no-cache, must-revalidate")
        self.send_header("Content-type", "text/html")
        self.end_headers()

    def do_GET(self):
        path = self.path.strip("/").lower()
        #self.wfile.write(path+"\n")

        # Route table: the first pattern (optionally prefixed with
        # "flash/" or "flashgot/") matching the path wins; the empty
        # pattern at the end is the catch-all probe endpoint.
        self.map = [ (r"add$", self.add),
            (r"addcrypted$", self.addcrypted),
            (r"addcrypted2$", self.addcrypted2),
            (r"flashgot", self.flashgot),
            (r"crossdomain\.xml", self.crossdomain),
            (r"checkSupportForUrl", self.checksupport),
            (r"jdcheck.js", self.jdcheck),
            (r"", self.flash) ]

        func = None
        for r, f in self.map:
            if re.match(r"(flash(got)?/?)?"+r, path):
                func = f
                break

        if func:
            try:
                resp = func()
                # Handlers returning nothing signal success implicitly.
                if not resp: resp = "success"
                resp += "\r\n"
                self.start_response(resp)
                self.wfile.write(resp)
            except Exception,e :
                self.send_error(500, str(e))
        else:
            self.send_error(404, "Not Found")

    def do_POST(self):
        # Parse the form body into self.post, then dispatch like a GET.
        form = FieldStorage(
            fp=self.rfile,
            headers=self.headers,
            environ={'REQUEST_METHOD':'POST',
                     'CONTENT_TYPE':self.headers['Content-Type'],
                     })

        self.post = {}
        for name in form.keys():
            self.post[name] = form[name].value

        return self.do_GET()

    def flash(self):
        # Probe endpoint: extensions look for this exact string.
        return "JDownloader"

    def add(self):
        package = self.get_post('referer', 'ClickAndLoad Package')
        urls = filter(lambda x: x != "", self.get_post('urls').split("\n"))

        self.add_package(package, urls, 0)

    def addcrypted(self):
        # DLC container upload; '+' was mangled to ' ' by form encoding.
        package = self.get_post('referer', 'ClickAndLoad Package')
        dlc = self.get_post('crypted').replace(" ", "+")

        core.upload_container(package, dlc)

    def addcrypted2(self):
        package = self.get_post("source", "ClickAndLoad Package")
        crypted = self.get_post("crypted")
        jk = self.get_post("jk")

        # Undo '+'->' ' form mangling and base64-decode the payload.
        crypted = standard_b64decode(unquote(crypted.replace(" ", "+")))
        # "jk" is a JavaScript function whose return value is the hex AES key.
        jk = "%s f()" % jk
        jk = js.eval(jk)

        # Click'n'Load uses AES-CBC with IV equal to the key; the plaintext
        # is NUL-padded, one link per line.
        Key = unhexlify(jk)
        IV = Key

        obj = AES.new(Key, AES.MODE_CBC, IV)
        result = obj.decrypt(crypted).replace("\x00", "").replace("\r","").split("\n")
        result = filter(lambda x: x != "", result)

        self.add_package(package, result, 0)

    def flashgot(self):
        autostart = int(self.get_post('autostart', 0))
        package = self.get_post('package', "FlashGot")
        urls = filter(lambda x: x != "", self.get_post('urls').split("\n"))

        self.add_package(package, urls, autostart)

    def crossdomain(self):
        # Flash cross-domain policy allowing any origin to talk to us.
        rep = "<?xml version=\"1.0\"?>\n"
        rep += "<!DOCTYPE cross-domain-policy SYSTEM \"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd\">\n"
        rep += "<cross-domain-policy>\n"
        rep += "<allow-access-from domain=\"*\" />\n"
        rep += "</cross-domain-policy>"
        return rep

    def checksupport(self):
        # Intentionally empty: do_GET turns a None return into "success".
        pass

    def jdcheck(self):
        # Minimal JS snippet that makes extensions treat us as JDownloader.
        rep = "jdownloader=true;\n"
        rep += "var version='10629';\n"
        return rep
| gpl-3.0 |
kevinbluett/cs4032_distributed_file_server | src/threaded/server.py | 3 | 2218 | import select
from thread_pool import *
from helpers import *
class LithiumThreadPoolDispatcher():
    """ Dispatches opened sockets into the threadpool """

    def __init__(self, server, pair):
        # Queue the accepted (socket, address) pair for a worker thread.
        server.pool.add_task(self.process_input, server, pair)

    def process_input(self, server, pair):
        # Runs on a pool worker: hand the connection to the server's
        # application handler; on a socket error, log it and release the
        # server's connection count.
        try:
            server.handler(server, pair)
        except socket.error, e:
            print e
            if isinstance(e.args, tuple):
                # NOTE(review): this relies on the socket module exposing
                # errno (socket.errno.EPIPE) -- confirm; the stdlib errno
                # module is the conventional source for EPIPE.
                if e[0] == socket.errno.EPIPE:
                    # remote peer disconnected
                    print "Detected remote disconnect"
            server.count.decr()
class LithiumThreadPoolServer():
    """Accept loop that fans incoming connections out to a thread pool."""

    # Hard cap on concurrently tracked client sockets; beyond this new
    # connections receive a 503 and are closed immediately.
    MAX_CONNECTIONS = 2000

    def __init__(self, host, port, handler, workers=10):
        self.pool = LithiumThreadPool(workers)
        self.count = AtomicCount()
        self.handler = handler
        self.host = host
        self.port = port

    def handle_accept(self, s):
        # Accept one pending connection and either dispatch it to the
        # pool or reject it when over capacity.
        pair = s.accept()
        if pair is not None:
            sock, addr = pair
            self.count.incr()
            print 'Incoming connection from %s, socket count %d' % (repr(addr), self.count.count)
            if self.count.count > self.MAX_CONNECTIONS:
                sock.send("503 - Service unavailable\n")
                sock.close()
                self.count.decr()
            else:
                handler = LithiumThreadPoolDispatcher(self, pair)

    def loop(self):
        # Bind, listen and run the select()-based accept loop until
        # shutdown() flips stop_looper or select() fails.
        self.stop_looper = False
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((self.host, self.port))
        self.sock.listen(5)
        print "Starting Lithuim threaded pool server \nListening on %s:%d" % (self.host, self.port)
        while not self.stop_looper:
            try:
                read_sockets, _, _ = select.select([self.sock],[],[])
                for sock in read_sockets:
                    if sock is self.sock:
                        self.handle_accept(self.sock)
            except select.error:
                break

    def shutdown(self, safe=True):
        # Stop the accept loop; when *safe*, drain the worker pool first.
        self.stop_looper = True
        if safe and self.pool is not None:
            self.pool.shutdown()
        self.sock.close()
| mit |
davidkuep/pyiso | setup.py | 1 | 2414 | from setuptools import setup
import codecs
import os
import re
# to release:
# python setup.py register sdist bdist_egg upload
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
# https://github.com/pypa/sampleproject/blob/master/setup.py
def find_version(*file_paths):
    """Extract the __version__ string from a source file without importing
    it (importing would require the package's dependencies)."""
    # Open in Latin-1 so that we avoid encoding errors; codecs.open keeps
    # Python 2 compatibility.
    with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
        contents = f.read()

    # The version line must have the form: __version__ = 'ver'
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
# Get the long description from the relevant file
with codecs.open('README.md', encoding='utf-8') as f:
    long_description = f.read()

# Package metadata. The version is read out of pyiso/__init__.py (via
# find_version) rather than imported, so setup.py runs before the
# package's dependencies are installed.
setup(
    name='pyiso',
    packages=['pyiso'],
    version=find_version('pyiso', '__init__.py'),
    description='Python client libraries for ISO and other power grid data sources.',
    long_description=long_description,
    author='Anna Schneider',
    author_email='anna@watttime.org',
    url='https://github.com/WattTime/pyiso',
    license='Apache',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    test_suite='nose.collector',
    install_requires=[
        'beautifulsoup4',
        'pandas>=0.15',
        'python-dateutil',
        'pytz',
        'requests',
        'celery>=3.1',
        'xlrd',
        'lxml',
        'html5lib',
    ],
)
| apache-2.0 |
ammonkey/rhythm-e | plugins/rb/Coroutine.py | 10 | 2219 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2006 - Ed Catmur <ed@catmur.co.uk>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
class Coroutine:
    """A simple message-passing coroutine implementation.

    Not thread- or signal-safe.

    Usage:
        def my_iter (plexer, args):
            some_async_task (..., callback=plexer.send (tokens))
            yield None
            tokens, (data, ) = plexer.receive ()
            ...
        Coroutine (my_iter, args).begin ()
    """

    def __init__(self, iter, *args):
        # Instantiate the generator; it does not run until begin().
        self._continuation = iter(self, *args)
        self._executing = False

    def _resume(self):
        """Advance the generator, draining any queued messages.

        Re-entrant calls (a callback firing synchronously while the
        generator is already executing) are ignored; the outer call's
        drain loop delivers the freshly queued messages instead.
        """
        if not self._executing:
            self._executing = True
            try:
                try:
                    # The next() builtin (Python 2.6+/3) replaces the
                    # Python-2-only generator .next() method.
                    next(self._continuation)
                    while self._data:
                        next(self._continuation)
                except StopIteration:
                    pass
            finally:
                self._executing = False

    def clear(self):
        """Discard any pending messages."""
        self._data = []

    def begin(self):
        """Start (or restart) the coroutine with an empty message queue."""
        self.clear()
        self._resume()

    def send(self, *tokens):
        """Return a callback which queues (tokens, args) and resumes the
        coroutine when invoked."""
        def callback(*args):
            self._data.append((tokens, args))
            self._resume()
        return callback

    def receive(self):
        """Pop and return the oldest pending (tokens, args) message."""
        return self._data.pop(0)
| gpl-2.0 |
noironetworks/horizon | openstack_dashboard/dashboards/identity/domains/panel.py | 16 | 1340 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import keystone
class Domains(horizon.Panel):
    """Identity dashboard panel for Keystone domains."""

    name = _("Domains")
    slug = 'domains'
    policy_rules = (("identity", "identity:get_domain"),
                    ("identity", "identity:list_domains"))

    @staticmethod
    def can_register():
        # Domains only exist in the Keystone v3 API.
        return keystone.VERSIONS.active >= 3

    def can_access(self, context):
        if keystone.VERSIONS.active < 3:
            return super(Domains, self).can_access(context)
        # On v3 a domain-scoped token must also be present in the session.
        domain_token = context['request'].session.get('domain_token')
        return super(Domains, self).can_access(context) and domain_token
| apache-2.0 |
vacproject/vcycle | ec2_api.py | 1 | 18422 | #!/usr/bin/python
#
# ec2_api.py - an EC2 plugin for Vcycle
#
# Andrew McNab, University of Manchester.
# Copyright (c) 2013-7. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# o Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# o Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contacts: Andrew.McNab@cern.ch http://www.gridpp.ac.uk/vcycle/
#
import pprint
import os
import re
import sys
import stat
import time
import json
import hmac
import shutil
import string
import pycurl
import random
import base64
import urllib
import datetime
import hashlib
import StringIO
import tempfile
import calendar
import vcycle.vacutils
class Ec2Error(Exception):
  """Raised for any EC2 plugin failure (configuration, HTTP, or API)."""
class Ec2Space(vcycle.BaseSpace):
  """Vcycle space plugin that manages VMs through the EC2 Query API.

  Requests are signed with AWS Signature Version 4 and POSTed to the
  configured endpoint URL.  Intended for AWS itself or EC2-compatible
  services (the default 'service' scope value is 'openstack').
  """

  def __init__(self, api, apiVersion, spaceName, parser, spaceSectionName, updatePipes):
    # Initialize data structures from configuration files

    # Generic initialization
    vcycle.BaseSpace.__init__(self, api, apiVersion, spaceName, parser, spaceSectionName, updatePipes)

    # EC2-specific initialization: access_key, secret_key and url are
    # mandatory; version, region and service fall back to defaults.
    try:
      self.access_key = parser.get(spaceSectionName, 'access_key')
    except Exception as e:
      raise Ec2Error('access_key is required in EC2 [space ' + spaceName + '] (' + str(e) + ')')

    try:
      self.secret_key = parser.get(spaceSectionName, 'secret_key')
    except Exception as e:
      raise Ec2Error('secret_key is required in EC2 [space ' + spaceName + '] (' + str(e) + ')')

    try:
      self.url = parser.get(spaceSectionName, 'url')
    except Exception as e:
      raise Ec2Error('url is required in EC2 [space ' + spaceName + '] (' + str(e) + ')')

    # EC2 API schema version sent with every request
    if parser.has_option(spaceSectionName, 'version'):
      self.version = parser.get(spaceSectionName, 'version').strip()
    else:
      self.version = '2010-08-31'

    # Region name used in the Signature v4 credential scope
    if parser.has_option(spaceSectionName, 'region'):
      self.region = parser.get(spaceSectionName, 'region').strip()
    else:
      self.region = 'us-east-1'

    # Service name used in the Signature v4 credential scope
    if parser.has_option(spaceSectionName, 'service'):
      self.service = parser.get(spaceSectionName, 'service').strip()
    else:
      self.service = 'openstack'

  def ec2Sign(self, key, message):
    """Return the raw HMAC-SHA256 digest of message keyed with key."""
    return hmac.new(key, message.encode("utf-8"), hashlib.sha256).digest()

  def ec2SignatureKey(self, dateStamp):
    """Derive the AWS Signature v4 signing key for the given date stamp.

    Chains HMACs over the date, region, service and the literal
    'aws4_request', as specified by the Signature v4 process.
    """
    kDate = self.ec2Sign(('AWS4' + self.secret_key).encode('utf-8'), dateStamp)
    kRegion = self.ec2Sign(kDate, self.region)
    kService = self.ec2Sign(kRegion, self.service)
    kSigning = self.ec2Sign(kService, 'aws4_request')
    return kSigning

  def ec2Request(self, formRequest = None, verbose = False, anyStatus = False):
    """POST a signed EC2 Query API request and return the parsed result.

    formRequest is a dict of EC2 form parameters (e.g. Action, Version).
    """
    # Wrapper around BaseSpace.httpRequest() that adds correct EC2 Authorization: header

    amzTime = datetime.datetime.utcnow()
    amzDate = amzTime.strftime('%Y%m%dT%H%M%SZ')
    amzDateStamp = amzTime.strftime('%Y%m%d')

    # Split the endpoint URL into host and request URI parts
    uri = '/' + '/'.join(self.url.split('/')[3:])
    host = self.url.split('/')[2]

    signedHeaderNames = ''
    signedHeaderNameValues = ''
    headersList = []

    # Add the headers in alphabetical order
    signedHeaderNames += 'host'
    signedHeaderNameValues += 'host:' + host + '\n'
    headersList.append('Host: ' + host)

    signedHeaderNames += ';x-amz-date'
    signedHeaderNameValues += 'x-amz-date:' + amzDate + '\n'
    headersList.append('X-Amz-Date: ' + amzDate)

    # Now build up the signature bit by bit
    formRequestBody = urllib.urlencode(formRequest)

    # Canonical request: method, URI, (empty) query string, headers,
    # signed header list, and the SHA-256 of the request body
    canonicalRequest = 'POST\n' + uri + '\n\n' + signedHeaderNameValues + '\n' + signedHeaderNames + '\n' + hashlib.sha256(formRequestBody).hexdigest()

    credentialScope = amzDateStamp + '/' + self.region + '/' + self.service + '/' + 'aws4_request'

    stringToSign = 'AWS4-HMAC-SHA256\n' + amzDate + '\n' + credentialScope + '\n' + hashlib.sha256(canonicalRequest).hexdigest()

    signature = hmac.new(self.ec2SignatureKey(amzDateStamp), (stringToSign).encode('utf-8'), hashlib.sha256).hexdigest()

    authorizationHeaderValue = 'AWS4-HMAC-SHA256 Credential=' + self.access_key + '/' + credentialScope + ', SignedHeaders=' + signedHeaderNames + ', Signature=' + signature

    headersList.append('Authorization: ' + authorizationHeaderValue)

    return vcycle.BaseSpace.httpRequest(self, self.url, formRequest = formRequestBody, headers = headersList, verbose = verbose, method = 'POST', anyStatus = anyStatus)

  def scanMachines(self):
    """Query EC2 service for details of machines in this space"""

    # For each machine found in the space, this method is responsible for
    # either (a) ignorning non-Vcycle VMs but updating self.totalProcessors
    # or (b) creating a Machine object for the VM in self.spaces

    try:
      result = self.ec2Request( formRequest = { 'Action' : 'DescribeInstances', 'Version' : self.version }, verbose = False )
    except Exception as e:
      raise Ec2Error('Cannot connect to ' + self.url + ' (' + str(e) + ')')

    # Convert machines from None to an empty dictionary since we successfully connected
    self.machines = {}

    for item1 in result['response']['DescribeInstancesResponse']['reservationSet'][0]['item']:
      for oneServer in item1['instancesSet'][0]['item']:

        self.totalProcessors += 1 # FIXME: GET THE REAL NUMBER NOT JUST 1

        instanceId = oneServer['instanceId'][0]['#text']
        instanceState = oneServer['instanceState'][0]['name'][0]['#text']

        machineName = None
        machinetypeName = None

        # Recover our 'name' and 'machinetype' tags if they are present
        if 'tagSet' in oneServer and 'item' in oneServer['tagSet'][0]:
          for keyValue in oneServer['tagSet'][0]['item']:
            key = keyValue['key' ][0]['#text']
            value = keyValue['value'][0]['#text']

            # save interesting tags (metadata)
            if key == 'name':
              machineName = value
            elif key == 'machinetype':
              machinetypeName = value

        if machineName is None:
          # if still None, then try to find by instanceId
          foundMachineNames = self.findMachinesWithFile('instance_id:' + instanceId)

          if len(foundMachineNames) == 1:
            machineName = foundMachineNames[0]

        if not machineName or not machineName.startswith('vcycle-'):
          # not one of ours
          continue

        if not machinetypeName:
          machinetypeName = self.getFileContents(machineName, 'machinetype_name')

        if not machinetypeName:
          # something weird, not ours?
          continue

        try:
          createdTime = int(self.getFileContents(machineName, 'created'))
        except:
          # something weird, not ours?
          continue

        # Try to get the IP address
        try:
          ip = str(oneServer['privateIpAddress'][0]['#text'])
        except:
          ip = '0.0.0.0'

        try:
          updatedTime = int(self.getFileContents(machineName, 'updated'))
        except:
          updatedTime = None

        try:
          startedTime = calendar.timegm(time.strptime(oneServer['launchTime'][0]['#text'], "%Y-%m-%dT%H:%M:%SZ"))
        except:
          startedTime = None

        # Map the EC2 instance state onto Vcycle's MachineState values
        if instanceState == 'running':
          state = vcycle.MachineState.running
        elif instanceState == 'pending':
          state = vcycle.MachineState.starting
        elif instanceState == 'stopping' or instanceState == 'stopped':
          state = vcycle.MachineState.shutdown
        elif instanceState == 'shutting-down' or instanceState == 'terminated':
          state = vcycle.MachineState.deleting
        elif instanceState == 'error':
          state = vcycle.MachineState.failed
        else:
          state = vcycle.MachineState.unknown

        if state == vcycle.MachineState.running and ('tagSet' not in oneServer or 'item' not in oneServer['tagSet'][0]):
          # Running but no tags yet, so try creating
          try:
            self.createTags(instanceId, machineName, machinetypeName)
          except Exception as e:
            vcycle.vacutils.logLine('Adding tags fails with ' + str(e))

        # NOTE(review): createdTime and startedTime are computed above but
        # None is passed for both here -- confirm this is intentional.
        self.machines[machineName] = vcycle.shared.Machine(name = machineName,
                                                           spaceName = self.spaceName,
                                                           state = state,
                                                           ip = ip,
                                                           createdTime = None,
                                                           startedTime = None,
                                                           updatedTime = updatedTime,
                                                           uuidStr = instanceId,
                                                           machinetypeName = machinetypeName)

  def getImageID(self, machinetypeName):
    """Get the image ID"""

    # Specific image, not managed by Vcycle, lookup ID
    if self.machinetypes[machinetypeName].root_image[:6] == 'image:':
      return self.machinetypes[machinetypeName].root_image[6:]

    # Only the explicit 'image:ID' form is supported by this plugin
    raise Ec2Error('Failed to get image ID as no image stored for machinetype ' + machinetypeName)

  def getKeyPairName(self, machinetypeName):
    """Get the key pair name from root_public_key"""

    # Look for the cached key pair
    if hasattr(self.machinetypes[machinetypeName], '_keyPairName'):
      if self.machinetypes[machinetypeName]._keyPairName:
        return self.machinetypes[machinetypeName]._keyPairName
      else:
        raise Ec2Error('Key pair "' + self.machinetypes[machinetypeName].root_public_key + '" for machinetype ' + machinetypeName + ' not available!')

    # Get the ssh public key from the root_public_key file
    if self.machinetypes[machinetypeName].root_public_key[0] == '/':
      # Absolute path: open directly
      try:
        f = open(self.machinetypes[machinetypeName].root_public_key, 'r')
      except Exception as e:
        # NOTE(review): this Ec2Error is constructed but never raised --
        # should this be "raise Ec2Error(...)"? As written, f stays
        # unbound and the loop below fails with a NameError instead.
        Ec2Error('Cannot open ' + self.machinetypes[machinetypeName].root_public_key)
    else:
      # Relative path: resolve under the space's machinetype files directory
      try:
        # NOTE(review): self.machinetypeName looks wrong here -- the local
        # parameter is machinetypeName; confirm against the other plugins.
        f = open('/var/lib/vcycle/spaces/' + self.spaceName + '/machinetypes/' + self.machinetypeName + '/files/' + self.machinetypes[machinetypeName].root_public_key, 'r')
      except Exception as e:
        # NOTE(review): also constructed but never raised -- see above.
        Ec2Error('Cannot open /var/lib/vcycle/spaces/' + self.spaceName + '/machinetypes/' + self.machinetypeName + '/files/' + self.machinetypes[machinetypeName].root_public_key)

    while True:
      try:
        # NOTE(review): f.read() consumes the whole file, so if the first
        # read does not start with 'ssh-rsa ' the next read returns '' and
        # this loop can spin forever -- confirm intended behaviour.
        line = f.read()
      except:
        raise Ec2Error('Cannot find ssh-rsa public key line in ' + self.machinetypes[machinetypeName].root_public_key)

      if line[:8] == 'ssh-rsa ':
        sshPublicKey = line.split(' ')[1]
        sshFingerprint = vcycle.vacutils.makeSshFingerprint(line)
        break

    # Check if public key is there already
    try:
      result = self.ec2Request( formRequest = { 'Action' : 'DescribeKeyPairs', 'Version' : self.version },
                                verbose = False )
    except Exception as e:
      raise Ec2Error('getKeyPairName cannot connect to ' + self.url + ' (' + str(e) + ')')

    # Match on fingerprint; reuse and cache the existing key pair name
    for keypair in result['response']['DescribeKeyPairsResponse']['keySet'][0]['item']:
      try:
        if sshFingerprint == keypair['keyFingerprint'][0]['#text']:
          self.machinetypes[machinetypeName]._keyPairName = str(keypair['keyName'][0]['#text'])
          return self.machinetypes[machinetypeName]._keyPairName
      except:
        pass

    # Not there so we try to add it
    keyName = str(time.time()).replace('.','-')

    try:
      result = self.ec2Request(
                                formRequest =
                                {
                                  'Action' : 'ImportKeyPair',
                                  'Version' : self.version,
                                  'KeyName' : keyName,
                                  'PublicKeyMaterial' : base64.b64encode('ssh-rsa ' + sshPublicKey + ' vcycle')
                                },
                                verbose = False
                              )
    except Exception as e:
      raise Ec2Error('Cannot connect to ' + self.url + ' (' + str(e) + ')')

    vcycle.vacutils.logLine('Created key pair ' + keyName + ' for ' + self.machinetypes[machinetypeName].root_public_key + ' in ' + self.spaceName)

    self.machinetypes[machinetypeName]._keyPairName = keyName
    return self.machinetypes[machinetypeName]._keyPairName

  def createMachine(self, machineName, machinetypeName, zone = None):
    """Create one VM via RunInstances and record it in self.machines."""

    # EC2-specific machine creation steps
    try:
      formRequest = { 'Action' : 'RunInstances',
                      'Version' : self.version,
                      'MinCount' : '1',
                      'MaxCount' : '1',
                      'UserData' : base64.b64encode(self.getFileContents(machineName, 'user_data')),
                      'ImageId' : self.getImageID(machinetypeName),
                      'InstanceType' : self.machinetypes[machinetypeName].flavor_names[0] }

      if self.machinetypes[machinetypeName].root_public_key:
        formRequest['KeyName'] = self.getKeyPairName(machinetypeName)

    except Exception as e:
      raise Ec2Error('Failed to create new machine: ' + str(e))

    try:
      result = self.ec2Request( formRequest = formRequest, verbose = False )
    except Exception as e:
      raise Ec2Error('Cannot connect to ' + self.url + ' (' + str(e) + ')')

    try:
      instanceId = result['response']['RunInstancesResponse']['instancesSet'][0]['item'][0]['instanceId'][0]['#text']
    except:
      instanceId = None
    else:
      # Record the instance ID twice: as a marker file name that lets
      # scanMachines() map instanceId back to machineName, and as contents
      self.setFileContents(machineName, 'instance_id:' + instanceId, machineName)
      self.setFileContents(machineName, 'instance_id', instanceId)

    try:
      privateDnsName = result['response']['RunInstancesResponse']['instancesSet'][0]['item'][0]['privateDnsName'][0]['#text']
    except:
      privateDnsName = None
    else:
      self.setFileContents(machineName, 'private_dns_name', privateDnsName)

    vcycle.vacutils.logLine('Created ' + machineName + ' ( ' + str(instanceId) + ' / ' + str(privateDnsName) + ' ) for ' + machinetypeName + ' within ' + self.spaceName)

    self.machines[machineName] = vcycle.shared.Machine(name = machineName,
                                                       spaceName = self.spaceName,
                                                       state = vcycle.MachineState.starting,
                                                       ip = '0.0.0.0',
                                                       createdTime = int(time.time()),
                                                       startedTime = None,
                                                       updatedTime = int(time.time()),
                                                       uuidStr = instanceId,
                                                       machinetypeName = machinetypeName)

  def createTags(self, instanceId, machineName, machinetypeName):
    """Attach Vcycle identification and *features URL tags to an instance."""

    # NOTE(review): self.https_host and self.https_port are presumably set
    # by BaseSpace -- confirm they exist for this space type.
    try:
      result = self.ec2Request( formRequest = {
                                 'Action' : 'CreateTags',
                                 'Version' : self.version,
                                 'ResourceId.1' : instanceId,
                                 'Tag.1.Key' : 'name',
                                 'Tag.1.Value' : machineName,
                                 'Tag.2.Key' : 'machinetype',
                                 'Tag.2.Value' : machinetypeName,
                                 'Tag.3.Key' : 'machinefeatures',
                                 'Tag.3.Value' : 'https://' + self.https_host + ':' + str(self.https_port) + '/machines/' + self.spaceName + '/' + machineName + '/machinefeatures',
                                 'Tag.4.Key' : 'jobfeatures',
                                 'Tag.4.Value' : 'https://' + self.https_host + ':' + str(self.https_port) + '/machines/' + self.spaceName + '/' + machineName + '/jobfeatures',
                                 'Tag.5.Key' : 'joboutputs',
                                 'Tag.5.Value' : 'https://' + self.https_host + ':' + str(self.https_port) + '/machines/' + self.spaceName + '/' + machineName + '/joboutputs'
                                },
                                verbose = False )
    except Exception as e:
      raise Ec2Error('Adding tags to ' + machineName + ' (' + instanceId + ') fails with ' + str(e))

  def deleteOneMachine(self, machineName):
    """Terminate the EC2 instance recorded for machineName."""

    # The instance ID was stored by createMachine() (or recovered by
    # scanMachines()); without it we cannot address the instance
    try:
      instanceId = self.getFileContents(machineName, 'instance_id')
    except:
      raise Ec2Error('Cannot find instance_id when trying to delete ' + machineName)

    try:
      result = self.ec2Request( formRequest = {
                                 'Action' : 'TerminateInstances',
                                 'Version' : self.version,
                                 'InstanceId.1' : instanceId
                                },
                                verbose = False )
    except Exception as e:
      raise Ec2Error('Cannot delete ' + machineName + ' (' + instanceId + ') via ' + self.url + ' (' + str(e) + ')')

    if result['status'] == 200:
      # For EC2, we want to log the instanceId as well as the machineName
      vcycle.vacutils.logLine('Deleted ' + machineName + ' (' + instanceId + ')')
    else:
      vcycle.vacutils.logLine('Deletion of ' + machineName + ' (' + instanceId + ') fails with code ' + str(result['status']))
| bsd-2-clause |
JackDandy/SickGear | lib/chardet/codingstatemachine.py | 8 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import logging
from .enums import MachineState
class CodingStateMachine(object):
    """Byte-sequence verifier for one particular encoding.

    The detector feeds every received byte to each active state machine,
    one byte at a time; the machine transitions according to its model's
    class and state tables.  Three states matter to an auto-detector:

    START state: the initial state, or a legal byte sequence (i.e. a
                 valid code point) has just been completed.
    ME state:    a byte sequence unique to this charset was seen, so no
                 other encoding can contain it.  This will lead to an
                 immediate positive answer for the detector.
    ERROR state: an illegal byte sequence for this encoding was seen.
                 This will lead to an immediate negative answer, and the
                 detector excludes this encoding from then on.
    """

    def __init__(self, sm):
        self._model = sm
        self._curr_byte_pos = 0
        self._curr_char_len = 0
        self._curr_state = None
        self.logger = logging.getLogger(__name__)
        self.reset()

    def reset(self):
        """Return the machine to its initial START state."""
        self._curr_state = MachineState.START

    def next_state(self, c):
        """Feed one byte value c and return the resulting state."""
        # Classify the incoming byte; on the first byte of a character we
        # also record how many bytes the character will occupy.
        byte_cls = self._model['class_table'][c]
        if self._curr_state == MachineState.START:
            self._curr_byte_pos = 0
            self._curr_char_len = self._model['char_len_table'][byte_cls]
        # The transition table is indexed by (previous state, byte class).
        transition = self._curr_state * self._model['class_factor'] + byte_cls
        self._curr_state = self._model['state_table'][transition]
        self._curr_byte_pos += 1
        return self._curr_state

    def get_current_charlen(self):
        """Byte length of the character currently being decoded."""
        return self._curr_char_len

    def get_coding_state_machine(self):
        """Name of the model driving this machine."""
        return self._model['name']

    @property
    def language(self):
        return self._model['language']
| gpl-3.0 |
LChristakis/chalice-hunter | lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
# 25% of the ideal distribution ratio computed in the header above
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0

# Char to FreqOrder table
# Number of entries in the JISCharToFreqOrder table below
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| mit |
kohnle-lernmodule/KITexe201based | twisted/internet/_posixserialport.py | 20 | 2116 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial Port Protocol
"""
# system imports
import os, errno
# dependent on pyserial ( http://pyserial.sf.net/ )
# only tested w/ 1.18 (5 Dec 2002)
import serial
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
from serial import STOPBITS_ONE, STOPBITS_TWO
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
from serialport import BaseSerialPort
# twisted imports
from twisted.internet import abstract, fdesc, main
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """A select()able serial device, acting as a transport."""
    # Transports advertise connection state; a serial port is considered
    # connected as soon as it has been constructed.
    connected = 1
    def __init__(self, protocol, deviceNameOrPortNumber, reactor,
        baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
        stopbits = STOPBITS_ONE, timeout = 0, xonxoff = 0, rtscts = 0):
        # Register with the reactor, open the underlying pyserial device,
        # flush any stale buffered bytes, then attach the protocol and
        # start monitoring the descriptor for incoming data.
        abstract.FileDescriptor.__init__(self, reactor)
        self._serial = serial.Serial(deviceNameOrPortNumber, baudrate = baudrate, bytesize = bytesize, parity = parity, stopbits = stopbits, timeout = timeout, xonxoff = xonxoff, rtscts = rtscts)
        self.reactor = reactor
        self.flushInput()
        self.flushOutput()
        self.protocol = protocol
        self.protocol.makeConnection(self)
        self.startReading()
    def fileno(self):
        # File descriptor of the open serial device, used by the
        # reactor's select()/poll() loop.
        return self._serial.fd
    def writeSomeData(self, data):
        """Write some data to the serial device.

        Returns the number of bytes written, 0 when the device would
        block (EAGAIN), or main.CONNECTION_LOST for any other IOError;
        other OSErrors are re-raised.
        """
        try:
            return os.write(self.fileno(), data)
        except IOError, io:
            if io.args[0] == errno.EAGAIN:
                return 0
            return main.CONNECTION_LOST
        except OSError, ose:
            if ose.errno == errno.EAGAIN:
                # I think most systems use this one
                return 0
            raise
    def doRead(self):
        """Some data's readable from serial device.
        """
        # Deliver whatever is available straight to the protocol.
        return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)
    def connectionLost(self, reason):
        # Let FileDescriptor tear down reactor bookkeeping, then release
        # the underlying serial device.
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
| gpl-2.0 |
ZuluPro/namebench | nb_third_party/dns/rdtypes/dsbase.py | 248 | 3445 | # Copyright (C) 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.rdata
import dns.rdatatype
class DSBase(dns.rdata.Rdata):
    """Base class for rdata that is like a DS record

    @ivar key_tag: the key tag
    @type key_tag: int
    @ivar algorithm: the algorithm
    @type algorithm: int
    @ivar digest_type: the digest type
    @type digest_type: int
    @ivar digest: the digest
    @type digest: int
    @see: draft-ietf-dnsext-delegation-signer-14.txt"""

    __slots__ = ['key_tag', 'algorithm', 'digest_type', 'digest']

    def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type,
                 digest):
        super(DSBase, self).__init__(rdclass, rdtype)
        self.key_tag = key_tag
        self.algorithm = algorithm
        self.digest_type = digest_type
        self.digest = digest

    def to_text(self, origin=None, relativize=True, **kw):
        # Master-file form: three integers followed by the digest as
        # hex, chunked at 128 characters.
        return '%d %d %d %s' % (self.key_tag, self.algorithm,
                                self.digest_type,
                                dns.rdata._hexify(self.digest,
                                                  chunksize=128))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse "<key_tag> <algorithm> <digest_type> <hex digest...>".
        # The digest may span several whitespace-separated tokens, which
        # are concatenated before being hex-decoded.
        key_tag = tok.get_uint16()
        algorithm = tok.get_uint8()
        digest_type = tok.get_uint8()
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value)
        digest = ''.join(chunks)
        digest = digest.decode('hex_codec')
        return cls(rdclass, rdtype, key_tag, algorithm, digest_type,
                   digest)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Wire format: 16-bit key tag, 8-bit algorithm, 8-bit digest
        # type (network byte order), then the raw digest bytes.
        header = struct.pack("!HBB", self.key_tag, self.algorithm,
                             self.digest_type)
        file.write(header)
        file.write(self.digest)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        header = struct.unpack("!HBB", wire[current : current + 4])
        current += 4
        rdlen -= 4
        # Everything after the 4-byte fixed header is the digest.
        digest = wire[current : current + rdlen]
        return cls(rdclass, rdtype, header[0], header[1], header[2], digest)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare the packed fixed-size headers first; only when they
        # are equal fall back to comparing the digests.
        hs = struct.pack("!HBB", self.key_tag, self.algorithm,
                         self.digest_type)
        ho = struct.pack("!HBB", other.key_tag, other.algorithm,
                         other.digest_type)
        v = cmp(hs, ho)
        if v == 0:
            v = cmp(self.digest, other.digest)
        return v
| apache-2.0 |
Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/wheel/test/conftest.py | 83 | 1689 | """
pytest local configuration plug-in
"""
import gc
import warnings
import pytest
@pytest.yield_fixture(scope='function', autouse=True)
def error_on_ResourceWarning():
    """Turn leaked file handles into test failures.

    On interpreters that define ResourceWarning (Python 3, PyPy3) each
    test runs inside a warnings context that records ResourceWarning's.
    After the test body (and a garbage-collection pass, which pypy3
    needs to run finalizers) any recorded warnings are reported through
    pytest.fail, listing the file descriptors that were left open.
    The failure is shown even for tests that otherwise passed.

    On Python 2 / PyPy there is no ResourceWarning, so the fixture is a
    no-op.  Because of autouse=True it applies to every test function
    in the module automatically.

    Primarily based on the examples found here:
    https://stackoverflow.com/questions/24717027/convert-python-3-resourcewarnings-into-exception
    """
    try:
        ResourceWarning
    except NameError:
        # Python 2, PyPy: nothing to capture.
        yield
        return
    # Python 3, PyPy3
    with warnings.catch_warnings(record=True) as caught:
        warnings.resetwarnings()                          # clear all filters
        warnings.simplefilter('ignore')                   # silence everything...
        warnings.simplefilter('always', ResourceWarning)  # ...except ResourceWarning
        yield                                             # run the test
        gc.collect()                                      # flush pypy3 finalizers
        if caught:
            report = '\n'.join(str(warning.message) for warning in caught)
            pytest.fail('The following file descriptors were not closed properly:\n'
                        + report,
                        pytrace=False)
| apache-2.0 |
mintoo/NetDim | pyNMS/gis/export_to_google_earth_window.py | 2 | 4065 | # Copyright (C) 2017 Antoine Fourmy <antoine dot fourmy at gmail dot com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from miscellaneous.decorators import update_paths
from objects.objects import node_subtype, link_class
from os.path import join
from PyQt5.QtWidgets import (
QFileDialog,
QGridLayout,
QLabel,
QLineEdit,
QPushButton,
QWidget
)
# The `warnings` module must be imported before it is used in the
# ImportError handler below; the original code called warnings.warn()
# without importing it, turning a missing optional dependency into a
# NameError at import time.
import warnings

try:
    import simplekml
except ImportError:
    # simplekml is optional: without it the Google Earth (KML) export
    # feature is disabled, but the rest of the module still loads.
    warnings.warn('simplekml not installed: export to google earth disabled')
class GoogleEarthExportWindow(QWidget):
    """Dialog that exports the current network to a Google Earth KML file."""
    # Base URL used to build per-subtype icon links embedded in the KML.
    github_path = 'https://raw.githubusercontent.com/afourmy/pyNMS/master/Icons'
    def __init__(self, controller):
        super().__init__()
        self.controller = controller
        self.setWindowTitle('Export to Google Earth')
        # Export options: label size for nodes, line width for links.
        node_size = QLabel('Node label size')
        self.node_size = QLineEdit('1')
        line_width = QLabel('Line width')
        self.line_width = QLineEdit('1')
        export = QPushButton('Export to KML')
        export.clicked.connect(self.kml_export)
        # Pre-build one simplekml style per node subtype and per link
        # subtype; kml_export looks them up by subtype.
        self.styles = {}
        for subtype in node_subtype:
            point_style = simplekml.Style()
            point_style.labelstyle.color = simplekml.Color.blue
            path_icon = join(self.github_path, 'default_{}.gif'.format(subtype))
            point_style.iconstyle.icon.href = path_icon
            self.styles[subtype] = point_style
        for subtype, cls in link_class.items():
            line_style = simplekml.Style()
            # we convert the RGB color to a KML color,
            # i.e #RRGGBB to #AABBGGRR
            kml_color = "#ff{0:02x}{1:02x}{2:02x}".format(*cls.color[::-1])
            line_style.linestyle.color = kml_color
            self.styles[subtype] = line_style
        layout = QGridLayout()
        layout.addWidget(node_size, 0, 0)
        layout.addWidget(self.node_size, 0, 1)
        layout.addWidget(line_width, 2, 0)
        layout.addWidget(self.line_width, 2, 1)
        layout.addWidget(export, 3, 0, 1, 2)
        self.setLayout(layout)
    @update_paths
    def kml_export(self, _):
        # Build a KML document with one placemark per node and one
        # linestring per link, then save it to a user-chosen file.
        kml = simplekml.Kml()
        for node in self.network.nodes.values():
            point = kml.newpoint(name=node.name, description=node.description)
            point.coords = [(node.longitude, node.latitude)]
            point.style = self.styles[node.subtype]
            point.style.labelstyle.scale = float(self.node_size.text())
        for link in self.network.all_links():
            line = kml.newlinestring(name=link.name, description=link.description)
            line.coords = [(link.source.longitude, link.source.latitude),
                           (link.destination.longitude, link.destination.latitude)]
            line.style = self.styles[link.subtype]
            # NOTE(review): the width is assigned as a string straight
            # from the line edit (node size is float()-converted above)
            # — presumably simplekml coerces it; confirm.
            line.style.linestyle.width = self.line_width.text()
        filepath = QFileDialog.getSaveFileName(
            self,
            'KML export',
            'project',
            '.kml'
        )
        selected_file = ''.join(filepath)
        kml.save(selected_file)
        self.close()
| gpl-3.0 |
houssine78/vertical-travel-porting-v8-wip | __unported__/airport/__init__.py | 6 | 1054 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import (
res_partner,
)
| agpl-3.0 |
ruffy91/micropython | examples/switch.py | 100 | 1029 | """
switch.py
=========
Light up some leds when the USR switch on the pyboard is pressed.
Example Usage::
Micro Python v1.0.1 on 2014-05-12; PYBv1.0 with STM32F405RG
Type "help()" for more information.
>>> import switch
>>> switch.run_loop()
Loop started.
Press Ctrl+C to break out of the loop.
"""
import pyb
switch = pyb.Switch()
red_led = pyb.LED(1)
green_led = pyb.LED(2)
orange_led = pyb.LED(3)
blue_led = pyb.LED(4)
all_leds = (red_led, green_led, orange_led, blue_led)
def run_loop(leds=all_leds):
    """
    Start the loop.

    Polls the USR switch forever: while it is held down every LED in
    ``leds`` is lit, otherwise they are all turned off.  Ctrl+C in
    interpreter mode raises OSError (VCPInterrupt), which ends the
    loop cleanly.

    :param `leds`: Which LEDs to light up upon switch press.
    :type `leds`: sequence of LED objects
    """
    print('Loop started.\nPress Ctrl+C to break out of the loop.')
    while True:
        try:
            # Plain for-loops instead of throwaway list comprehensions:
            # only the side effect of toggling the LEDs is wanted.
            if switch():
                for led in leds:
                    led.on()
            else:
                for led in leds:
                    led.off()
        except OSError:  # VCPInterrupt # Ctrl+C in interpreter mode.
            break
if __name__ == '__main__':
run_loop()
| mit |
faun/django_test | django/contrib/admindocs/views.py | 296 | 15504 | from django import template, templatetags
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import inspect, os, re
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
    # Stand-in for django.contrib.sites.Site when the sites framework
    # is not installed; provides the two attributes the views read.
    domain = 'example.com'
    name = 'my site'
def get_root_path():
    """Return the URL of the admin index page, best effort."""
    # Try the named 'admin:index' URL first, then the legacy admin-site
    # root view, and finally fall back to the ADMIN_SITE_ROOT_URL
    # setting (default '/admin/').
    try:
        return urlresolvers.reverse('admin:index')
    except urlresolvers.NoReverseMatch:
        from django.contrib import admin
        try:
            return urlresolvers.reverse(admin.site.root, args=[''])
        except urlresolvers.NoReverseMatch:
            return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/")
def doc_index(request):
    """Render the admindocs landing page (staff-only)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    return render_to_response('admin_doc/index.html', {
        'root_path': get_root_path(),
    }, context_instance=RequestContext(request))
doc_index = staff_member_required(doc_index)
def bookmarklets(request):
    """Render the bookmarklets page with an absolute admin URL."""
    admin_root = get_root_path()
    return render_to_response('admin_doc/bookmarklets.html', {
        'root_path': admin_root,
        # Scheme mirrors the current request so bookmarklets work on
        # both http and https deployments.
        'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
    }, context_instance=RequestContext(request))
bookmarklets = staff_member_required(bookmarklets)
def template_tag_index(request):
    """List every template tag from built-in and installed tag libraries."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    load_all_installed_template_libraries()
    tags = []
    app_libs = template.libraries.items()
    # Built-in libraries have no module name; mark them with None.
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for tag_name, tag_func in library.tags.items():
            # Each tag's docstring is split into title/body/metadata and
            # rendered from reST to HTML.
            title, body, metadata = utils.parse_docstring(tag_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
            if body:
                body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
            if library in template.builtins:
                tag_library = None
            else:
                # Display label: last component of the dotted module path.
                tag_library = module_name.split('.')[-1]
            tags.append({
                'name': tag_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_tag_index.html', {
        'root_path': get_root_path(),
        'tags': tags
    }, context_instance=RequestContext(request))
template_tag_index = staff_member_required(template_tag_index)
def template_filter_index(request):
    """List every template filter from built-in and installed libraries.

    Mirrors template_tag_index, but walks library.filters instead of
    library.tags.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    load_all_installed_template_libraries()
    filters = []
    app_libs = template.libraries.items()
    # Built-in libraries have no module name; mark them with None.
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for filter_name, filter_func in library.filters.items():
            title, body, metadata = utils.parse_docstring(filter_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
            if body:
                body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            filters.append({
                'name': filter_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_filter_index.html', {
        'root_path': get_root_path(),
        'filters': filters
    }, context_instance=RequestContext(request))
template_filter_index = staff_member_required(template_filter_index)
def view_index(request):
    """List all views reachable from the URLconf of each admin'd site."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    # ADMIN_FOR lets one admin instance document several settings
    # modules; by default only the current settings module is used.
    if settings.ADMIN_FOR:
        settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]
    views = []
    for settings_mod in settings_modules:
        urlconf = import_module(settings_mod.ROOT_URLCONF)
        view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
        # Use a real Site when the sites framework is installed,
        # otherwise the GenericSite placeholder.
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for (func, regex) in view_functions:
            views.append({
                # Class-based views have no __name__; fall back to the
                # class name.
                'name': getattr(func, '__name__', func.__class__.__name__),
                'module': func.__module__,
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'url': simplify_regex(regex),
            })
    return render_to_response('admin_doc/view_index.html', {
        'root_path': get_root_path(),
        'views': views
    }, context_instance=RequestContext(request))
view_index = staff_member_required(view_index)
def view_detail(request, view):
    """Show the reST-parsed docstring of one view.

    ``view`` is a dotted path; a 404 is raised when it cannot be
    imported or the attribute does not exist.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    mod, func = urlresolvers.get_mod_func(view)
    try:
        view_func = getattr(import_module(mod), func)
    except (ImportError, AttributeError):
        raise Http404
    title, body, metadata = utils.parse_docstring(view_func.__doc__)
    if title:
        title = utils.parse_rst(title, 'view', _('view:') + view)
    if body:
        body = utils.parse_rst(body, 'view', _('view:') + view)
    for key in metadata:
        metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
    return render_to_response('admin_doc/view_detail.html', {
        'root_path': get_root_path(),
        'name': view,
        'summary': title,
        'body': body,
        'meta': metadata,
    }, context_instance=RequestContext(request))
view_detail = staff_member_required(view_detail)
def model_index(request):
    """List the _meta options of every installed model."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    m_list = [m._meta for m in models.get_models()]
    return render_to_response('admin_doc/model_index.html', {
        'root_path': get_root_path(),
        'models': m_list
    }, context_instance=RequestContext(request))
model_index = staff_member_required(model_index)
def model_detail(request, app_label, model_name):
    """Show fields, many-to-many accessors, methods and related-object
    accessors for one model, identified by app label and lowercased
    model name (404 if either is unknown)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    # Get the model class.
    try:
        app_mod = models.get_app(app_label)
    except ImproperlyConfigured:
        raise Http404(_("App %r not found") % app_label)
    model = None
    for m in models.get_models(app_mod):
        if m._meta.object_name.lower() == model_name:
            model = m
            break
    if model is None:
        raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
    opts = model._meta
    # Gather fields/field descriptions.
    fields = []
    for field in opts.fields:
        # ForeignKey is a special case since the field will actually be a
        # descriptor that returns the other object
        if isinstance(field, models.ForeignKey):
            data_type = related_object_name = field.rel.to.__name__
            # NOTE(review): this rebinds the `app_label` view argument
            # to the related model's app label — verify this shadowing
            # is intentional.
            app_label = field.rel.to._meta.app_label
            verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
        else:
            data_type = get_readable_field_data_type(field)
            verbose = field.verbose_name
        fields.append({
            'name': field.name,
            'data_type': data_type,
            'verbose': verbose,
            'help_text': field.help_text,
        })
    # Gather many-to-many fields.
    for field in opts.many_to_many:
        data_type = related_object_name = field.rel.to.__name__
        app_label = field.rel.to._meta.app_label
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
        fields.append({
            'name': "%s.all" % field.name,
            "data_type": 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name' : "%s.count" % field.name,
            'data_type' : 'Integer',
            'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
        })
    # Gather model methods.
    for func_name, func in model.__dict__.items():
        # Only document methods that take just `self` (one positional
        # argument) and are not in the excluded-prefix list.
        if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
            try:
                for exclude in MODEL_METHODS_EXCLUDE:
                    if func_name.startswith(exclude):
                        # StopIteration is (ab)used to `continue` the
                        # outer loop from inside the inner one.
                        raise StopIteration
            except StopIteration:
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
            fields.append({
                'name': func_name,
                'data_type': get_return_data_type(func_name),
                'verbose': verbose,
            })
    # Gather related objects
    for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
        accessor = rel.get_accessor_name()
        fields.append({
            'name' : "%s.all" % accessor,
            'data_type' : 'List',
            'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name' : "%s.count" % accessor,
            'data_type' : 'Integer',
            'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
        })
    return render_to_response('admin_doc/model_detail.html', {
        'root_path': get_root_path(),
        'name': '%s.%s' % (opts.app_label, opts.object_name),
        'summary': _("Fields on %s objects") % opts.object_name,
        'description': model.__doc__,
        'fields': fields,
    }, context_instance=RequestContext(request))
model_detail = staff_member_required(model_detail)
def template_detail(request, template):
    """Show, for each admin'd site, where ``template`` would be loaded
    from along the site's TEMPLATE_DIRS search order."""
    templates = []
    for site_settings_module in settings.ADMIN_FOR:
        settings_mod = import_module(site_settings_module)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for dir in settings_mod.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                # Lazy: the file is only read if/when the template
                # actually renders the contents.
                'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '',
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                # Position of this directory in the search order.
                'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
            })
    return render_to_response('admin_doc/template_detail.html', {
        'root_path': get_root_path(),
        'name': template,
        'templates': templates,
    }, context_instance=RequestContext(request))
template_detail = staff_member_required(template_detail)
####################
# Helper functions #
####################
def missing_docutils_page(request):
    """Display an error message for people without docutils"""
    # Static page: rendered without a RequestContext on purpose.
    return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
    # Load/register all template tag libraries from installed apps.
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        # Candidate libraries: every .py file in the templatetags
        # package whose name starts with a letter.
        libraries = [
            os.path.splitext(p)[0]
            for p in os.listdir(os.path.dirname(mod.__file__))
            if p.endswith('.py') and p[0].isalpha()
        ]
        for library_name in libraries:
            try:
                lib = template.get_library(library_name)
            except template.InvalidTemplateLibrary, e:
                # Broken libraries are deliberately skipped so one bad
                # app does not break the whole documentation page.
                pass
def get_return_data_type(func_name):
    """Guess a human-readable return type from a method name.

    Names of the form ``get_*_list`` map to ``'List'`` and
    ``get_*_count`` to ``'Integer'``; anything else yields ``''``.
    """
    if not func_name.startswith('get_'):
        return ''
    for suffix, data_type in (('_list', 'List'), ('_count', 'Integer')):
        if func_name.endswith(suffix):
            return data_type
    return ''
def get_readable_field_data_type(field):
    """Return *field*'s human-readable type description, if any.

    The ``description`` attribute may contain ``%(attr)s`` placeholders,
    which are interpolated from the field instance's own attributes.
    """
    template = field.description
    return template % vars(field)
def extract_views_from_urlpatterns(urlpatterns, base=''):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a two-tuple: (view_func, regex)
    """
    views = []
    for p in urlpatterns:
        if hasattr(p, '_get_callback'):
            # Leaf pattern: resolve its callback, skipping views that
            # no longer exist.
            try:
                views.append((p._get_callback(), base + p.regex.pattern))
            except ViewDoesNotExist:
                continue
        elif hasattr(p, '_get_url_patterns'):
            # Included URLconf: recurse with the accumulated prefix.
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % p)
    return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')

def simplify_regex(pattern):
    """
    Clean up a urlpattern regex into something readable by Mere Humans:
    a pattern such as "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    becomes "<sport_slug>/athletes/<athlete_slug>/".
    """
    # Named groups first: keep only the <name> placeholder.
    cleaned = named_group_matcher.sub(lambda match: match.group(1), pattern)
    # Remaining (unnamed) groups collapse into a generic placeholder.
    cleaned = non_named_group_matcher.sub("<var>", cleaned)
    # Strip leftover regex metacharacters and collapse doubled slashes.
    for old, new in (('^', ''), ('$', ''), ('?', ''), ('//', '/'), ('\\', '')):
        cleaned = cleaned.replace(old, new)
    if cleaned.startswith('/'):
        return cleaned
    return '/' + cleaned
| bsd-3-clause |
goodwinnk/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_map.py | 327 | 3062 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
    """2to3 fixer wrapping map() calls in list() where needed.

    Three pattern alternatives are recognised: map(None, X),
    map(lambda ...: ..., it), and any other map(...) call.
    """
    BM_compatible = True
    PATTERN = """
    map_none=power<
        'map'
        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
    >
    |
    map_lambda=power<
        'map'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'map' trailer< '(' [arglist=any] ')' >
    >
    """

    # The whole fixer is skipped when the module already does
    # `from future_builtins import map` (handled by ConditionalFix).
    skip_on = 'future_builtins.map'

    def transform(self, node, results):
        if self.should_skip(node):
            return
        if node.parent.type == syms.simple_stmt:
            # map() used as a bare statement: the result is discarded,
            # so the author almost certainly wanted a for loop.
            self.warning(node, "You should use a for loop here")
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        elif "map_lambda" in results:
            # map(lambda fp: xp, it)  ->  [xp for fp in it]
            new = ListComp(results["xp"].clone(),
                           results["fp"].clone(),
                           results["it"].clone())
        else:
            if "map_none" in results:
                # map(None, X)  ->  list(X)
                new = results["arg"].clone()
            else:
                if "arglist" in results:
                    args = results["arglist"]
                    if args.type == syms.arglist and \
                       args.children[0].type == token.NAME and \
                       args.children[0].value == "None":
                        # Multi-argument map(None, ...) has no direct
                        # equivalent; warn and leave it untouched.
                        self.warning(node, "cannot convert map(None, ...) "
                                     "with multiple arguments because map() "
                                     "now truncates to the shortest sequence")
                        return
                if in_special_context(node):
                    # Already consumed by iter()/list()/for etc.; the
                    # lazy Python 3 map() is fine there.
                    return None
                new = node.clone()
                new.prefix = u""
                new = Call(Name(u"list"), [new])
        # Preserve the original node's whitespace/comment prefix.
        new.prefix = node.prefix
        return new
| apache-2.0 |
hunter007/django | tests/field_deconstruction/tests.py | 61 | 17839 | from __future__ import unicode_literals
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.utils import six
class FieldDeconstructionTests(SimpleTestCase):
"""
Tests the deconstruct() method on all core fields.
"""
def test_name(self):
"""
Tests the outputting of the correct name if assigned one.
"""
# First try using a "normal" field
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("is_awesome_test")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "is_awesome_test")
self.assertIsInstance(name, six.text_type)
# Now try with a ForeignKey
field = models.ForeignKey("some_fake.ModelName")
name, path, args, kwargs = field.deconstruct()
self.assertIsNone(name)
field.set_attributes_from_name("author")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(name, "author")
def test_auto_field(self):
field = models.AutoField(primary_key=True)
field.set_attributes_from_name("id")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.AutoField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"primary_key": True})
def test_big_integer_field(self):
field = models.BigIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BigIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_boolean_field(self):
field = models.BooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.BooleanField(default=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"default": True})
def test_char_field(self):
field = models.CharField(max_length=65)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65})
field = models.CharField(max_length=65, null=True, blank=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})
def test_char_field_choices(self):
field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CharField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})
def test_csi_field(self):
field = models.CommaSeparatedIntegerField(max_length=100)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 100})
def test_date_field(self):
field = models.DateField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now": True})
def test_datetime_field(self):
field = models.DateTimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.DateTimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True})
# Bug #21785
field = models.DateTimeField(auto_now=True, auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DateTimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})
def test_decimal_field(self):
field = models.DecimalField(max_digits=5, decimal_places=2)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})
def test_decimal_field_0_decimal_places(self):
"""
A DecimalField with decimal_places=0 should work (#22272).
"""
field = models.DecimalField(max_digits=5, decimal_places=0)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.DecimalField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})
def test_email_field(self):
field = models.EmailField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 254})
field = models.EmailField(max_length=255)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.EmailField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 255})
def test_file_field(self):
field = models.FileField(upload_to="foo/bar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar"})
# Test max_length
field = models.FileField(upload_to="foo/bar", max_length=200)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FileField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
def test_float_field(self):
field = models.FloatField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FloatField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_foreign_key(self):
# Test basic pointing
from django.contrib.auth.models import Permission
field = models.ForeignKey("auth.Permission")
field.remote_field.model = Permission
field.remote_field.field_name = "id"
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swap detection for swappable model
field = models.ForeignKey("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test nonexistent (for now) model
field = models.ForeignKey("something.Else")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "something.Else"})
# Test on_delete
field = models.ForeignKey("auth.User", on_delete=models.SET_NULL)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
# Test to_field preservation
field = models.ForeignKey("auth.Permission", to_field="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar"})
# Test related_name preservation
field = models.ForeignKey("auth.Permission", related_name="foobar")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_foreign_key_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ForeignKey("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ForeignKey")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_image_field(self):
field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ImageField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})
def test_integer_field(self):
field = models.IntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_ip_address_field(self):
field = models.IPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.IPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_generic_ip_address_field(self):
field = models.GenericIPAddressField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.GenericIPAddressField(protocol="IPv6")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.GenericIPAddressField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"protocol": "IPv6"})
def test_many_to_many_field(self):
# Test normal
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertFalse(hasattr(kwargs['to'], "setting_name"))
# Test swappable
field = models.ManyToManyField("auth.User")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.User"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
# Test through
field = models.ManyToManyField("auth.Permission", through="auth.Group")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
# Test custom db_table
field = models.ManyToManyField("auth.Permission", db_table="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
# Test related_name
field = models.ManyToManyField("auth.Permission", related_name="custom_table")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})
@override_settings(AUTH_USER_MODEL="auth.Permission")
def test_many_to_many_field_swapped(self):
# It doesn't matter that we swapped out user for permission;
# there's no validation. We just want to check the setting stuff works.
field = models.ManyToManyField("auth.Permission")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.ManyToManyField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"to": "auth.Permission"})
self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
def test_null_boolean_field(self):
field = models.NullBooleanField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.NullBooleanField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_integer_field(self):
field = models.PositiveIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_positive_small_integer_field(self):
field = models.PositiveSmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_slug_field(self):
field = models.SlugField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.SlugField(db_index=False, max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SlugField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"db_index": False, "max_length": 231})
def test_small_integer_field(self):
field = models.SmallIntegerField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.SmallIntegerField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_time_field(self):
field = models.TimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.TimeField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now': True})
field = models.TimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now_add': True})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.URLField(max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 231})
def test_binary_field(self):
field = models.BinaryField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BinaryField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
| bsd-3-clause |
biocyberman/bcbio-nextgen | bcbio/pipeline/genome.py | 1 | 16520 | """Read genome build configurations from Galaxy *.loc and bcbio-nextgen resource files.
"""
from six.moves import configparser
import glob
import os
import sys
from xml.etree import ElementTree
import toolz as tz
import yaml
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.distributed import objectstore
from bcbio.log import logger
from bcbio.ngsalign import star
from bcbio.pipeline import alignment
from bcbio.provenance import do
from bcbio.rnaseq import gtf
# ## bcbio-nextgen genome resource files
def get_resources(genome, ref_file, data):
    """Retrieve genome information from a genome-references.yaml file.

    genome -- genome build name; a "-test" suffix is stripped when locating
        the resource file.
    ref_file -- path to the reference file; the resources YAML is expected
        to live in the same directory.
    data -- sample data dictionary, passed through to ensure_annotations.

    Returns the resource dictionary with relative paths expanded to
    absolute paths. Raises IOError if the resource file does not exist.
    """
    base_dir = os.path.normpath(os.path.dirname(ref_file))
    resource_file = os.path.join(base_dir, "%s-resources.yaml" % genome.replace("-test", ""))
    if not os.path.exists(resource_file):
        raise IOError("Did not find resource file for %s: %s\n"
                      "To update bcbio_nextgen.py with genome resources for standard builds, run:\n"
                      "bcbio_nextgen.py upgrade -u skip"
                      % (genome, resource_file))
    with open(resource_file) as in_handle:
        # safe_load avoids arbitrary Python object construction from the YAML
        # input; yaml.load without an explicit Loader is deprecated and unsafe.
        resources = yaml.safe_load(in_handle)

    def resource_file_path(x):
        # Expand values that are paths relative to the reference directory.
        # NOTE(review): basestring is Python 2 only; switch to
        # six.string_types when porting this module to Python 3.
        if isinstance(x, basestring) and os.path.exists(os.path.join(base_dir, x)):
            return os.path.normpath(os.path.join(base_dir, x))
        return x

    cleaned = utils.dictapply(resources, resource_file_path)
    return ensure_annotations(cleaned, data)
def add_required_resources(resources):
    """Ensure CWL-referenced resource keys exist, filling missing ones with None.
    """
    for key_path in (["variation", "cosmic"], ["variation", "dbsnp"]):
        if not tz.get_in(key_path, resources):
            resources = tz.update_in(resources, key_path, lambda _: None)
    return resources
def ensure_annotations(resources, data):
    """Prepare any potentially missing annotations for downstream processing in a local directory.

    When a transcript GTF/GFF is configured, derives a gene BED file from it
    under the work directory's inputs/data/annotations folder and records it
    as resources["rnaseq"]["gene_bed"]. Mutates ``resources`` in place and
    returns it.
    """
    transcript_gff = tz.get_in(["rnaseq", "transcripts"], resources)
    if transcript_gff and utils.file_exists(transcript_gff):
        out_dir = os.path.join(tz.get_in(["dirs", "work"], data),
                               "inputs", "data", "annotations")
        resources["rnaseq"]["gene_bed"] = gtf.gtf_to_bed(transcript_gff, out_dir)
    return resources
# ## Utilities
def abs_file_paths(xs, base_dir=None, ignore_keys=None, fileonly_keys=None, cur_key=None,
                   do_download=True):
    """Normalize any file paths found in a subdirectory of configuration input.

    Recursively walks dictionaries, lists/tuples and strings. Strings that
    exist on disk (or are remote object-store paths, when do_download is
    enabled) are expanded to absolute paths; remote inputs are fetched into
    base_dir/inputs.

    base_dir -- directory to normalize relative paths to
    ignore_keys -- algorithm key names to ignore normalize for (keywords, not files/directories)
    fileonly_keys -- algorithm key names to only expand files (not directories)
    cur_key -- current key when calling recursively
    do_download -- whether remote (object store) paths should be fetched
    """
    ignore_keys = set([]) if ignore_keys is None else set(ignore_keys)
    fileonly_keys = set([]) if fileonly_keys is None else set(fileonly_keys)
    if base_dir is None:
        base_dir = os.getcwd()
    orig_dir = os.getcwd()
    # Relies on chdir so relative paths resolve against base_dir; NOTE(review):
    # this makes the function unsafe to call from multiple threads at once.
    os.chdir(base_dir)
    input_dir = os.path.join(base_dir, "inputs")
    if isinstance(xs, dict):
        out = {}
        for k, v in xs.items():
            # NOTE(review): basestring is Python 2 only -- confirm against the
            # module's six-based porting when moving to Python 3.
            if k not in ignore_keys and v and isinstance(v, basestring):
                # The literal string "none" (any case) is treated as a null value.
                if v.lower() == "none":
                    out[k] = None
                else:
                    out[k] = abs_file_paths(v, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
            elif isinstance(v, (list, tuple)):
                out[k] = [abs_file_paths(x, base_dir, ignore_keys, fileonly_keys, k, do_download=do_download)
                          for x in v]
            else:
                out[k] = v
    elif isinstance(xs, basestring):
        if os.path.exists(xs) or (do_download and objectstore.is_remote(xs)):
            # Download remote inputs locally; local existing paths pass through.
            dl = objectstore.download(xs, input_dir)
            if dl and cur_key not in ignore_keys and not (cur_key in fileonly_keys and not os.path.isfile(dl)):
                out = os.path.normpath(os.path.join(base_dir, dl))
            else:
                out = xs
        else:
            out = xs
    else:
        out = xs
    os.chdir(orig_dir)
    return out
# ## Galaxy integration -- *.loc files
def _get_galaxy_loc_file(name, galaxy_dt, ref_dir, galaxy_base):
    """Retrieve Galaxy *.loc file for the given reference/aligner name.

    Preference order: a file configured in the data table, then the
    aligner-specific *.loc file, then the base location file (which
    requires remapping to the aligner's index layout).

    Returns a (loc_file, need_remap) tuple.
    """
    if "file" in galaxy_dt and os.path.exists(os.path.join(galaxy_base, galaxy_dt["file"])):
        return os.path.join(galaxy_base, galaxy_dt["file"]), False
    if alignment.TOOLS[name].galaxy_loc_file is None:
        return os.path.join(ref_dir, alignment.BASE_LOCATION_FILE), True
    loc_file = os.path.join(ref_dir, alignment.TOOLS[name].galaxy_loc_file)
    if os.path.exists(loc_file):
        return loc_file, False
    return os.path.join(ref_dir, alignment.BASE_LOCATION_FILE), True
def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
    """Yield (dbkey, reference_path) pairs from a Galaxy *.loc file.

    Uses the data table's column layout when available and remapping is not
    needed; otherwise falls back to first-column dbkey / last-column path,
    skipping a leading "index" marker field.
    """
    if "column" in galaxy_dt:
        dbkey_i = galaxy_dt["column"].index("dbkey")
        path_i = galaxy_dt["column"].index("path")
    else:
        dbkey_i = None
    if not os.path.exists(loc_file):
        return
    with open(loc_file) as in_handle:
        for line in in_handle:
            # Skip blank lines and lines that begin with a comment marker.
            if not line.strip() or line.startswith("#"):
                continue
            parts = [x.strip() for x in line.strip().split("\t")]
            # Detect and report spaces instead of tabs
            if len(parts) == 1:
                parts = [x.strip() for x in line.strip().split(" ") if x.strip()]
                if len(parts) > 1:
                    raise IOError("Galaxy location file uses spaces instead of "
                                  "tabs to separate fields: %s" % loc_file)
            if dbkey_i is not None and not need_remap:
                dbkey, cur_ref = parts[dbkey_i], parts[path_i]
            else:
                if parts[0] == "index":
                    parts = parts[1:]
                dbkey, cur_ref = parts[0], parts[-1]
            yield (dbkey, cur_ref)
def _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
                             galaxy_config, data):
    """Retrieve reference genome file from Galaxy *.loc file.

    Reads from tool_data_table_conf.xml information for the index if it
    exists, otherwise uses heuristics to find line based on most common setups.
    Falls back to downloading a pre-prepared genome when the build is not
    present in the location file.
    """
    refs = [ref for dbkey, ref in _galaxy_loc_iter(loc_file, galaxy_dt, need_remap)
            if dbkey == genome_build]
    remap_fn = alignment.TOOLS[name].remap_index_fn
    # NOTE(review): this overwrites the need_remap argument passed by the
    # caller, so the parameter value is effectively ignored -- confirm intended.
    need_remap = remap_fn is not None
    if len(refs) == 0:
        logger.info("Downloading %s %s from AWS" % (genome_build, name))
        cur_ref = download_prepped_genome(genome_build, data, name, need_remap)
    # allow multiple references in a file and use the most recently added
    else:
        cur_ref = refs[-1]
        # Find genome directory and check for packed wf tarballs
        cur_ref_norm = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
        base_dir_i = cur_ref_norm.find("/%s/" % genome_build)
        base_dir = os.path.join(cur_ref_norm[:base_dir_i], genome_build)
        # Unpack any CWL workflow tarballs stored alongside the genome data.
        for tarball in glob.glob(os.path.join(base_dir, "*-wf.tar.gz")):
            cwlutils.unpack_tarballs(tarball, {"dirs": {"work": base_dir}}, use_subdir=False)
    if need_remap:
        assert remap_fn is not None, "%s requires remapping function from base location file" % name
        cur_ref = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
        # Convert the base sequence path into the aligner-specific index path.
        cur_ref = remap_fn(os.path.abspath(cur_ref))
    return cur_ref
def _get_galaxy_tool_info(galaxy_base):
    """Retrieve Galaxy tool-data information from defaults or galaxy config file.

    Starts from the standard locations under galaxy_base and overrides them
    with any matching options from the [app:main] section of
    universe_wsgi.ini, resolved relative to galaxy_base.
    """
    defaults = (("tool_data_table_config_path", "tool_data_table_conf.xml"),
                ("tool_data_path", "tool-data"))
    info = {key: os.path.join(galaxy_base, fname) for key, fname in defaults}
    config = configparser.ConfigParser()
    config.read(os.path.join(galaxy_base, "universe_wsgi.ini"))
    if "app:main" in config.sections():
        for option in config.options("app:main"):
            if option in info:
                info[option] = os.path.join(galaxy_base, config.get("app:main", option))
    return info
def _get_galaxy_data_table(name, dt_config_file):
    """Parse data table config file for details on tool *.loc location and columns.

    Returns a dictionary with the column layout ("column") and configured
    *.loc path ("file") for the table named ``name`` or ``<name>_indexes``;
    empty when the config file is missing or has no matching table.
    """
    out = {}
    if os.path.exists(dt_config_file):
        tdtc = ElementTree.parse(dt_config_file)
        # iter() replaces getiterator(), which was deprecated since Python 2.7
        # and removed in Python 3.9.
        for t in tdtc.iter("table"):
            if t.attrib.get("name", "") in [name, "%s_indexes" % name]:
                out["column"] = [x.strip() for x in t.find("columns").text.split(",")]
                out["file"] = t.find("file").attrib.get("path", "")
    return out
def get_refs(genome_build, aligner, galaxy_base, data):
    """Retrieve the reference genome file location from galaxy configuration.

    Collects, for samtools (reported as "fasta") and the requested aligner,
    the base reference file and any accompanying index files, plus bgzipped
    FASTA, RTG SDF and UCSC 2bit companions when present.
    """
    out = {}
    # samtools references are exposed under the "fasta" key downstream.
    name_remap = {"samtools": "fasta"}
    if genome_build:
        galaxy_config = _get_galaxy_tool_info(galaxy_base)
        for name in [x for x in ("samtools", aligner) if x]:
            galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
            loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
                                                        galaxy_base)
            cur_ref = _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
                                               galaxy_config, data)
            base = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
            # Directory-style indexes (e.g. aligner index folders) list their
            # contents; file-style indexes share the base file's prefix.
            if os.path.isdir(base):
                indexes = sorted(glob.glob(os.path.join(base, "*")))
            elif name != "samtools":
                indexes = sorted(glob.glob("%s*" % utils.splitext_plus(base)[0]))
            else:
                indexes = []
            name = name_remap.get(name, name)
            out[name] = {}
            if os.path.exists(base) and os.path.isfile(base):
                out[name]["base"] = base
            if indexes:
                out[name]["indexes"] = indexes
            # For references, add compressed inputs and indexes if they exist
            if name == "fasta" and "base" in out[name] and os.path.exists(out[name]["base"] + ".gz"):
                indexes = [out[name]["base"] + ".gz.fai", out[name]["base"] + ".gz.gzi",
                           utils.splitext_plus(out[name]["base"])[0] + ".dict"]
                out[name + "gz"] = {"base": out[name]["base"] + ".gz",
                                    "indexes": [x for x in indexes if os.path.exists(x)]}
    # add additional indices relative to the base
    if tz.get_in(["fasta", "base"], out):
        ref_dir, ref_filebase = os.path.split(out["fasta"]["base"])
        # NOTE(review): the rtg path is recorded without an existence check,
        # unlike twobit below -- confirm downstream tolerates a missing SDF.
        out["rtg"] = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "rtg",
                                                   "%s.sdf" % (os.path.splitext(ref_filebase)[0])))
        twobit = os.path.normpath(os.path.join(ref_dir, os.path.pardir, "ucsc",
                                               "%s.2bit" % (os.path.splitext(ref_filebase)[0])))
        if os.path.exists(twobit):
            out["twobit"] = twobit
    return out
def get_builds(galaxy_base):
    """Retrieve configured genome builds and reference files, using Galaxy configuration files.

    Allows multiple dbkey specifications in the same file; the most recently
    added entry for each dbkey wins. Returns (dbkey, reference) pairs sorted
    by dbkey.
    """
    name = "samtools"
    galaxy_config = _get_galaxy_tool_info(galaxy_base)
    galaxy_dt = _get_galaxy_data_table(name, galaxy_config["tool_data_table_config_path"])
    loc_file, need_remap = _get_galaxy_loc_file(name, galaxy_dt, galaxy_config["tool_data_path"],
                                                galaxy_base)
    assert not need_remap, "Should not need to remap reference files"
    # dict() keeps the last value seen for a repeated dbkey.
    fnames = dict(_galaxy_loc_iter(loc_file, galaxy_dt))
    return [(dbkey, fnames[dbkey]) for dbkey in sorted(fnames)]
# ## Retrieve pre-prepared genomes
# Tool names whose pre-prepared downloads live under different target
# directory names (a tool may require multiple targets).
REMAP_NAMES = {"tophat2": ["bowtie2"],
               "samtools": ["rtg", "seq"]}
# Indexes built locally from the downloaded sequence instead of being fetched
# pre-built.
INPLACE_INDEX = {"star": star.index}
def download_prepped_genome(genome_build, data, name, need_remap, out_dir=None):
    """Get a pre-prepared genome from S3, unpacking it locally.

    Supports runs on AWS where we can retrieve the resources on demand. Upgrades
    GEMINI in place if installed inside a Docker container with the biological data.
    GEMINI install requires write permissions to standard data directories -- works
    on AWS but not generalizable elsewhere.
    """
    from bcbio.variation import population
    from bcbio import install
    if not out_dir:
        out_dir = utils.safe_makedir(os.path.join(tz.get_in(["dirs", "work"], data),
                                                  "inputs", "data", "genomes"))
    for target in REMAP_NAMES.get(name, [name]):
        ref_dir = os.path.join(out_dir, genome_build, target)
        if not os.path.exists(ref_dir):
            if target in INPLACE_INDEX:
                # Build the index locally from the already-downloaded sequence.
                ref_file = glob.glob(os.path.normpath(os.path.join(ref_dir, os.pardir, "seq", "*.fa")))[0]
                # Need to add genome resources so we can retrieve GTF files for STAR
                data["genome_resources"] = get_resources(data["genome_build"], ref_file, data)
                INPLACE_INDEX[target](ref_file, ref_dir, data)
            else:
                # XXX Currently only supports genomes from S3 us-east-1 bucket.
                # Need to assess how slow this is from multiple regions and generalize to non-AWS.
                fname = objectstore.BIODATA_INFO["s3"].format(build=genome_build, target=target)
                try:
                    objectstore.connect(fname)
                # NOTE(review): bare except hides the real failure (auth,
                # network, missing key); narrow this when revisiting.
                except:
                    raise ValueError("Could not find reference genome file %s %s" % (genome_build, name))
                with utils.chdir(out_dir):
                    # Stream the tarball through pigz/tar rather than staging it.
                    cmd = objectstore.cl_input(fname, unpack=False, anonpipe=False) + " | pigz -d -c | tar -xvp"
                    do.run(cmd.format(**locals()), "Download pre-prepared genome data: %s" % genome_build)
    ref_file = glob.glob(os.path.normpath(os.path.join(ref_dir, os.pardir, "seq", "*.fa")))[0]
    if data.get("genome_build"):
        if (data.get("files") and population.do_db_build([data], need_bam=False)
              and population.support_gemini_orig(data)):
            # symlink base GEMINI directory to work directory, avoiding write/space issues
            out_gemini_dir = utils.safe_makedir(os.path.join(os.path.dirname(ref_dir), "gemini_data"))
            orig_gemini_dir = install.get_gemini_dir()
            # Remove empty initial directory created by installer
            if os.path.isdir(orig_gemini_dir) and len(os.listdir(orig_gemini_dir)) == 0:
                if os.path.islink(orig_gemini_dir):
                    os.remove(orig_gemini_dir)
                else:
                    os.rmdir(orig_gemini_dir)
            if not os.path.exists(orig_gemini_dir):
                os.symlink(out_gemini_dir, orig_gemini_dir)
            cmd = [os.path.join(os.path.dirname(sys.executable), "gemini"), "update", "--dataonly"]
            do.run(cmd, "Download GEMINI data")
    genome_dir = os.path.join(out_dir, genome_build)
    genome_build = genome_build.replace("-test", "")
    if need_remap or name == "samtools":
        return os.path.join(genome_dir, "seq", "%s.fa" % genome_build)
    else:
        ref_dir = os.path.join(genome_dir, REMAP_NAMES.get(name, [name])[-1])
        # The index base name is the shared prefix of the downloaded files,
        # with any trailing dots trimmed.
        base_name = os.path.commonprefix(os.listdir(ref_dir))
        while base_name.endswith("."):
            base_name = base_name[:-1]
        return os.path.join(ref_dir, base_name)
| mit |
goltermann/kubernetes | Godeps/_workspace/src/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return a list covering all primitive types plus a few combo types.

    The returned layout is: the primitive values, then the primitive list
    itself (nested), the integer 1, and finally the composite dictionaries.
    """
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464.0,
        6464646464.0,
        False,
        True,
        u"null",
        None,
        u"someday",
        1328176922000002000,
        u"",
        -2206187877999998000,
        u"bytestring",
        270,
        u"none",
        -2013855847999995777,
        #-6795364578871345152,
    ]
    composites = [
        { "true": True,
          "false": False },
        { "true": u"True",
          "false": False,
          "uint16(1616)": 1616 },
        { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
          "int32":32323232, "bool": True,
          "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
          "SHORT STRING": u"1234567890" },
        { True: "true", 138: False, "false": 200 }
    ]
    return primitives + [primitives, 1] + composites
def build_test_data(destdir):
    """Serialize every test value with msgpack and cbor into golden files.

    Writes destdir/<i>.msgpack.golden and destdir/<i>.cbor.golden for the
    i-th value from get_test_data_list().
    """
    l = get_test_data_list()
    for i, value in enumerate(l):
        # with-blocks ensure the golden files are closed even when an
        # encoder raises part way through.
        with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
            f.write(msgpack.dumps(value))
        with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
            f.write(cbor.dumps(value))
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:port.

    stopTimeSec > 0 schedules a timer thread that stops the server after
    that many seconds; otherwise the server runs until interrupted.
    """
    # Handler exposing the two echo methods the Go test suite calls.
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    """Call the Python echo server over msgpack-rpc and print the replies.

    Note: this file uses Python 2 print statements and must run under Python 2.
    """
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    """Call the Go echo server over msgpack-rpc and print the replies.

    The Go service namespaces its methods under TestRpcInt. Note: this file
    uses Python 2 print statements and must run under Python 2.
    """
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Route command-line arguments to the matching helper function."""
    cmd = args[0] if args else None
    if cmd == "testdata" and len(args) == 2:
        build_test_data(args[1])
    elif cmd == "rpc-server" and len(args) == 3:
        doRpcServer(int(args[1]), int(args[2]))
    elif cmd == "rpc-client-python-service" and len(args) == 2:
        doRpcClientToPythonSvc(int(args[1]))
    elif cmd == "rpc-client-go-service" and len(args) == 2:
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")

if __name__ == "__main__":
    doMain(sys.argv[1:])
| apache-2.0 |
jsilhan/dnf | dnf/conf/__init__.py | 5 | 2403 | # conf.py
# dnf configuration classes.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
The configuration classes and routines in yum are splattered over too many
places, hard to change and debug. The new structure here will replace that. Its
goal is to:
* accept configuration options from all three sources (the main config file,
repo config files, command line switches)
* handle all the logic of storing those and producing related values.
* returning configuration values.
* optionally: asserting no value is overridden once it has been applied
somewhere (e.g. do not let a new repo be initialized with different global
cache path than an already existing one).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from dnf.conf.config import PRIO_DEFAULT, PRIO_MAINCONFIG, PRIO_AUTOMATICCONFIG
from dnf.conf.config import PRIO_REPOCONFIG, PRIO_PLUGINDEFAULT, PRIO_PLUGINCONFIG
from dnf.conf.config import PRIO_COMMANDLINE, PRIO_RUNTIME
from dnf.conf.config import Value
from dnf.conf.config import Option, ListOption, UrlOption, UrlListOption
from dnf.conf.config import PathOption, IntOption, PositiveIntOption
from dnf.conf.config import SecondsOption, BoolOption, FloatOption
from dnf.conf.config import SelectionOption, CaselessSelectionOption
from dnf.conf.config import BytesOption, ThrottleOption
from dnf.conf.config import BaseConfig, MainConf, RepoConf
from dnf.conf.config import ParsingError, ConfigParser
# Public alias: expose the main configuration class as dnf.conf.Conf as well
# as MainConf -- presumably kept for API compatibility; verify against callers.
Conf = MainConf
| gpl-2.0 |
Gadal/sympy | sympy/physics/mechanics/tests/test_lagrange.py | 19 | 9351 | from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame, Point,
RigidBody, LagrangesMethod, Particle,
inertia, Lagrangian)
from sympy import symbols, pi, sin, cos, tan, simplify, Function, \
Derivative, Matrix
def test_disc_on_an_incline_plane():
    """Rolling disc on an incline: the acceleration from LagrangesMethod
    must match the hand-derived result 2*g*sin(alpha)/3 (independent of mass
    and radius)."""
    # Disc rolling on an inclined plane
    # First the generalized coordinates are created. The mass center of the
    # disc is located from top vertex of the inclined plane by the generalized
    # coordinate 'y'. The orientation of the disc is defined by the angle
    # 'theta'. The mass of the disc is 'm' and its radius is 'R'. The length of
    # the inclined path is 'l', the angle of inclination is 'alpha'. 'g' is the
    # gravitational constant.
    y, theta = dynamicsymbols('y theta')
    yd, thetad = dynamicsymbols('y theta', 1)
    m, g, R, l, alpha = symbols('m g R l alpha')
    # Next, we create the inertial reference frame 'N'. A reference frame 'A'
    # is attached to the inclined plane. Finally a frame is created which is attached to the disk.
    N = ReferenceFrame('N')
    A = N.orientnew('A', 'Axis', [pi/2 - alpha, N.z])
    B = A.orientnew('B', 'Axis', [-theta, A.z])
    # Creating the disc 'D'; we create the point that represents the mass
    # center of the disc and set its velocity. The inertia dyadic of the disc
    # is created. Finally, we create the disc.
    Do = Point('Do')
    Do.set_vel(N, yd * A.x)
    I = m * R**2 / 2 * B.z | B.z
    D = RigidBody('D', Do, B, m, (I, Do))
    # To construct the Lagrangian, 'L', of the disc, we determine its kinetic
    # and potential energies, T and U, respectively. L is defined as the
    # difference between T and U.
    D.set_potential_energy(m * g * (l - y) * sin(alpha))
    L = Lagrangian(N, D)
    # We then create the list of generalized coordinates and constraint
    # equations. The constraint arises due to the disc rolling without slip on
    # on the inclined path. We then invoke the 'LagrangesMethod' class and
    # supply it the necessary arguments and generate the equations of motion.
    # The'rhs' method solves for the q_double_dots (i.e. the second derivative
    # with respect to time of the generalized coordinates and the lagrange
    # multiplers.
    q = [y, theta]
    hol_coneqs = [y - R * theta]
    # NOTE(review): reassigning 'm' shadows the mass symbol defined above;
    # harmless here since the mass cancels out of the result, but confusing.
    m = LagrangesMethod(L, q, hol_coneqs=hol_coneqs)
    m.form_lagranges_equations()
    rhs = m.rhs()
    rhs.simplify()
    assert rhs[2] == 2*g*sin(alpha)/3
def test_simp_pen():
    """Verify LagrangesMethod reproduces the hand-derived EOM of a simple pendulum."""
    # This tests that the equations generated by LagrangesMethod are identical
    # to those obtained by hand calculations. The system under consideration is
    # the simple pendulum.
    # We begin by creating the generalized coordinates as per the requirements
    # of LagrangesMethod. Also we created the associate symbols
    # that characterize the system: 'm' is the mass of the bob, l is the length
    # of the massless rigid rod connecting the bob to a point O fixed in the
    # inertial frame.
    # (u and ud are created here but never used in this test)
    q, u = dynamicsymbols('q u')
    qd, ud = dynamicsymbols('q u ', 1)
    l, m, g = symbols('l m g')
    # We then create the inertial frame and a frame attached to the massless
    # string following which we define the inertial angular velocity of the
    # string.
    N = ReferenceFrame('N')
    A = N.orientnew('A', 'Axis', [q, N.z])
    A.set_ang_vel(N, qd * N.z)
    # Next, we create the point O and fix it in the inertial frame. We then
    # locate the point P to which the bob is attached. Its corresponding
    # velocity is then determined by the 'two point formula'.
    O = Point('O')
    O.set_vel(N, 0)
    P = O.locatenew('P', l * A.x)
    P.v2pt_theory(O, N, A)
    # The 'Particle' which represents the bob is then created and its
    # Lagrangian generated.
    Pa = Particle('Pa', P, m)
    Pa.set_potential_energy(- m * g * l * cos(q))
    L = Lagrangian(N, Pa)
    # The 'LagrangesMethod' class is invoked to obtain equations of motion.
    lm = LagrangesMethod(L, [q])
    lm.form_lagranges_equations()
    RHS = lm.rhs()
    # RHS[1] is q_dotdot; the classic pendulum result is -g*sin(q)/l
    assert RHS[1] == -g*sin(q)/l
def test_nonminimal_pendulum():
    """Pendulum in Cartesian (non-minimal) coordinates with a length constraint.

    Checks both the generated equations of motion and the solved Lagrange
    multiplier against closed-form expressions.
    """
    q1, q2 = dynamicsymbols('q1:3')
    q1d, q2d = dynamicsymbols('q1:3', level=1)
    L, m, t = symbols('L, m, t')
    # gravity is deliberately numeric so the expected expressions below
    # carry the literal 9.8 / 19.6 coefficients
    g = 9.8
    # Compose World Frame
    N = ReferenceFrame('N')
    pN = Point('N*')
    pN.set_vel(N, 0)
    # Create point P, the pendulum mass
    P = pN.locatenew('P1', q1*N.x + q2*N.y)
    P.set_vel(N, P.pos_from(pN).dt(N))
    pP = Particle('pP', P, m)
    # Constraint Equations
    f_c = Matrix([q1**2 + q2**2 - L**2])
    # Calculate the lagrangian, and form the equations of motion
    Lag = Lagrangian(N, pP)
    LM = LagrangesMethod(Lag, [q1, q2], hol_coneqs=f_c,
                         forcelist=[(P, m*g*N.x)], frame=N)
    LM.form_lagranges_equations()
    # Check solution
    lam1 = LM.lam_vec[0, 0]
    eom_sol = Matrix([[m*Derivative(q1, t, t) - 9.8*m + 2*lam1*q1],
                      [m*Derivative(q2, t, t) + 2*lam1*q2]])
    assert LM.eom == eom_sol
    # Check multiplier solution
    lam_sol = Matrix([(19.6*q1 + 2*q1d**2 + 2*q2d**2)/(4*q1**2/m + 4*q2**2/m)])
    assert LM.solve_multipliers(sol_type='Matrix') == lam_sol
def test_dub_pen():
    """Double pendulum: generated EOM must match hand-derived expressions."""
    # The system considered is the double pendulum. Like in the
    # test of the simple pendulum above, we begin by creating the generalized
    # coordinates and the simple generalized speeds and accelerations which
    # will be used later. Following this we create frames and points necessary
    # for the kinematics. The procedure isn't explicitly explained as this is
    # similar to the simple pendulum. Also this is documented on the pydy.org
    # website.
    # (u1, u2 and their derivatives are created but unused in this test)
    q1, q2 = dynamicsymbols('q1 q2')
    q1d, q2d = dynamicsymbols('q1 q2', 1)
    q1dd, q2dd = dynamicsymbols('q1 q2', 2)
    u1, u2 = dynamicsymbols('u1 u2')
    u1d, u2d = dynamicsymbols('u1 u2', 1)
    l, m, g = symbols('l m g')
    N = ReferenceFrame('N')
    A = N.orientnew('A', 'Axis', [q1, N.z])
    B = N.orientnew('B', 'Axis', [q2, N.z])
    # NOTE(review): both angular velocities are expressed in A.z; since A.z
    # and B.z coincide with N.z for planar rotation this is equivalent
    A.set_ang_vel(N, q1d * A.z)
    B.set_ang_vel(N, q2d * A.z)
    O = Point('O')
    P = O.locatenew('P', l * A.x)
    R = P.locatenew('R', l * B.x)
    O.set_vel(N, 0)
    P.v2pt_theory(O, N, A)
    R.v2pt_theory(P, N, B)
    ParP = Particle('ParP', P, m)
    ParR = Particle('ParR', R, m)
    ParP.set_potential_energy(- m * g * l * cos(q1))
    ParR.set_potential_energy(- m * g * l * cos(q1) - m * g * l * cos(q2))
    L = Lagrangian(N, ParP, ParR)
    lm = LagrangesMethod(L, [q1, q2])
    lm.form_lagranges_equations()
    # compare against the textbook equations, modulo simplification
    assert simplify(l*m*(2*g*sin(q1) + l*sin(q1)*sin(q2)*q2dd
                        + l*sin(q1)*cos(q2)*q2d**2 - l*sin(q2)*cos(q1)*q2d**2
                        + l*cos(q1)*cos(q2)*q2dd + 2*l*q1dd) - lm.eom[0]) == 0
    assert simplify(l*m*(g*sin(q2) + l*sin(q1)*sin(q2)*q1dd
                        - l*sin(q1)*cos(q2)*q1d**2 + l*sin(q2)*cos(q1)*q1d**2
                        + l*cos(q1)*cos(q2)*q1dd + l*q2dd) - lm.eom[1]) == 0
def test_rolling_disc():
    """Rolling disc built from the contact point up; checks mass matrix and RHS."""
    # Rolling Disc Example
    # Here the rolling disc is formed from the contact point up, removing the
    # need to introduce generalized speeds. Only 3 configuration and 3
    # speed variables are need to describe this system, along with the
    # disc's mass and radius, and the local gravity.
    q1, q2, q3 = dynamicsymbols('q1 q2 q3')
    q1d, q2d, q3d = dynamicsymbols('q1 q2 q3', 1)
    r, m, g = symbols('r m g')
    # The kinematics are formed by a series of simple rotations. Each simple
    # rotation creates a new frame, and the next rotation is defined by the new
    # frame's basis vectors. This example uses a 3-1-2 series of rotations, or
    # Z, X, Y series of rotations. Angular velocity for this is defined using
    # the second frame's basis (the lean frame).
    N = ReferenceFrame('N')
    Y = N.orientnew('Y', 'Axis', [q1, N.z])
    L = Y.orientnew('L', 'Axis', [q2, Y.x])
    R = L.orientnew('R', 'Axis', [q3, L.y])
    # This is the translational kinematics. We create a point with no velocity
    # in N; this is the contact point between the disc and ground. Next we form
    # the position vector from the contact point to the disc's center of mass.
    # Finally we form the velocity and acceleration of the disc.
    C = Point('C')
    C.set_vel(N, 0)
    Dmc = C.locatenew('Dmc', r * L.z)
    Dmc.v2pt_theory(C, N, R)
    # Forming the inertia dyadic.
    I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
    BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
    # Finally we form the equations of motion, using the same steps we did
    # before. Supply the Lagrangian, the generalized speeds.
    BodyD.set_potential_energy(- m * g * r * cos(q2))
    Lag = Lagrangian(N, BodyD)
    q = [q1, q2, q3]
    # rebind q1..q3 to plain Functions so the expected expressions below can
    # be written explicitly as q1(t), q2(t), q3(t)
    q1 = Function('q1')
    q2 = Function('q2')
    q3 = Function('q3')
    l = LagrangesMethod(Lag, q)
    l.form_lagranges_equations()
    RHS = l.rhs()
    RHS.simplify()
    t = symbols('t')
    assert (l.mass_matrix[3:6] == [0, 5*m*r**2/4, 0])
    assert RHS[4].simplify() == (
        (-8*g*sin(q2(t)) + r*(5*sin(2*q2(t))*Derivative(q1(t), t) +
        12*cos(q2(t))*Derivative(q3(t), t))*Derivative(q1(t), t))/(10*r))
    assert RHS[5] == (-5*cos(q2(t))*Derivative(q1(t), t) + 6*tan(q2(t)
        )*Derivative(q3(t), t) + 4*Derivative(q1(t), t)/cos(q2(t))
        )*Derivative(q2(t), t)
| bsd-3-clause |
makkalot/func | funcweb/funcweb/widget_validation.py | 6 | 12760 | from turbogears import validators #the shiny validator part
class WidgetSchemaFactory(object):
    """
    The purpose of the class is to produce a
    validators.Schema object according to method
    arguments that are retrieved from minions
    """
    def __init__(self,method_argument_dict):
        """
        @param method_argument_dict : The dict that is
        from minion in format of {'arg':{'type':'string','options':[...]}}
        the format is defined in func/minion/func_arg.py
        """
        self.method_argument_dict = method_argument_dict
        # NOTE: despite the name this is a dict of {argument_name: validator}
        self.validator_list = {} #the validator that will create final schema
    def _add_validators(self):
        """
        Method is an entry point of factory iters over the all arguments
        and according to their types it sends the process to more specialized
        validator adders
        """
        # a mirror var to show that these are same things
        # ('list*' arguments reuse the plain list validator builder)
        mirror_case = {'list*':'list'}
        for argument_name,argument_values in self.method_argument_dict.iteritems():
            #some lazy stuff :)
            #for ex : _add_int_validator(some_arg)
            # dynamic dispatch: the declared type selects the builder method
            current_type = argument_values['type']
            if current_type == "list*":
                getattr(self,"_add_%s_validator"%(mirror_case[current_type]))(argument_name)
            else:
                getattr(self,"_add_%s_validator"%(current_type))(argument_name)
    def _add_boolean_validator(self,argument_name):
        """
        Gets the options of the boolean type and adds a
        new validators.Bool to validator_list
        """
        bool_data_set = {}
        #the optional keyword
        if self.method_argument_dict[argument_name].has_key('optional'):
            if self.method_argument_dict[argument_name]['optional']:
                bool_data_set['not_empty']=False
            else:
                bool_data_set['not_empty']=True
        if bool_data_set:
            self.validator_list[argument_name]=validators.Bool(**bool_data_set)
        else:
            self.validator_list[argument_name]=validators.Bool()
    def _add_int_validator(self,argument_name):
        """
        Gets the options of the int type and adds a
        new validator to validator_list
        """
        #the initializer for the int_validator
        int_data_set = {}
        #the optional keyword
        if self.method_argument_dict[argument_name].has_key('optional'):
            if self.method_argument_dict[argument_name]['optional']:
                int_data_set['not_empty']=False
            else:
                int_data_set['not_empty']=True
        if self.method_argument_dict[argument_name].has_key('range'):
            #because the range is [min,max] list the 0 is min 1 is the max
            int_data_set['min']=self.method_argument_dict[argument_name]['range'][0]
            int_data_set['max']=self.method_argument_dict[argument_name]['range'][1]
        # explicit min/max keys override the bounds taken from 'range'
        if self.method_argument_dict[argument_name].has_key('min'):
            int_data_set['min']=self.method_argument_dict[argument_name]['min']
        if self.method_argument_dict[argument_name].has_key('max'):
            int_data_set['max']=self.method_argument_dict[argument_name]['max']
        #add the validator to the list
        if int_data_set:
            self.validator_list[argument_name]=MinionIntValidator(**int_data_set)
        else:
            self.validator_list[argument_name]=MinionIntValidator()
    def _add_string_validator(self,argument_name):
        """
        Gets the options of the string type and adds a
        new validator to validator_list
        """
        string_data_set={}
        str_validator_list =[]
        if self.method_argument_dict[argument_name].has_key('optional'):
            if self.method_argument_dict[argument_name]['optional']:
                string_data_set['not_empty']=False
            else:
                string_data_set['not_empty']=True
        if self.method_argument_dict[argument_name].has_key('min_length'):
            string_data_set['min']=self.method_argument_dict[argument_name]['min_length']
        if self.method_argument_dict[argument_name].has_key('max_length'):
            string_data_set['max']=self.method_argument_dict[argument_name]['max_length']
        if self.method_argument_dict[argument_name].has_key('validator'):
            # a free-form regex constraint supplied by the module author
            str_validator_list.append(getattr(validators,'Regex')(self.method_argument_dict[argument_name]['validator']))
        #if we have set a string_data_set
        if string_data_set:
            str_validator_list.append(getattr(validators,'String')(**string_data_set))
        #if true it should be a validator.All thing
        if len(str_validator_list)>1:
            self.validator_list[argument_name]=getattr(validators,'All')(*str_validator_list)
        elif str_validator_list:
            self.validator_list[argument_name]=str_validator_list[0]
        else: #if there is no option
            self.validator_list[argument_name]=getattr(validators,'String')()
    def _add_float_validator(self,argument_name):
        """
        Gets the options of the float type and adds a
        new validator to validator_list
        """
        #the initializer for the float_validator
        float_data_set = {}
        #is it optional
        if self.method_argument_dict[argument_name].has_key('optional'):
            if self.method_argument_dict[argument_name]['optional']:
                float_data_set['not_empty']=False
            else:
                float_data_set['not_empty']=True
        if self.method_argument_dict[argument_name].has_key('min'):
            float_data_set['min']=self.method_argument_dict[argument_name]['min']
        if self.method_argument_dict[argument_name].has_key('max'):
            float_data_set['max']=self.method_argument_dict[argument_name]['max']
        #add the validator to the list
        if float_data_set:
            self.validator_list[argument_name]=MinionFloatValidator(**float_data_set)
        else:
            self.validator_list[argument_name]=MinionFloatValidator()
    def _add_list_validator(self,argument_name,the_type='list'):
        """
        Gets the options of the list type and adds a
        new validator to validator_list
        (also reused by _add_hash_validator via the the_type keyword)
        """
        list_data_set = {}
        #is it optional
        if self.method_argument_dict[argument_name].has_key('optional'):
            if self.method_argument_dict[argument_name]['optional']:
                list_data_set['not_empty']=False
            else:
                list_data_set['not_empty']=True
        if self.method_argument_dict[argument_name].has_key('validator'):
            list_data_set['regex_string'] = self.method_argument_dict[argument_name]['validator']
        if list_data_set:
            if the_type == 'list':
                self.validator_list[argument_name]=MinionListValidator(**list_data_set)
            else:
                self.validator_list[argument_name]=MinionHashValidator(**list_data_set)
        else:
            if the_type == 'list':
                self.validator_list[argument_name]=MinionListValidator()
            else:
                self.validator_list[argument_name]=MinionHashValidator()
    def _add_hash_validator(self,argument_name):
        """
        Gets the options of the hash type and adds a
        new validator to validator_list
        """
        self._add_list_validator(argument_name,the_type = 'hash')
    def get_ready_schema(self):
        """
        Get the final validator schema
        """
        final_schema = validators.Schema()
        # build the individual validators lazily on first request
        if not self.validator_list:
            self._add_validators()
        for vd_name,vd in self.validator_list.iteritems():
            #setattr(final_schema,vd_name,vd)
            # attach each validator directly to the schema's field registry
            getattr(final_schema,'fields')[vd_name]= vd
        return final_schema
########################################################################
class MinionIntValidator(validators.FancyValidator):
    """
    Confirms that the input/output is of the proper type of int and,
    when configured, that it lies within the optional [min, max] bounds.
    """
    # bounds are injected as keyword arguments by FancyValidator;
    # None means "no bound"
    min = None
    max = None
    def _to_python(self,value,state):
        """
        Will check just the type here and return
        value to be validated in validate_python
        """
        try:
            value = int(value)
        except (ValueError, TypeError):
            raise validators.Invalid('The field should be integer',value,state)
        return value
    def validate_python(self,value,state):
        """
        The actual validator: enforce the optional lower/upper bounds.
        """
        # compare against None explicitly so a configured bound of 0 is still
        # honoured (the previous 'if self.min and self.min:' truthiness test
        # skipped the check when the bound was 0)
        if self.min is not None:
            if value < self.min:
                raise validators.Invalid('The number you entered should be bigger than %d'%(self.min),value,state)
        if self.max is not None:
            if value > self.max:
                raise validators.Invalid('The number you entered exceeds the %d'%(self.max),value,state)
##################################################################
class MinionFloatValidator(MinionIntValidator):
    """
    Like MinionIntValidator, but coerces the raw value to float;
    the min/max range checking is inherited from the parent class.
    """
    def _to_python(self,value,state):
        """
        Coerce the incoming value to float, rejecting anything that
        cannot be converted; range validation happens in validate_python.
        """
        try:
            return float(value)
        except (ValueError, TypeError):
            raise validators.Invalid('The field should be a float',value,state)
#################################################################
class MinionListValidator(validators.FancyValidator):
    """
    Validates a list submitted from the repeating list widget: flattens the
    incoming row dicts into a plain list and optionally checks every element
    against a regular expression supplied via regex_string.
    """
    # optional regex pattern (a string) every list element must match
    regex_string = None
    def _to_python(self,value,state):
        """
        Will check just the type here and return
        value to be validated in validate_python
        """
        #will add more beautiful validation here after
        #integrate the complex widgets for lists and dicts
        if self.not_empty:
            if len(value)==0:
                raise validators.Invalid('Empty list passed when not_empty is set',value,state)
        if not isinstance(value, list):
            value = list(value)
        #convert the data to proper format: each entry is a widget row dict,
        #only its values are interesting
        final_list = []
        for hash_data in value:
            final_list.extend(hash_data.values())
        return final_list
    def validate_python(self,value,state):
        import re
        if self.regex_string:
            try:
                compiled_regex = re.compile(self.regex_string)
            except Exception:
                # fixed: the original message had no %s placeholder, so the
                # '%' formatting itself raised TypeError instead of reporting
                # the broken pattern
                raise validators.Invalid('The passed regex_string %s is not a valid expression'%self.regex_string,value,state)
            for list_value in value:
                if not re.match(compiled_regex,str(list_value)):
                    raise validators.Invalid('The %s doesnt match to the regex expression that was supplied'%list_value,value,state)
        #there is no else for now :)
class MinionHashValidator(validators.FancyValidator):
    """
    Validates a hash submitted from the repeating key/value widget: rebuilds
    a dict from the keyfield/valuefield rows and optionally checks every
    value against a regular expression supplied via regex_string.
    """
    # optional regex pattern (a string) every hash value must match
    regex_string = None
    def _to_python(self,value,state):
        """
        Will check just the type here and return
        value to be validated in validate_python
        """
        #will add more beautiful validation here after
        #integrate the complex widgets for lists and dicts
        if self.not_empty:
            if len(value)==0:
                raise validators.Invalid('Empty hash passed when not_empty is set',value,state)
        #convert the data to proper format: each entry is a widget row dict
        #with 'keyfield'/'valuefield' entries
        #(the old always-true "is it a dict" re-check on the freshly built
        #dict was unreachable and has been dropped)
        final_hash = {}
        for hash_data in value:
            final_hash[hash_data['keyfield']] = hash_data['valuefield']
        return final_hash
    def validate_python(self,value,state):
        import re
        if self.regex_string:
            try:
                compiled_regex = re.compile(self.regex_string)
            except Exception:
                # fixed: the original message had no %s placeholder, so the
                # '%' formatting itself raised TypeError instead of reporting
                # the broken pattern
                raise validators.Invalid('The passed regex_string %s is not a valid expression'%self.regex_string,value,state)
            for dict_value in value.itervalues():
                if not re.match(compiled_regex,str(dict_value)):
                    raise validators.Invalid('The %s doesnt match to the regex expression that was supplied'%dict_value,value,state)
# This module is meant to be imported by the funcweb controllers;
# running it directly is a deliberate no-op.
if __name__ == "__main__":
    pass
| gpl-2.0 |
neilLasrado/frappe | frappe/core/doctype/data_import/test_data_import.py | 6 | 4346 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.core.doctype.data_export import exporter
from frappe.core.doctype.data_import import importer
from frappe.utils.csvutils import read_csv_content
class TestDataImport(unittest.TestCase):
	"""Round-trip tests: export templates/data for a DocType, then import CSV/XLSX back."""
	def test_export(self):
		# exporting a template alone should still carry the DocType name
		exporter.export_data("User", all_doctypes=True, template=True)
		content = read_csv_content(frappe.response.result)
		# NOTE(review): assertTrue's second argument is a failure *message*,
		# so this only checks the cell is truthy, not equal to "User" --
		# the same pattern recurs below; likely assertEqual was intended
		self.assertTrue(content[1][1], "User")
	def test_export_with_data(self):
		exporter.export_data("User", all_doctypes=True, template=True, with_data=True)
		content = read_csv_content(frappe.response.result)
		self.assertTrue(content[1][1], "User")
		# exported data rows should include the Administrator user
		self.assertTrue('"Administrator"' in [c[1] for c in content if len(c)>1])
	def test_export_with_all_doctypes(self):
		exporter.export_data("User", all_doctypes="Yes", template=True, with_data=True)
		content = read_csv_content(frappe.response.result)
		self.assertTrue(content[1][1], "User")
		self.assertTrue('"Administrator"' in [c[1] for c in content if len(c)>1])
		# child tables (e.g. Has Role) must appear in the header block
		self.assertEqual(content[13][0], "DocType:")
		self.assertEqual(content[13][1], "User")
		self.assertTrue("Has Role" in content[13])
	def test_import(self):
		# start from a clean slate so the import actually creates the record
		if frappe.db.exists("Blog Category", "test-category"):
			frappe.delete_doc("Blog Category", "test-category")
		exporter.export_data("Blog Category", all_doctypes=True, template=True)
		content = read_csv_content(frappe.response.result)
		content.append(["", "", "test-category", "Test Cateogry"])
		importer.upload(content)
		self.assertTrue(frappe.db.get_value("Blog Category", "test-category", "title"), "Test Category")
		# export with data
		exporter.export_data("Blog Category", all_doctypes=True, template=True, with_data=True)
		content = read_csv_content(frappe.response.result)
		# overwrite
		content[-1][3] = "New Title"
		importer.upload(content, overwrite=True)
		self.assertTrue(frappe.db.get_value("Blog Category", "test-category", "title"), "New Title")
	def test_import_only_children(self):
		# importing into a child table ("Has Role") of an existing parent
		user_email = "test_import_userrole@example.com"
		if frappe.db.exists("User", user_email):
			frappe.delete_doc("User", user_email)
		frappe.get_doc({"doctype": "User", "email": user_email, "first_name": "Test Import UserRole"}).insert()
		exporter.export_data("Has Role", "User", all_doctypes=True, template=True)
		content = read_csv_content(frappe.response.result)
		content.append(["", "test_import_userrole@example.com", "Blogger"])
		importer.upload(content)
		user = frappe.get_doc("User", user_email)
		self.assertTrue(frappe.db.get_value("Has Role", filters={"role": "Blogger", "parent": user_email, "parenttype": "User"}))
		self.assertTrue(user.get("roles")[0].role, "Blogger")
		# overwrite: the child table should be replaced, not appended to
		exporter.export_data("Has Role", "User", all_doctypes=True, template=True)
		content = read_csv_content(frappe.response.result)
		content.append(["", "test_import_userrole@example.com", "Website Manager"])
		importer.upload(content, overwrite=True)
		user = frappe.get_doc("User", user_email)
		self.assertEqual(len(user.get("roles")), 1)
		self.assertTrue(user.get("roles")[0].role, "Website Manager")
	def test_import_with_children(self): #pylint: disable=R0201
		if frappe.db.exists("Event", "EV00001"):
			frappe.delete_doc("Event", "EV00001")
		exporter.export_data("Event", all_doctypes="Yes", template=True)
		content = read_csv_content(frappe.response.result)
		# pad a fresh row to the template's width before filling in values
		content.append([None] * len(content[-2]))
		content[-1][1] = "EV00001"
		content[-1][2] = "__Test Event with children"
		content[-1][3] = "Private"
		content[-1][4] = "2014-01-01 10:00:00.000000"
		importer.upload(content)
		# fetching by subject raises if the import did not create the doc
		frappe.get_doc("Event", {"subject":"__Test Event with children"})
	def test_excel_import(self):
		if frappe.db.exists("Event", "EV00001"):
			frappe.delete_doc("Event", "EV00001")
		exporter.export_data("Event", all_doctypes=True, template=True, file_type="Excel")
		from frappe.utils.xlsxutils import read_xlsx_file_from_attached_file
		content = read_xlsx_file_from_attached_file(fcontent=frappe.response.filecontent)
		content.append(["", "EV00001", "_test", "Private", "05-11-2017 13:51:48", "0", "0", "", "1", "blue"])
		importer.upload(content)
		self.assertTrue(frappe.db.get_value("Event", "EV00001", "subject"), "_test")
| mit |
scott-eddy/mavlink | pymavlink/tools/magfit_rotation_gps.py | 43 | 3862 | #!/usr/bin/env python
'''
fit best estimate of magnetometer rotation to GPS data
'''
import sys, time, os, math
from argparse import ArgumentParser
# command-line options: --declination corrects magnetic vs true heading,
# --min-speed filters out samples where the GPS course is unreliable
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--declination", default=0.0, type=float, help="magnetic declination")
parser.add_argument("--min-speed", default=4.0, type=float, help="minimum GPS speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.rotmat import Vector3, Matrix3
from math import radians, degrees, sin, cos, atan2
class Rotation(object):
    """Container pairing a (roll, pitch, yaw) Euler triple, in degrees,
    with its corresponding rotation matrix."""
    def __init__(self, roll, pitch, yaw, r):
        # store the Euler angles and the pre-built Matrix3 verbatim
        (self.roll, self.pitch, self.yaw, self.r) = (roll, pitch, yaw, r)
def in_rotations_list(rotations, m):
    """Return True if matrix m matches (within 1 degree on each Euler axis)
    a rotation already present in rotations.

    Fixed: the Euler unpack previously reused the name 'r', clobbering the
    Rotation loop variable mid-iteration.
    """
    for rot in rotations:
        # residual rotation between the candidate and the stored rotation
        m2 = m.transposed() * rot.r
        (roll, pitch, yaw) = m2.to_euler()
        if (abs(roll) < radians(1) and
            abs(pitch) < radians(1) and
            abs(yaw) < radians(1)):
            return True
    return False
def generate_rotations():
    '''generate all 90 degree rotations'''
    right_angles = (0, 90, 180, 270)
    found = []
    # try every yaw/pitch/roll combination, keeping only rotations that are
    # not already represented (many combinations produce the same matrix)
    for yaw in right_angles:
        for pitch in right_angles:
            for roll in right_angles:
                candidate = Matrix3()
                candidate.from_euler(radians(roll), radians(pitch), radians(yaw))
                if not in_rotations_list(found, candidate):
                    found.append(Rotation(roll, pitch, yaw, candidate))
    return found
def angle_diff(angle1, angle2):
    '''give the difference between two angles in degrees'''
    delta = angle1 - angle2
    # wrap a single revolution into the (-180, 180] / [-180, 180) band;
    # the two corrections are mutually exclusive
    if delta > 180:
        delta -= 360
    elif delta < -180:
        delta += 360
    return delta
def heading_difference(mag, attitude, declination):
    '''return the absolute difference in degrees between the
    tilt-compensated compass heading and the attitude yaw'''
    roll = attitude.roll
    pitch = attitude.pitch
    # project the magnetic field vector onto the horizontal plane
    head_x = mag.x*cos(pitch) + mag.y*sin(roll)*sin(pitch) + mag.z*cos(roll)*sin(pitch)
    head_y = mag.y*cos(roll) - mag.z*sin(roll)
    mag_heading = degrees(atan2(-head_y, head_x)) + declination
    att_heading = degrees(attitude.yaw)
    return abs(angle_diff(mag_heading, att_heading))
def add_errors(mag, attitude, total_error, rotations):
    """Accumulate into total_error[i] the heading error that candidate
    rotation i produces for this magnetometer/attitude sample."""
    # enumerate instead of range(len(...)) — same order, clearer pairing
    for i, rotation in enumerate(rotations):
        rmag = rotation.r * mag
        total_error[i] += heading_difference(rmag, attitude, args.declination)
def magfit(logfile):
    '''find best magnetometer rotation fit to a log file'''
    # fixed: the body previously read the *global* 'filename' instead of the
    # logfile parameter — it only worked because the caller's loop variable
    # happened to be global
    print("Processing log %s" % logfile)
    mlog = mavutil.mavlink_connection(logfile, notimestamps=args.notimestamps)
    # generate 90 degree rotations
    rotations = generate_rotations()
    print("Generated %u rotations" % len(rotations))
    count = 0
    total_error = [0]*len(rotations)
    attitude = None
    gps = None
    # now gather all the data: pair each RAW_IMU sample with the most recent
    # attitude and GPS messages, keeping only fast-moving samples with 3D fix
    while True:
        m = mlog.recv_match()
        if m is None:
            break
        if m.get_type() == "ATTITUDE":
            attitude = m
        if m.get_type() == "GPS_RAW_INT":
            gps = m
        if m.get_type() == "RAW_IMU":
            mag = Vector3(m.xmag, m.ymag, m.zmag)
            # gps.vel is cm/s, hence the *100 on the m/s threshold
            if attitude is not None and gps is not None and gps.vel > args.min_speed*100 and gps.fix_type >= 3:
                add_errors(mag, attitude, total_error, rotations)
                count += 1
    if count == 0:
        # previously this fell through to a ZeroDivisionError below
        print("No usable samples found in %s" % logfile)
        return
    # report the average error of every rotation and pick the smallest
    best_i = 0
    best_err = total_error[0]
    for i, rot in enumerate(rotations):
        print("(%u,%u,%u) err=%.2f" % (
            rot.roll,
            rot.pitch,
            rot.yaw,
            total_error[i]/count))
        if total_error[i] < best_err:
            best_i = i
            best_err = total_error[i]
    best = rotations[best_i]
    print("Best rotation (%u,%u,%u) err=%.2f" % (
        best.roll,
        best.pitch,
        best.yaw,
        best_err/count))
# process every log file given on the command line
for filename in args.logs:
    magfit(filename)
| lgpl-3.0 |
calfonso/ansible | test/integration/targets/module_precedence/lib_with_extension/ping.py | 320 | 2144 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
    """Module entry point: echo back 'pong' (or the supplied data value)."""
    module = AnsibleModule(
        argument_spec=dict(
            data=dict(required=False, default=None),
        ),
        supports_check_mode=True
    )
    result = dict(ping='pong')
    if module.params['data']:
        # 'crash' is a deliberate failure hook used by integration tests
        if module.params['data'] == 'crash':
            raise Exception("boom")
        result['ping'] = module.params['data']
    # marks which copy of the module answered (used by precedence tests)
    result['location'] = 'library'
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
gandalfk7/user_list | user_list_xml_2.py | 1 | 5695 | #! /usr/bin/env python
# LICENSE:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# INFO:
# This script has been developed under CentOS and has been tested with CentOS 6.7 and Oracle Linux Server 6.4
# it requires python 2.x and root privileges (to read the shadow file)
# it exports and .xml file named: HOSTNAME_UserGroupList_DATE.xml containing data of users and groups.
import pwd
import grp
import spwd
import operator
import re
import os
import csv
import time
# HTML fragments used to build each table row of the report
line_open = '<tr><td>'
line_mid = '</td><td>'
line_close = '</td></tr>\n'
xmlfilename = str(os.uname()[1].split(".")[0]) + "_UserGroupList_" + str(time.strftime("%Y%m%d")) + ".xml" #builds the filename with the hostname, the arbitrary title, the date and the extension
file = open(xmlfilename, "w") # NOTE(review): 'file' shadows the builtin name; harmless here but worth renaming
users = [] #create emtpy user list
for u in pwd.getpwall(): #retrieve and walk the user list
    users.append(u[0]) #populate the user list
# emit the document header, inline CSS and the user-table column headings
file.write('<?xml version="1.0" encoding="utf-8"?><html lang="en-US" xmlns="http://www.w3.org/1999/xhtml"><head><style>\n')
file.write('table {border-collapse: collapse;}\n')
file.write('table, td, th {border: 1px solid gray;}\n')
file.write('</style></head><body style="font-size: 11pt; font-family: Verdana, Arial;"><table><tr class="sep" style="background-color: silver;"><td><b>Username</b></td><td><b>Primary Group</b></td><td><b>Other Groups</b></td><td><b>Locked</b></td><td><b>User Real Name</b></td><td><b>User Type</b></td><td><b>User Company</b></td><td><b>System User Type</b></td><td><b>Last Access</b></td><td><b>Home Dir</b></td></tr>\n')
for user in users: #main loop, for every user gets the needed data
    group = [g.gr_name for g in grp.getgrall() if user in g.gr_mem] #gets the groups the user is member of (but not the primary)
    str_rep_group = str(group) #transforms the array in a usable object
    str_group = str_rep_group.replace("', '"," ").replace("[","").replace("]","").replace("'","") #sanitizes the string
    if not str_group: str_group = "&nbsp;" if False else " " #if the string is empty displays a empty space to retain csv alignment
    home = pwd.getpwnam(user).pw_dir #gets the homedir of the user
    gr_id = pwd.getpwnam(user).pw_gid #gets the id of the primary group of the user
    main_gr = grp.getgrgid(gr_id)[0] #gets the name of the group using the id
    if len(spwd.getspnam(user).sp_pwd) >= 3: sys_usr = "User" #to understand if a user is a system user, it checks if the password is longer than 3chars, if it is, the user is considered a non-system user
    else: sys_usr = "System" #
    if "!!" in spwd.getspnam(user).sp_pwd: usr_lock = "yes" #to understand if the user is locked or not, looks for !! in the password
    else: usr_lock = " " #
    f=os.popen("last " + user + " | head -n 1 | grep " + user) #command used to retrieve the last access
    usr_lastacc = f.readlines() #read the output of the command for the last access
    rep_usr_lastacc = str(usr_lastacc) #transforms the array in a usable object
    str_usr_lastacc = rep_usr_lastacc.replace("[","").replace("]","").replace(",","").replace("'","").rstrip() #sanitizes the string
    if not str_usr_lastacc: str_usr_lastacc = "user never logged on" #if the string is empty, the filed is filled with text stating the user never logged on the system
    #
    # the GECOS field may carry extra "ATTRIBS..." data appended by local
    # tooling; split it apart when present, otherwise mark fields as unset
    otherfields = pwd.getpwnam(user).pw_gecos
    if "ATTRIBS" in otherfields:
#        print otherfields.split("ATTRIBS",1)[1].split(',')[2]
        user_realname = otherfields.split("ATTRIBS",1)[1].split(',')[1]
        user_type = otherfields.split("ATTRIBS",1)[1].split(',')[2]
        user_company = otherfields.split("ATTRIBS",1)[1].split(',')[3]
#        user_tbd = otherfields.split("ATTRIBS",1)[1].split(',')[4]
    else:
        user_realname = "_NOT_SET"
        user_type = "_NOT_SET"
        user_company = "_NOT_SET"
#        user_tbd = "_NOT_SET"
    #
    file.write(line_open + user + line_mid + main_gr + line_mid + str_group + line_mid + usr_lock + line_mid + user_realname + line_mid + user_type + line_mid + user_company + line_mid + sys_usr + line_mid + str_usr_lastacc + line_mid + home + line_close)
#    print "-----------------------------------------------------------"       #DEBUG OUTPUT
#    print "User: " + user                                                     #DEBUG OUTPUT
#    print "primary group: " + main_gr                                         #DEBUG OUTPUT
#    print "other groups: " + str_group                                        #DEBUG OUTPUT
#    print "user locked?: " + usr_lock                                         #DEBUG OUTPUT
#    print "system user?: " + sys_usr                                          #DEBUG OUTPUT
#    print "last access: " + str_usr_lastacc                                   #DEBUG OUTPUT
#    print "home dir: " + home                                                 #DEBUG OUTPUT
# second table: every group with its (non-primary) member list
file.write('</table>\n')
file.write('<br /><br />\n')
file.write('<table><tr style="background-color: silver;"><td><b>Group</b></td><td><b>Members</b></td></tr>')
all_groups = grp.getgrall()
for group in sorted(all_groups):
    file.write(line_open + str(group.gr_name) + line_mid + str(group.gr_mem).replace("', '"," ").replace("[","").replace("]","").replace("'","") + line_close)
file.write('</table></body></html>\n')
file.close()
| gpl-2.0 |
amitsela/incubator-beam | sdks/python/apache_beam/runners/runner_test.py | 6 | 4488 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the PipelineRunner and DirectRunner classes.
Note that PipelineRunner and DirectRunner functionality is tested in all
the other unit tests. In this file we choose to test only aspects related to
caching and clearing values that are not tested elsewhere.
"""
import unittest
import hamcrest as hc
import apache_beam as beam
import apache_beam.transforms as ptransform
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metricbase import MetricName
from apache_beam.pipeline import Pipeline
from apache_beam.runners import DirectRunner
from apache_beam.runners import create_runner
from apache_beam.transforms.util import assert_that
from apache_beam.transforms.util import equal_to
from apache_beam.utils.pipeline_options import PipelineOptions
class RunnerTest(unittest.TestCase):
  """Tests for runner name resolution and DirectRunner metrics collection."""

  # Minimal pipeline options so a pipeline can be constructed in tests
  # without touching a real Dataflow/GCP environment.
  default_properties = [
      '--dataflow_endpoint=ignored',
      '--job_name=test-job',
      '--project=test-project',
      '--staging_location=ignored',
      '--temp_location=/dev/null',
      '--no_auth=True']

  def test_create_runner(self):
    """create_runner resolves a known runner name and rejects unknown ones."""
    self.assertTrue(
        isinstance(create_runner('DirectRunner'), DirectRunner))
    # An unrecognized runner name must raise ValueError, not return None.
    self.assertRaises(ValueError, create_runner, 'xyz')

  def test_create_runner_shorthand(self):
    """Runner lookup is case-insensitive and accepts the 'direct' shorthand."""
    self.assertTrue(
        isinstance(create_runner('DiReCtRuNnEr'), DirectRunner))
    self.assertTrue(
        isinstance(create_runner('directrunner'), DirectRunner))
    self.assertTrue(
        isinstance(create_runner('direct'), DirectRunner))
    self.assertTrue(
        isinstance(create_runner('DiReCt'), DirectRunner))
    self.assertTrue(
        isinstance(create_runner('Direct'), DirectRunner))

  def test_direct_runner_metrics(self):
    """Counters and distributions updated in a DoFn are queryable afterwards."""
    from apache_beam.metrics.metric import Metrics

    class MyDoFn(beam.DoFn):
      # Each lifecycle hook updates a distinct metric so the test can verify
      # that bundle-level and element-level metrics are all collected.

      def start_bundle(self):
        count = Metrics.counter(self.__class__, 'bundles')
        count.inc()

      def finish_bundle(self):
        count = Metrics.counter(self.__class__, 'finished_bundles')
        count.inc()

      def process(self, element):
        count = Metrics.counter(self.__class__, 'elements')
        count.inc()
        distro = Metrics.distribution(self.__class__, 'element_dist')
        distro.update(element)
        return [element]

    runner = DirectRunner()
    p = Pipeline(runner,
                 options=PipelineOptions(self.default_properties))
    pcoll = (p | ptransform.Create([1, 2, 3, 4, 5])
             | 'Do' >> beam.ParDo(MyDoFn()))
    assert_that(pcoll, equal_to([1, 2, 3, 4, 5]))
    result = p.run()
    result.wait_until_finish()
    metrics = result.metrics().query()
    # Metric namespace is derived from the DoFn class, matching what
    # Metrics.counter(self.__class__, ...) records inside the DoFn.
    namespace = '{}.{}'.format(MyDoFn.__module__,
                               MyDoFn.__name__)
    # Expect: 5 elements processed, 1 bundle started, 1 bundle finished.
    # Each MetricResult carries (committed, attempted) values.
    hc.assert_that(
        metrics['counters'],
        hc.contains_inanyorder(
            MetricResult(
                MetricKey('Do', MetricName(namespace, 'elements')),
                5, 5),
            MetricResult(
                MetricKey('Do', MetricName(namespace, 'bundles')),
                1, 1),
            MetricResult(
                MetricKey('Do', MetricName(namespace, 'finished_bundles')),
                1, 1)))
    # Distribution over elements 1..5: sum=15, count=5, min=1, max=5.
    hc.assert_that(
        metrics['distributions'],
        hc.contains_inanyorder(
            MetricResult(
                MetricKey('Do', MetricName(namespace, 'element_dist')),
                DistributionResult(DistributionData(15, 5, 1, 5)),
                DistributionResult(DistributionData(15, 5, 1, 5)))))
# Script entry point: run the test suite when executed directly.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.