repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
sergio-incaser/odoo | addons/crm/crm_phonecall.py | 66 | 14918 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class crm_phonecall(osv.osv):
    """ Model for CRM phonecalls (logging and scheduling of customer calls). """
    _name = "crm.phonecall"
    _description = "Phonecall"
    _order = "id desc"
    _inherit = ['mail.thread']

    _columns = {
        'date_action_last': fields.datetime('Last Action', readonly=1),
        'date_action_next': fields.datetime('Next Action', readonly=1),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team',
            select=True, help='Sales team to which Case belongs to.'),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'partner_id': fields.many2one('res.partner', 'Contact'),
        'company_id': fields.many2one('res.company', 'Company'),
        'description': fields.text('Description'),
        'state': fields.selection(
            [('open', 'Confirmed'),
             ('cancel', 'Cancelled'),
             ('pending', 'Pending'),
             ('done', 'Held')
             ], string='Status', readonly=True, track_visibility='onchange',
            # Fixed typo in user-visible help text: "callis" -> "call is".
            help='The status is set to Confirmed, when a case is created.\n'
                 'When the call is over, the status is set to Held.\n'
                 'If the call is not applicable anymore, the status can be set to Cancelled.'),
        'email_from': fields.char('Email', size=128, help="These people will receive email."),
        'date_open': fields.datetime('Opened', readonly=True),
        # phonecall fields
        'name': fields.char('Call Summary', required=True),
        'active': fields.boolean('Active', required=False),
        'duration': fields.float('Duration', help='Duration in minutes and seconds.'),
        'categ_id': fields.many2one('crm.case.categ', 'Category',
            domain="['|',('section_id','=',section_id),('section_id','=',False),('object_id.model', '=', 'crm.phonecall')]"),
        'partner_phone': fields.char('Phone'),
        'partner_mobile': fields.char('Mobile'),
        'priority': fields.selection([('0', 'Low'), ('1', 'Normal'), ('2', 'High')], 'Priority'),
        'date_closed': fields.datetime('Closed', readonly=True),
        'date': fields.datetime('Date'),
        'opportunity_id': fields.many2one('crm.lead', 'Lead/Opportunity'),
    }

    def _get_default_state(self, cr, uid, context=None):
        """Return the state requested via context ('default_state') or 'open'."""
        if context and context.get('default_state'):
            return context.get('default_state')
        return 'open'

    _defaults = {
        'date': fields.datetime.now,
        'priority': '1',
        'state': _get_default_state,
        'user_id': lambda self, cr, uid, ctx: uid,
        'active': 1,
    }

    def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
        """Propagate the selected partner's phone/mobile numbers to the form."""
        values = {}
        if partner_id:
            partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
            values = {
                'partner_phone': partner.phone,
                'partner_mobile': partner.mobile,
            }
        return {'value': values}

    def write(self, cr, uid, ids, values, context=None):
        """ Override to add case management: open/close dates """
        if values.get('state'):
            if values.get('state') == 'done':
                # Closing the call: stamp the close date and store its duration.
                values['date_closed'] = fields.datetime.now()
                self.compute_duration(cr, uid, ids, context=context)
            elif values.get('state') == 'open':
                # (Re)opening the call: stamp the open date, reset the duration.
                values['date_open'] = fields.datetime.now()
                values['duration'] = 0.0
        return super(crm_phonecall, self).write(cr, uid, ids, values, context=context)

    def compute_duration(self, cr, uid, ids, context=None):
        """Fill in a missing duration as the minutes elapsed since the call date.

        Bug fix: use timedelta.total_seconds() instead of .seconds, which
        silently dropped the days component for calls older than 24 hours.
        """
        for phonecall in self.browse(cr, uid, ids, context=context):
            if phonecall.duration <= 0:
                elapsed = datetime.now() - datetime.strptime(phonecall.date, DEFAULT_SERVER_DATETIME_FORMAT)
                values = {'duration': elapsed.total_seconds() / 60.0}
                self.write(cr, uid, [phonecall.id], values, context=context)
        return True

    def schedule_another_phonecall(self, cr, uid, ids, schedule_time, call_summary,
            user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
        """
        action :('schedule','Schedule a call'), ('log','Log a call')

        Create one follow-up call per call in `ids` and return a mapping
        {original_call_id: new_call_id}.
        """
        model_data = self.pool.get('ir.model.data')
        phonecall_dict = {}
        if not categ_id:
            # Default category: the standard 'categ_phone2' record, if present.
            try:
                res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
                categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
            except ValueError:
                pass
        for call in self.browse(cr, uid, ids, context=context):
            # NOTE(review): these fallbacks rebind the shared defaults, so values
            # taken from the first call leak into later iterations when several
            # ids are processed -- confirm whether that is intended.
            if not section_id:
                section_id = call.section_id and call.section_id.id or False
            if not user_id:
                user_id = call.user_id and call.user_id.id or False
            if not schedule_time:
                schedule_time = call.date
            vals = {
                'name': call_summary,
                'user_id': user_id or False,
                'categ_id': categ_id or False,
                'description': call.description or False,
                'date': schedule_time,
                'section_id': section_id or False,
                'partner_id': call.partner_id and call.partner_id.id or False,
                'partner_phone': call.partner_phone,
                'partner_mobile': call.partner_mobile,
                'priority': call.priority,
                'opportunity_id': call.opportunity_id and call.opportunity_id.id or False,
            }
            new_id = self.create(cr, uid, vals, context=context)
            if action == 'log':
                # Logged calls are created already held.
                self.write(cr, uid, [new_id], {'state': 'done'}, context=context)
            phonecall_dict[call.id] = new_id
        return phonecall_dict

    def _call_create_partner(self, cr, uid, phonecall, context=None):
        """Create a res.partner from a phonecall's data and return its id."""
        partner = self.pool.get('res.partner')
        partner_id = partner.create(cr, uid, {
            'name': phonecall.name,
            'user_id': phonecall.user_id.id,
            'comment': phonecall.description,
            'address': []
        })
        return partner_id

    def on_change_opportunity(self, cr, uid, ids, opportunity_id, context=None):
        """Propagate the selected opportunity's team, numbers and partner."""
        values = {}
        if opportunity_id:
            opportunity = self.pool.get('crm.lead').browse(cr, uid, opportunity_id, context=context)
            values = {
                'section_id': opportunity.section_id and opportunity.section_id.id or False,
                'partner_phone': opportunity.phone,
                'partner_mobile': opportunity.mobile,
                'partner_id': opportunity.partner_id and opportunity.partner_id.id or False,
            }
        return {'value': values}

    def _call_set_partner(self, cr, uid, ids, partner_id, context=None):
        """Assign `partner_id` to the calls and post a chatter notification."""
        write_res = self.write(cr, uid, ids, {'partner_id': partner_id}, context=context)
        self._call_set_partner_send_note(cr, uid, ids, context)
        return write_res

    def _call_create_partner_address(self, cr, uid, phonecall, partner_id, context=None):
        """Create a contact (child partner) under `partner_id` from the call data."""
        address = self.pool.get('res.partner')
        return address.create(cr, uid, {
            'parent_id': partner_id,
            'name': phonecall.name,
            'phone': phonecall.partner_phone,
        })

    def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
        """
        Handle partner assignation during a lead conversion.
        if action is 'create', create new partner with contact and assign lead to new partner_id.
        otherwise assign lead to specified partner_id

        :param list ids: phonecalls ids to process
        :param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
        :param int partner_id: partner to assign if any
        :return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
        """
        # TODO this is a duplication of the handle_partner_assignation method of crm_lead
        partner_ids = {}
        # If a partner_id is given, force this partner for all elements
        force_partner_id = partner_id
        for call in self.browse(cr, uid, ids, context=context):
            # If the action is set to 'create' and no partner_id is set, create a new one
            if action == 'create':
                partner_id = force_partner_id or self._call_create_partner(cr, uid, call, context=context)
                self._call_create_partner_address(cr, uid, call, partner_id, context=context)
            self._call_set_partner(cr, uid, [call.id], partner_id, context=context)
            partner_ids[call.id] = partner_id
        return partner_ids

    def redirect_phonecall_view(self, cr, uid, phonecall_id, context=None):
        """Return an act_window dict opening the form view of `phonecall_id`."""
        model_data = self.pool.get('ir.model.data')
        # Select the views registered by the crm module.
        tree_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_tree_view')
        form_view = model_data.get_object_reference(cr, uid, 'crm', 'crm_case_phone_form_view')
        search_view = model_data.get_object_reference(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
        value = {
            'name': _('Phone Call'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'crm.phonecall',
            'res_id': int(phonecall_id),
            'views': [(form_view and form_view[1] or False, 'form'), (tree_view and tree_view[1] or False, 'tree'), (False, 'calendar')],
            'type': 'ir.actions.act_window',
            'search_view_id': search_view and search_view[1] or False,
        }
        return value

    def convert_opportunity(self, cr, uid, ids, opportunity_summary=False, partner_id=False, planned_revenue=0.0, probability=0.0, context=None):
        """Create one crm.lead opportunity per call, close the calls, and
        return a mapping {call_id: opportunity_id}."""
        partner = self.pool.get('res.partner')
        opportunity = self.pool.get('crm.lead')
        opportunity_dict = {}
        default_contact = False
        for call in self.browse(cr, uid, ids, context=context):
            # NOTE(review): like schedule_another_phonecall, partner_id set from
            # the first call persists for subsequent iterations.
            if not partner_id:
                partner_id = call.partner_id and call.partner_id.id or False
            if partner_id:
                address_id = partner.address_get(cr, uid, [partner_id])['default']
                if address_id:
                    default_contact = partner.browse(cr, uid, address_id, context=context)
            opportunity_id = opportunity.create(cr, uid, {
                'name': opportunity_summary or call.name,
                'planned_revenue': planned_revenue,
                'probability': probability,
                'partner_id': partner_id or False,
                'mobile': default_contact and default_contact.mobile,
                'section_id': call.section_id and call.section_id.id or False,
                'description': call.description or False,
                'priority': call.priority,
                'type': 'opportunity',
                'phone': call.partner_phone or False,
                'email_from': default_contact and default_contact.email,
            })
            vals = {
                'partner_id': partner_id,
                'opportunity_id': opportunity_id,
                'state': 'done',
            }
            self.write(cr, uid, [call.id], vals, context=context)
            opportunity_dict[call.id] = opportunity_id
        return opportunity_dict

    def action_make_meeting(self, cr, uid, ids, context=None):
        """
        Open meeting's calendar view to schedule a meeting on current phonecall.
        :return dict: dictionary value for created meeting view
        """
        partner_ids = [self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id]
        phonecall = self.browse(cr, uid, ids[0], context)
        if phonecall.partner_id and phonecall.partner_id.email:
            partner_ids.append(phonecall.partner_id.id)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'default_phonecall_id': phonecall.id,
            'default_partner_ids': partner_ids,
            'default_user_id': uid,
            'default_email_from': phonecall.email_from,
            'default_name': phonecall.name,
        }
        return res

    def action_button_convert2opportunity(self, cr, uid, ids, context=None):
        """
        Convert a phonecall into an opp and then redirect to the opp view.
        :param list ids: list of calls ids to convert (typically contains a single id)
        :return dict: containing view information
        """
        if len(ids) != 1:
            raise osv.except_osv(_('Warning!'), _('It\'s only possible to convert one phonecall at a time.'))
        opportunity_dict = self.convert_opportunity(cr, uid, ids, context=context)
        return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, opportunity_dict[ids[0]], context)

    # ----------------------------------------
    # OpenChatter
    # ----------------------------------------
    def _call_set_partner_send_note(self, cr, uid, ids, context=None):
        """Post a chatter message noting that the partner has been created."""
        return self.message_post(cr, uid, ids, body=_("Partner has been <b>created</b>."), context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
orangeduck/PyAutoC | Python27/Lib/encodings/iso8859_2.py | 593 | 13660 | """ Python Character Mapping Codec iso8859_2 generated from 'MAPPINGS/ISO8859/8859-2.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-2 codec built on the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding keeps no state between calls."""

    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding keeps no state between calls."""

    def decode(self, input, final=False):
        data, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return data
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer: the stateless Codec combined with codecs' stream machinery."""
    pass
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader: the stateless Codec combined with codecs' stream machinery."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the encodings package registry."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-2',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# ISO 8859-2 (Latin-2) decoding map, 16 code points per row.  Positions
# 0x00-0x9F are identical to their Unicode code points; only the upper half
# (0xA0-0xFF) carries the Latin-2 specific letters.
decoding_table = (
    u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'        # 0x00-0x0F  C0 controls
    u'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'  # 0x10-0x1F
    u' !"#$%&\'()*+,-./'                                                 # 0x20-0x2F
    u'0123456789:;<=>?'                                                  # 0x30-0x3F
    u'@ABCDEFGHIJKLMNO'                                                  # 0x40-0x4F
    u'PQRSTUVWXYZ[\\]^_'                                                 # 0x50-0x5F
    u'`abcdefghijklmno'                                                  # 0x60-0x6F
    u'pqrstuvwxyz{|}~\x7f'                                               # 0x70-0x7F
    u'\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f'  # 0x80-0x8F  C1 controls
    u'\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f'  # 0x90-0x9F
    u'\xa0\u0104\u02d8\u0141\xa4\u013d\u015a\xa7\xa8\u0160\u015e\u0164\u0179\xad\u017d\u017b'      # 0xA0-0xAF
    u'\xb0\u0105\u02db\u0142\xb4\u013e\u015b\u02c7\xb8\u0161\u015f\u0165\u017a\u02dd\u017e\u017c'  # 0xB0-0xBF
    u'\u0154\xc1\xc2\u0102\xc4\u0139\u0106\xc7\u010c\xc9\u0118\xcb\u011a\xcd\xce\u010e'            # 0xC0-0xCF
    u'\u0110\u0143\u0147\xd3\xd4\u0150\xd6\xd7\u0158\u016e\xda\u0170\xdc\xdd\u0162\xdf'            # 0xD0-0xDF
    u'\u0155\xe1\xe2\u0103\xe4\u013a\u0107\xe7\u010d\xe9\u0119\xeb\u011b\xed\xee\u010f'            # 0xE0-0xEF
    u'\u0111\u0144\u0148\xf3\xf4\u0151\xf6\xf7\u0159\u016f\xfa\u0171\xfc\xfd\u0163\u02d9'          # 0xF0-0xFF
)

### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
| bsd-2-clause |
anryko/ansible | test/units/module_utils/basic/test_set_mode_if_different.py | 43 | 5287 | # -*- coding: utf-8 -*-
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from itertools import product
import pytest
# Accepted spellings of mode 0o660: octal int, octal-literal string, bare
# octal string, and two symbolic-mode forms.
SYNONYMS_0660 = (
    0o660,
    '0o660',
    '660',
    'u+rw-x,g+rw-x,o-rwx',
    'u=rw,g=rw,o-rwx',
)
@pytest.fixture
def mock_stats(mocker):
    """Yield fake os.stat results: 'before' has mode 0o444, 'after' 0o660."""
    before = mocker.MagicMock()
    before.st_mode = 0o444
    after = mocker.MagicMock()
    after.st_mode = 0o660
    yield {"before": before, "after": after}
@pytest.fixture
def am_check_mode(am):
    """Yield the AnsibleModule fixture with check_mode on; turn it off on teardown."""
    am.check_mode = True
    yield am
    am.check_mode = False
@pytest.fixture
def mock_lchmod(mocker):
    """Patch ansible.module_utils.basic.os.lchmod (created if absent) and yield the mock."""
    patched = mocker.patch('ansible.module_utils.basic.os.lchmod',
                           return_value=None, create=True)
    yield patched
@pytest.mark.parametrize('previous_changes, check_mode, exists, stdin',
                         product((True, False), (True, False), (True, False), ({},)),
                         indirect=['stdin'])
def test_no_mode_given_returns_previous_changes(am, mock_stats, mock_lchmod, mocker, previous_changes, check_mode, exists):
    """With mode=None the call is a no-op: it echoes previous_changes and touches nothing."""
    am.check_mode = check_mode
    mocker.patch('os.lstat', side_effect=[mock_stats['before']])
    lchmod_mock = mocker.patch('os.lchmod', return_value=None, create=True)
    exists_mock = mocker.patch('os.path.exists', return_value=exists)

    result = am.set_mode_if_different('/path/to/file', None, previous_changes)

    assert result == previous_changes
    assert not lchmod_mock.called
    assert not exists_mock.called
@pytest.mark.parametrize('mode, check_mode, stdin',
                         product(SYNONYMS_0660, (True, False), ({},)),
                         indirect=['stdin'])
def test_mode_changed_to_0660(am, mock_stats, mocker, mode, check_mode):
    # Note: This is for checking that all the different ways of specifying
    # 0660 mode work. It cannot be used to check that setting a mode that is
    # not equivalent to 0660 works.
    am.check_mode = check_mode
    # lstat is consulted before the change and (twice) after it.
    mocker.patch('os.lstat',
                 side_effect=[mock_stats['before'], mock_stats['after'], mock_stats['after']])
    lchmod_mock = mocker.patch('os.lchmod', return_value=None, create=True)
    mocker.patch('os.path.exists', return_value=True)

    assert am.set_mode_if_different('/path/to/file', mode, False)
    if check_mode:
        assert not lchmod_mock.called
    else:
        lchmod_mock.assert_called_with(b'/path/to/file', 0o660)
@pytest.mark.parametrize('mode, check_mode, stdin',
                         product(SYNONYMS_0660, (True, False), ({},)),
                         indirect=['stdin'])
def test_mode_unchanged_when_already_0660(am, mock_stats, mocker, mode, check_mode):
    # Note: This is for checking that all the different ways of specifying
    # 0660 mode work. It cannot be used to check that setting a mode that is
    # not equivalent to 0660 works.
    am.check_mode = check_mode
    # File already has mode 0o660 at every stat, so no change is reported.
    mocker.patch('os.lstat',
                 side_effect=[mock_stats['after'], mock_stats['after'], mock_stats['after']])
    lchmod_mock = mocker.patch('os.lchmod', return_value=None, create=True)
    mocker.patch('os.path.exists', return_value=True)

    assert not am.set_mode_if_different('/path/to/file', mode, False)
    assert not lchmod_mock.called
@pytest.mark.parametrize('check_mode, stdin',
                         product((True, False), ({},)),
                         indirect=['stdin'])
def test_missing_lchmod_is_not_link(am, mock_stats, mocker, monkeypatch, check_mode):
    """Some platforms have lchmod (*BSD) others do not (Linux).

    A regular file must fall back to os.chmod when os.lchmod is missing.
    """
    am.check_mode = check_mode
    # Dropped unused local `original_hasattr = hasattr` left over from an
    # older hasattr-patching approach.
    monkeypatch.delattr(os, 'lchmod', raising=False)

    mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after']])
    mocker.patch('os.path.islink', return_value=False)
    mocker.patch('os.path.exists', return_value=True)
    m_chmod = mocker.patch('os.chmod', return_value=None)

    assert am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False)
    if check_mode:
        assert not m_chmod.called
    else:
        m_chmod.assert_called_with(b'/path/to/file/no_lchmod', 0o660)
@pytest.mark.parametrize('check_mode, stdin',
                         product((True, False), ({},)),
                         indirect=['stdin'])
def test_missing_lchmod_is_link(am, mock_stats, mocker, monkeypatch, check_mode):
    """Some platforms have lchmod (*BSD) others do not (Linux).

    A symlink must still be handled via os.chmod when os.lchmod is missing.
    """
    am.check_mode = check_mode
    # Dropped unused local `original_hasattr = hasattr` left over from an
    # older hasattr-patching approach.
    monkeypatch.delattr(os, 'lchmod', raising=False)

    mocker.patch('os.lstat', side_effect=[mock_stats['before'], mock_stats['after']])
    mocker.patch('os.path.islink', return_value=True)
    mocker.patch('os.path.exists', return_value=True)
    m_chmod = mocker.patch('os.chmod', return_value=None)
    mocker.patch('os.stat', return_value=mock_stats['after'])

    assert am.set_mode_if_different('/path/to/file/no_lchmod', 0o660, False)
    if check_mode:
        assert not m_chmod.called
    else:
        m_chmod.assert_called_with(b'/path/to/file/no_lchmod', 0o660)

    mocker.resetall()
    mocker.stopall()
| gpl-3.0 |
chrismeyersfsu/ansible | lib/ansible/modules/cloud/amazon/ec2_elb_facts.py | 23 | 8307 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
# Module release metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    'status': ['preview'],
    'supported_by': 'community',
    'version': '1.0',
}
# Ansible module documentation: YAML carried in module-level string literals.
# NOTE(review): the YAML indentation in these literals looks flattened in this
# copy of the file -- verify against upstream before shipping.
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author:
- "Michael Schultz (github.com/mjschultz)"
- "Fernando Jose Pando (@nand0p)"
options:
names:
description:
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
required: false
default: null
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment:
- aws
- ec2
'''
# Usage examples rendered by ansible-doc.
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: "{{ elb_facts.elbs }}"
'''
# boto is an optional dependency: HAS_BOTO records whether the import
# succeeded so callers can report a clean error instead of an ImportError.
try:
    import boto.ec2.elb
    from boto.ec2.tag import Tag
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
class ElbInformation(object):
    """Gathers facts about AWS Elastic Load Balancers via boto.

    :param module: AnsibleModule instance (used for fail_json error reporting)
    :param names: list of ELB names to restrict the report to (empty = all)
    :param region: AWS region to connect to
    :param aws_connect_params: extra keyword args passed to connect_to_aws
    """

    def __init__(self,
                 module,
                 names,
                 region,
                 **aws_connect_params):
        self.module = module
        self.names = names
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_elb_connection()

    def _get_tags(self, elbname):
        """Return the tags of *elbname* as a plain dict (empty on any error)."""
        params = {'LoadBalancerNames.member.1': elbname}
        try:
            elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
            return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
        except Exception:
            # Tags are best-effort extra information; never fail the module
            # over them.  (Narrowed from a bare ``except:`` so that
            # SystemExit/KeyboardInterrupt are no longer swallowed.)
            return {}

    def _get_elb_connection(self):
        """Open the regional ELB connection, failing the module on AWS errors."""
        try:
            return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
        except BotoServerError as err:
            self.module.fail_json(msg=err.message)

    def _get_elb_listeners(self, listeners):
        """Convert boto listener tuples into dicts.

        Each tuple is (lb_port, instance_port, protocol[, instance_protocol,
        ssl_certificate_id]); the certificate id is only reported when present
        and non-empty.
        """
        listener_list = []
        for listener in listeners:
            listener_dict = {
                'load_balancer_port': listener[0],
                'instance_port': listener[1],
                'protocol': listener[2],
            }
            try:
                ssl_certificate_id = listener[4]
            except IndexError:
                pass
            else:
                if ssl_certificate_id:
                    listener_dict['ssl_certificate_id'] = ssl_certificate_id
            listener_list.append(listener_dict)
        return listener_list

    def _get_health_check(self, health_check):
        """Split a boto health-check target ("PROTO:port[/path]") into a dict."""
        protocol, port_path = health_check.target.split(':')
        try:
            port, path = port_path.split('/', 1)
            path = '/{}'.format(path)
        except ValueError:
            # TCP/SSL targets have no path component.
            port = port_path
            path = None
        health_check_dict = {
            'ping_protocol': protocol.lower(),
            'ping_port': int(port),
            'response_timeout': health_check.timeout,
            'interval': health_check.interval,
            'unhealthy_threshold': health_check.unhealthy_threshold,
            'healthy_threshold': health_check.healthy_threshold,
        }
        if path:
            health_check_dict['ping_path'] = path
        return health_check_dict

    def _get_elb_info(self, elb):
        """Build the full facts dictionary for a single boto ELB object."""
        elb_info = {
            'name': elb.name,
            'zones': elb.availability_zones,
            'dns_name': elb.dns_name,
            'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
            'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
            'hosted_zone_name': elb.canonical_hosted_zone_name,
            'hosted_zone_id': elb.canonical_hosted_zone_name_id,
            'instances': [instance.id for instance in elb.instances],
            'listeners': self._get_elb_listeners(elb.listeners),
            'scheme': elb.scheme,
            'security_groups': elb.security_groups,
            'health_check': self._get_health_check(elb.health_check),
            'subnets': elb.subnets,
            'instances_inservice': [],
            'instances_inservice_count': 0,
            'instances_outofservice': [],
            'instances_outofservice_count': 0,
            'instances_inservice_percent': 0.0,
            'tags': self._get_tags(elb.name)
        }
        if elb.vpc_id:
            elb_info['vpc_id'] = elb.vpc_id
        if elb.instances:
            try:
                instance_health = self.connection.describe_instance_health(elb.name)
            except BotoServerError as err:
                self.module.fail_json(msg=err.message)
            elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
            elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
            elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
            elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
            # Bug fix: instances may all be in some other state (e.g. an
            # unknown/transitional one), which previously caused a
            # ZeroDivisionError; keep the 0.0 default in that case.
            total = float(elb_info['instances_inservice_count'] +
                          elb_info['instances_outofservice_count'])
            if total:
                elb_info['instances_inservice_percent'] = (
                    elb_info['instances_inservice_count'] / total * 100)
        return elb_info

    def list_elbs(self):
        """Return the facts for every ELB, filtered by self.names when set."""
        elb_array = []
        try:
            all_elbs = self.connection.get_all_load_balancers()
        except BotoServerError as err:
            self.module.fail_json(msg="%s: %s" % (err.error_code, err.error_message))
        if all_elbs:
            if self.names:
                for existing_lb in all_elbs:
                    if existing_lb.name in self.names:
                        elb_array.append(existing_lb)
            else:
                elb_array = all_elbs
        return list(map(self._get_elb_info, elb_array))
def main():
    """Module entry point: gather ELB facts and return them via exit_json."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(names={'default': [], 'type': 'list'}))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    # Read-only module: collecting facts never changes state.
    elb_information = ElbInformation(module, module.params['names'],
                                     region, **aws_connect_params)
    module.exit_json(changed=False, elbs=elb_information.list_elbs())
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
# Standard Ansible module entry point (module_utils are star-imported above).
if __name__ == '__main__':
    main()
| gpl-3.0 |
krsjoseph/youtube-dl | youtube_dl/extractor/malemotion.py | 108 | 1423 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class MalemotionIE(InfoExtractor):
    """Extractor for malemotion.com video pages (adult content)."""
    _VALID_URL = r'https?://malemotion\.com/video/(.+?)\.(?P<id>.+?)(#|$)'
    _TEST = {
        'url': 'http://malemotion.com/video/bete-de-concours.ltc',
        'md5': '3013e53a0afbde2878bc39998c33e8a5',
        'info_dict': {
            'id': 'ltc',
            'ext': 'mp4',
            'title': 'Bête de Concours',
            'age_limit': 18,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page = self._download_webpage(url, video_id)

        # The MP4 source URL is percent-encoded in the page markup.
        source_url = compat_urllib_parse_unquote(self._search_regex(
            r'<source type="video/mp4" src="(.+?)"', page, 'video URL'))
        title = self._html_search_regex(
            r'<title>(.*?)</title', page, 'title')
        thumbnail = self._search_regex(
            r'<video .+?poster="(.+?)"', page, 'thumbnail', fatal=False)

        formats = [{
            'url': source_url,
            'ext': 'mp4',
            'format_id': 'mp4',
            'preference': 1,
        }]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'thumbnail': thumbnail,
            'age_limit': 18,
        }
| unlicense |
woel0007/caravel | caravel/lib/python2.7/site-packages/setuptools/py31compat.py | 121 | 1636 | import sys
import unittest
__all__ = ['get_config_vars', 'get_path']
try:
    # Python 2.7 or >=3.2
    from sysconfig import get_config_vars, get_path
except ImportError:
    # Older interpreters: emulate sysconfig.get_path() on top of distutils,
    # supporting only the two schemes setuptools actually needs.
    from distutils.sysconfig import get_config_vars, get_python_lib
    def get_path(name):
        """Return the install path for 'platlib' or 'purelib'."""
        if name not in ('platlib', 'purelib'):
            raise ValueError("Name must be purelib or platlib")
        return get_python_lib(name=='platlib')
try:
    # Python >=3.2
    from tempfile import TemporaryDirectory
except ImportError:
    # Fallback for Python 2.x / 3.1.
    import shutil
    import tempfile
    class TemporaryDirectory(object):
        """
        Very simple temporary directory context manager.
        Will try to delete afterward, but will also ignore OS and similar
        errors on deletion.
        """
        def __init__(self):
            self.name = None # Handle mkdtemp raising an exception
            self.name = tempfile.mkdtemp()
        def __enter__(self):
            return self.name
        def __exit__(self, exctype, excvalue, exctrace):
            try:
                shutil.rmtree(self.name, True)
            except OSError: #removal errors are not the only possible
                pass
            # Mark the directory as gone even if removal failed.
            self.name = None
# Default: re-export unittest.main unchanged; overridden below on 3.1 only.
unittest_main = unittest.main

_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2)
if _PY31:
    # on Python 3.1, translate testRunner==None to TextTestRunner
    # for compatibility with Python 2.6, 2.7, and 3.2+
    def unittest_main(*args, **kwargs):
        if 'testRunner' in kwargs and kwargs['testRunner'] is None:
            kwargs['testRunner'] = unittest.TextTestRunner
        return unittest.main(*args, **kwargs)
| apache-2.0 |
kohlmeier/memfinity | webapp/api.py | 1 | 13562 | import datetime
import json
from google.appengine.api import oauth
from google.appengine.api import users
from google.appengine.ext import ndb
import jsonify
import models
import search
def get_oauth_user():
    """Return the OAuth authenticated user, else raise an exception.

    This is the db.User on whose behalf the OAuth consumer is making the
    current request.
    """
    try:
        return oauth.get_current_user()
    except oauth.OAuthRequestError:
        # The request was not a valid OAuth request.
        raise  # TODO(jace) something better to do here?
def get_current_user(handler):
    """Return the UserData for the currently logged in user (or None).

    Side effect: sets a 401 status on *handler* when nobody is logged in.
    """
    user = users.get_current_user()
    if user:
        return models.UserData.get_for_user_id(user.user_id())
    handler.error(401)
    return None
def entity_view(handler, route_root):
    """Query for a single entity by urlsafe Key.

    The key is taken from the request path after *route_root*.  Returns the
    pretty-printed JSON representation of the entity, or '{}' when the path
    is malformed or the entity does not exist.
    """
    path = handler.request.path
    response = '{}'
    # Bug fix: `not` binds tighter than `and`, so the original guard
    # (`not path.startswith(root) and len(path) > len(root)`) never rejected
    # a matching prefix with an empty key.  Parenthesize the intent.
    if not (path.startswith(route_root) and len(path) > len(route_root)):
        return response
    entity_key = path[len(route_root):]
    entity = ndb.Key(urlsafe=entity_key).get()
    if entity:
        response = jsonify.jsonify(entity, pretty_print=True)
    return response
def user_view_current(handler):
    """Return the JSON representation of the currently logged in user."""
    current_user = users.get_current_user()
    return jsonify.jsonify(current_user)
def user_view(handler):
    """Query for a single user by Key (path: /api/user/<urlsafe_key>)."""
    return entity_view(handler, '/api/user/')
def card_view(handler):
    """Query for a single card by Key (path: /api/card/<urlsafe_key>)."""
    return entity_view(handler, '/api/card/')
def card_query(handler):
    """Query for multiple cards.

    Supported request parameters:
      tag               -- comma-separated tags; cards matching any of them
      review            -- return the current user's cards ordered by next
                           scheduled review (mutually exclusive with 'user')
      reviewAll         -- with 'review', do not truncate to cards due today
      user              -- urlsafe key of the card owner
      include_followers -- with 'user', also include followed users' cards

    See main.py for usage examples.
    TODO(jace): return a query cursor, too?
    """
    tag = handler.request.get("tag", None)
    tag_list = tag.split(',') if tag else None
    review = handler.request.get('review', None)
    reviewAll = handler.request.get('reviewAll', False)
    include_followers = handler.request.get('include_followers', None)
    user_key = handler.request.get('user', None)
    if review and user_key:
        handler.error(400)
        return "'review' and 'user_key' cannot be used together."
    if review and include_followers:
        handler.error(400)
        return "'review' and 'include_followers' cannot be used together."
    if include_followers and not user_key:
        handler.error(400)
        # Bug fix: this message was copy-pasted from the case above.
        return "'include_followers' requires 'user'."
    if review:
        # if asked for review cards, get them for the current user only
        current_user = get_current_user(handler)
        if not current_user:
            handler.error(400)
            return "must be logged in to query for review cards."
        user_key = current_user.key.urlsafe()
    query = models.Card.query()
    if include_followers:
        user_data = ndb.Key(urlsafe=user_key).get()
        if not user_data:
            handler.error(500)
            return "UserData not found."
        user_list = user_data.following + [ndb.Key(urlsafe=user_key)]
        query = query.filter(models.Card.user_key.IN(user_list))
    elif user_key:
        query = query.filter(models.Card.user_key == ndb.Key(urlsafe=user_key))
    if tag_list:
        query = query.filter(models.Card.tags.IN(tag_list))
    if review:
        # For review mode, we sort by next scheduled review
        query = query.order(models.Card.next_review)
    else:
        query = query.order(-models.Card.added)
    results = query.fetch(100)
    response = '[]'
    if results:
        if review and not reviewAll:
            # If the current user is asking for review cards but hasn't
            # explicitly asked to review ALL cards, truncate the results to
            # cards scheduled on or before now... the future can wait.
            now = datetime.datetime.now()
            results = [card for card in results if card.next_review <= now]
        response = jsonify.jsonify(results, pretty_print=True)
    return response
def card_search(handler):
    """Search cards with query parameter "q".

    Returns a (possibly empty) list of JSONified models.Card entities.
    See search.py for query processing details.
    """
    user_key = None
    user = users.get_current_user()
    if user:
        # Scope/rank results for the logged-in user when available.
        user_data = models.UserData.get_for_user_id(user.user_id())
        user_key = user_data.key.urlsafe()

    search_results = search.query_cards(handler.request.get('q', ''),
                                        limit=20, ids_only=True,
                                        user_key=user_key)
    keys = [ndb.Key(urlsafe=result.doc_id) for result in search_results]
    return jsonify.jsonify(ndb.get_multi(keys))
def card_add(handler):
    """Add a new Card owned by the current user.

    Card fields come from the JSON request body; the card is also indexed
    for search and the user's known-tag list is refreshed.  Returns the new
    card's urlsafe key (or None when not logged in).
    """
    user_data = get_current_user(handler)
    if not user_data:
        return
    data = json.loads(handler.request.body)
    card = models.Card(user_key=user_data.key)
    card.update_from_dict(data)
    card.update_email_and_nickname()
    card.put()
    search.insert_cards([card])
    # Update the list of all known tags for this user
    user_data.update_card_tags([], data.get('tags', []))
    user_data.put() # TODO(jace): only put if necessary
    # TODO(jace) Notify followers
    return card.key.urlsafe()
def card_update(handler, delete=False, review=False):
    """Update, review, or delete an existing Card owned by the current user.

    delete -- remove the card and its search index entry
    review -- record a review grade (JSON body: {"grade": ...})
    otherwise -- replace the card's fields from the JSON request body
    Returns the card's urlsafe key, or '{}' on a bad path / foreign card.
    """
    user_data = get_current_user(handler)
    if not user_data:
        return
    path = handler.request.path
    route_root = '/api/card/'
    err_response = '{}'
    # Bug fix: `not` binds tighter than `and`; parenthesize so malformed
    # paths (wrong prefix or empty key) are actually rejected.
    if not (path.startswith(route_root) and len(path) > len(route_root)):
        return err_response
    card_key = path[len(route_root):]
    if card_key.endswith('/review'):
        card_key = card_key[:-len('/review')]
    card = ndb.Key(urlsafe=card_key).get()
    if not card:
        return err_response
    if user_data.key != card.user_key:
        # Disallow modification of other people's cards
        return err_response
    # Finally ready to do the update
    card_tags_original = set(card.tags)
    if delete:
        card_tags_updated = set()
        card.key.delete()
        search.delete_cards([card])
    elif review:
        data = json.loads(handler.request.body)
        card.record_review(data.get('grade'))
        card_tags_updated = set(card.tags)  # unchanged in this case
        card.put()
    else:
        data = json.loads(handler.request.body)
        card.update_from_dict(data)
        card_tags_updated = set(card.tags)
        card.put()
        search.insert_cards([card])
    # Update the list of all known tags for this user
    user_data.update_card_tags(card_tags_original, card_tags_updated)
    user_data.put()  # TODO(jace): only put if necessary
    # TODO(jace) Notify followers
    return card.key.urlsafe()
def card_import(handler):
    """Import another user's existing Card to the current user's account.

    Called with the form: /api/card/<card_id>/import
    Returns the new card's urlsafe key, or an error string.
    """
    user_data = get_current_user(handler)
    if not user_data:
        return
    path = handler.request.path
    card_key = path[len('/api/card/'):-len('/import')]
    card = ndb.Key(urlsafe=card_key).get()
    if not card:
        return "card not found"
    if user_data.key == card.user_key:
        # Disallow importing a card this user already owns
        return "can't import your own card"
    # Finally ready to do the update.  Clone the source card's properties
    # onto a fresh entity owned by the current user.
    new_card = models.Card()
    new_card.populate(**card.to_dict())
    new_card.user_key = user_data.key
    new_card.update_email_and_nickname()
    new_card.put()
    search.insert_cards([new_card])
    # Update the list of all known tags for this user
    user_data.update_card_tags([], new_card.tags)
    user_data.put() # TODO(jace): only put if necessary
    # TODO(jace) Notify followers
    return new_card.key.urlsafe()
def user_follows(handler):
    """Get the users followed by and following a certain user.

    Called via a route like:
        /api/user/<user_key>/follows
    """
    path = handler.request.path
    user_key = path[len('/api/user/'):-len('/follows')]
    user_data = ndb.Key(urlsafe=user_key).get()
    if not user_data:
        return "User not found"
    # TODO make async
    return jsonify.jsonify({
        'user_data': user_data,
        'following': ndb.get_multi(user_data.following),
        'followers': ndb.get_multi(user_data.followers),
    })
def user_update(handler):
    """Update the currently logged in user's own UserData record.

    Fields come from the JSON request body.  Returns the user's urlsafe key,
    or '{}' when the path is malformed or targets another user.
    """
    user_data = get_current_user(handler)
    if not user_data:
        return
    path = handler.request.path
    route_root = '/api/user/'
    err_response = '{}'
    # Bug fix: `not` binds tighter than `and`; parenthesize so malformed
    # paths (wrong prefix or empty key) are actually rejected.
    if not (path.startswith(route_root) and len(path) > len(route_root)):
        return err_response
    user_key = path[len(route_root):]
    if user_data.key != ndb.Key(urlsafe=user_key):
        # Disallow modification of other people's data!
        return err_response
    # Finally ready to do the update
    data = json.loads(handler.request.body)
    user_data.update_from_dict(data)
    user_data.put()  # TODO(jace): only put if necessary
    return user_data.key.urlsafe()
def user_follow_unfollow(handler, follow_or_unfollow):
    """Follow or unfollow another user.

    follow_or_unfollow -- 'follow' or 'unfollow'; the target user's key is
    parsed from a path of the form /api/user/<user_key>/[un]follow.  Both
    sides of the relationship (following / followers) are kept in sync.
    """
    user_data = get_current_user(handler)
    if not user_data:
        return
    path = handler.request.path
    suffix = '/' + follow_or_unfollow
    # path form is '/api/user/<user_key>/[un]follow'
    follow_user_key = ndb.Key(urlsafe=path[len('/api/user/'):-len(suffix)])
    follow_user = follow_user_key.get()
    if not follow_user:
        handler.error(500)
        return "User to follow not found."
    if follow_user_key == user_data.key:
        handler.error(500)
        return "Users may not follow themselves."
    if follow_or_unfollow == 'follow':
        # Add each side of the edge, guarding against duplicates.
        if follow_user_key not in user_data.following:
            user_data.following.append(follow_user_key)
            user_data.put()
        if user_data.key not in follow_user.followers:
            follow_user.followers.append(user_data.key)
            follow_user.put()
    elif follow_or_unfollow == 'unfollow':
        # Remove each side of the edge if present.
        if follow_user_key in user_data.following:
            user_data.following.remove(follow_user_key)
            user_data.put()
        if user_data.key in follow_user.followers:
            follow_user.followers.remove(user_data.key)
            follow_user.put()
    return user_data.key.urlsafe()
class _JSONCardArchive(object):
"""Simple format for storing cards used by bulk import & export."""
# TODO(chris): unit tests for import / export.
VERSION = "v1"
"""Increment VERSION when the JSON archive format changes."""
def __init__(self, cards=None):
self.cards = cards or []
def _card_to_archive_card(self, card):
"""Python object representation of a model.Card for export."""
obj = {"front": card.front or "",
"back": card.back or "",
"input_format": card.input_format or "text",
}
if card.tags:
obj["tags"] = card.tags
return obj
def get_cards(self):
return self.cards
def to_json(self):
archive = {"format": "JSONCardArchive",
"version": self.VERSION,
"cards": map(self._card_to_archive_card, self.cards),
}
return jsonify.jsonify(archive)
@classmethod
def from_json(cls, json_str):
obj = json.loads(json_str)
assert obj.get("format") == "JSONCardArchive", obj.get("format")
assert obj.get("version") == "v1", obj.get("version")
assert "cards" in obj
cards = []
for card_obj in obj["cards"]:
card = models.Card()
card.update_from_dict(card_obj)
cards.append(card)
archive = _JSONCardArchive(cards)
return archive
def card_bulk_export(handler):
    """Return all cards for the current user in JSON archive format."""
    user_data = get_current_user(handler)
    if not user_data:
        return
    card_query = models.Card.query().filter(
        models.Card.user_key == user_data.key)
    # TODO(chris): support export of >1k cards.
    # TODO(chris): support streaming JSON w/a fixed memory buffer to
    # avoid OOMs due to large card content.
    return _JSONCardArchive(card_query.fetch(limit=1000)).to_json()
def card_bulk_import(handler):
    """Create POSTed cards for the current user.

    The request body must be a _JSONCardArchive payload (the format produced
    by card_bulk_export).  All cards are stored, indexed for search, and the
    user's known-tag list is refreshed.
    """
    user_data = get_current_user(handler)
    user = users.get_current_user()
    if not user_data or not user:
        return
    archive_json_str = handler.request.body
    archive = _JSONCardArchive.from_json(archive_json_str)
    tags = set()
    cards = archive.get_cards()
    # TODO(chris): support streaming JSON w/a fixed memory buffer to
    # avoid OOMs due to large card content.
    for card in cards:
        card.user_key = user_data.key
        card.update_email_and_nickname(user)
        tags.update(card.tags)
    # Update the list of all known tags for this user.
    user_data.update_card_tags([], tags)
    ndb.put_multi(cards + [user_data]) # TODO(chris): only put if necessary
    search.insert_cards(cards)
| mit |
mylene-campana/hpp-rbprm-corba | script/tests/robot_bigStep_STEVE_path.py | 2 | 2481 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer

# RGBA colors used for roadmap / path display.
white=[1.0,1.0,1.0,1.0]
green=[0.23,0.75,0.2,0.5]
yellow=[0.85,0.75,0.15,1]
pink=[1,0.6,1,1]
orange=[1,0.42,0,1]
brown=[0.85,0.75,0.15,0.5]
blue = [0.0, 0.0, 0.8, 1.0]
grey = [0.7,0.7,0.7,1.0]
red = [0.8,0.0,0.0,1.0]

# Load the trunk model plus the leg range-of-motion (ROM) volumes used by
# RB-PRM to filter root configurations by contact reachability.
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'robot_test_trunk'
urdfNameRom = ['robot_test_lleg_rom','robot_test_rleg_rom']
urdfSuffix = ""
srdfSuffix = ""
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRom, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
# Sampling bounds for the root translation (x, y, z) and orientation.
rbprmBuilder.setJointBounds ("base_joint_xyz", [-6,6, -3, 3, 0, 1.5])
rbprmBuilder.boundSO3([-0.1,0.1,-3,3,-1.0,1.0])
rbprmBuilder.setFilter(['robot_test_lleg_rom', 'robot_test_rleg_rom'])
# Only accept contacts whose surface normal is mostly vertical.
rbprmBuilder.setNormalFilter('robot_test_lleg_rom', [0,0,1], 0.5)
rbprmBuilder.setNormalFilter('robot_test_rleg_rom', [0,0,1], 0.5)
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver( rbprmBuilder )
r = Viewer (ps)
r.loadObstacleModel (packageName, "ground_jump_easy", "planning")
# Initial and goal root configurations.
q_init = rbprmBuilder.getCurrentConfig ();
q_init [0:3] = [-4, 1, 0.9]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
q_goal = q_init [::]
#q_goal [0:3] = [-2, 0, 0.9]; r (q_goal) # first footbridge
q_goal [0:3] = [4, -1, 0.9]; r (q_goal) # bridge
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
#ps.client.problem.selectSteeringMethod("SteeringParabola")
#ps.selectPathPlanner("RRTdynamic")
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
r(q_init)
# (nameRoadmap,numberIt,colorNode,radiusSphere,sizeAxis,colorEdge
r.solveAndDisplay("rm",10,white,0.02,1,brown)
#t = ps.solve ()
#r.displayRoadmap("rm",0.005)
#r.displayPathMap("rmPath",0,0.02)
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
# Replay the raw (0) and optimized (1) paths in the viewer.
pp(0)
#pp.displayPath(1,blue)
#r.client.gui.setVisibility("path_0_root","ALWAYS_ON_TOP")
pp (1)
#r.client.gui.removeFromGroup("rm",r.sceneName)
#r.client.gui.removeFromGroup("rmPath",r.sceneName)
#r.client.gui.removeFromGroup("path_1_root",r.sceneName)
#~ pp.toFile(1, "/home/stonneau/dev/hpp/src/hpp-rbprm-corba/script/paths/stair.path")
| lgpl-3.0 |
leetcraft/leetcraft.github.io | plugins/liquid_tags/test_audio.py | 273 | 1456 | from . import audio
import pytest
import re
@pytest.mark.parametrize('input,expected', [
    ('http://foo.bar https://bar.foo',
     ('http://foo.bar', 'https://bar.foo', None)),
    ('http://test.foo',
     ('http://test.foo', None, None)),
    ('https://test.foo',
     ('https://test.foo', None, None)),
    ('http://foo.foo https://bar.bar http://zonk.zonk',
     ('http://foo.foo', 'https://bar.bar', 'http://zonk.zonk'))
])
def test_regex(input, expected):
    """The AUDIO regex captures up to three whitespace-separated URLs."""
    assert re.match(audio.AUDIO, input).groups() == expected
@pytest.mark.parametrize('input,expected', [
    ('http://foo.foo/foo.mp3',
     ('<audio controls>'
      '<source src="http://foo.foo/foo.mp3" type="audio/mpeg">'
      'Your browser does not support the audio element.</audio>')),
    ('https://foo.foo/foo.ogg http://bar.bar/bar.opus',
     ('<audio controls>'
      '<source src="https://foo.foo/foo.ogg" type="audio/ogg">'
      '<source src="http://bar.bar/bar.opus" type="audio/ogg">'
      'Your browser does not support the audio element.</audio>')),
    ('http://1.de/1.wav http://2.de/2.mp4 http://3.de/3.ogg',
     ('<audio controls>'
      '<source src="http://1.de/1.wav" type="audio/wav">'
      '<source src="http://2.de/2.mp4" type="audio/mp4">'
      '<source src="http://3.de/3.ogg" type="audio/ogg">'
      'Your browser does not support the audio element.</audio>'))
])
def test_create_html(input, expected):
    """create_html emits one <source> per URL with the matching MIME type."""
    assert audio.create_html(input) == expected
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/nbconvert/filters/tests/test_markdown.py | 6 | 6613 | # coding: utf-8
"""Tests for conversions from markdown to other formats"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import re
from copy import copy
from functools import partial
from ipython_genutils.py3compat import string_types
from ipython_genutils.testing import decorators as dec
from ...tests.base import TestsBase
from ..pandoc import convert_pandoc
from ..markdown import markdown2html
from jinja2 import Environment
class TestMarkdown(TestsBase):
    """Round-trip tests for markdown -> latex/html/rst conversion filters."""

    # Markdown inputs exercised by the latex/html/rst round-trip tests.
    tests = [
        '*test',
        '**test',
        '*test*',
        '_test_',
        '__test__',
        '__*test*__',
        '**test**',
        '#test',
        '##test',
        'test\n----',
        'test [link](https://google.com/)',
    ]

    # Expected substrings (or a (text, url) pair) for each entry in `tests`.
    tokens = [
        '*test',
        '**test',
        'test',
        'test',
        'test',
        'test',
        'test',
        'test',
        'test',
        'test',
        ('test', 'https://google.com/'),
    ]

    @dec.onlyif_cmds_exist('pandoc')
    def test_markdown2latex(self):
        """markdown2latex test"""
        for index, test in enumerate(self.tests):
            self._try_markdown(partial(convert_pandoc, from_format='markdown',
                                       to_format='latex'), test,
                               self.tokens[index])

    @dec.onlyif_cmds_exist('pandoc')
    def test_markdown2latex_markup(self):
        """markdown2latex with markup kwarg test"""
        # This string should be passed through unaltered with pandoc's
        # markdown_strict reader
        s = '1) arabic number with parenthesis'
        self.assertEqual(convert_pandoc(s, 'markdown_strict', 'latex'), s)
        # This string should be passed through unaltered with pandoc's
        # markdown_strict+tex_math_dollars reader
        s = r'$\alpha$ latex math'
        # sometimes pandoc uses $math$, sometimes it uses \(math\)
        expected = re.compile(r'(\$|\\\()\\alpha(\$|\\\)) latex math')
        try:
            # py3
            assertRegex = self.assertRegex
        except AttributeError:
            # py2
            assertRegex = self.assertRegexpMatches
        assertRegex(
            convert_pandoc(s, 'markdown_strict+tex_math_dollars', 'latex'),
            expected)

    @dec.onlyif_cmds_exist('pandoc')
    def test_pandoc_extra_args(self):
        """Extra pandoc CLI arguments are forwarded by the jinja filters."""
        # pass --no-wrap
        s = '\n'.join([
            "#latex {{long_line | md2l(['--no-wrap'])}}",
            "#rst {{long_line | md2r(['--columns', '5'])}}",
        ])
        long_line = ' '.join(['long'] * 30)
        env = Environment()
        env.filters.update({
            'md2l': lambda code, extra_args: convert_pandoc(
                code, from_format='markdown', to_format='latex',
                extra_args=extra_args),
            'md2r': lambda code, extra_args: convert_pandoc(
                code, from_format='markdown', to_format='rst',
                extra_args=extra_args),
        })
        tpl = env.from_string(s)
        rendered = tpl.render(long_line=long_line)
        _, latex, rst = rendered.split('#')
        self.assertEqual(latex.strip(), 'latex %s' % long_line)
        self.assertEqual(rst.strip(), 'rst %s' % long_line.replace(' ', '\n'))

    def test_markdown2html(self):
        """markdown2html test"""
        for index, test in enumerate(self.tests):
            self._try_markdown(markdown2html, test, self.tokens[index])

    def test_markdown2html_heading_anchors(self):
        """Headings get an id and a pilcrow anchor link."""
        for md, tokens in [
            ('# test',
             ('<h1', '>test', 'id="test"', u'¶</a>', "anchor-link")
             ),
            ('###test head space',
             ('<h3', '>test head space', 'id="test-head-space"', u'¶</a>', "anchor-link")
             )
            ]:
            self._try_markdown(markdown2html, md, tokens)

    def test_markdown2html_math(self):
        # Mathematical expressions should be passed through unaltered
        cases = [("\\begin{equation*}\n"
                  "\\left( \\sum_{k=1}^n a_k b_k \\right)^2 \\leq \\left( \\sum_{k=1}^n a_k^2 \\right) \\left( \\sum_{k=1}^n b_k^2 \\right)\n"
                  "\\end{equation*}"),
                 ("$$\n"
                  "a = 1 *3* 5\n"
                  "$$"),
                 "$ a = 1 *3* 5 $",
                 "$s_i = s_{i}\n$"
                 ]
        for case in cases:
            self.assertIn(case, markdown2html(case))

    def test_markdown2html_math_mixed(self):
        """ensure markdown between inline and inline-block math"""
        case = """The entries of $C$ are given by the exact formula:
$$
C_{ik} = \sum_{j=1}^n A_{ij} B_{jk}
$$
but there are many ways to _implement_ this computation. $\approx 2mnp$ flops"""
        self._try_markdown(markdown2html, case,
                           case.replace("_implement_", "<em>implement</em>"))

    def test_markdown2html_math_paragraph(self):
        """these should all parse without modification"""
        cases = [
            # https://github.com/ipython/ipython/issues/6724
            """Water that is stored in $t$, $s_t$, must equal the storage content of the previous stage,
$s_{t-1}$, plus a stochastic inflow, $I_t$, minus what is being released in $t$, $r_t$.
With $s_0$ defined as the initial storage content in $t=1$, we have""",
            # https://github.com/jupyter/nbviewer/issues/420
            """$C_{ik}$
$$
C_{ik} = \sum_{j=1}
$$
$C_{ik}$""",
            """$m$
$$
C = \begin{pmatrix}
0 & 0 & 0 & \cdots & 0 & 0 & -c_0 \\
0 & 0 & 0 & \cdots & 0 & 1 & -c_{m-1}
\end{pmatrix}
$$
$x^m$""",
            """$r=\overline{1,n}$
$$ {\bf
b}_{i}^{r}(t)=(1-t)\,{\bf b}_{i}^{r-1}(t)+t\,{\bf b}_{i+1}^{r-1}(t),\:
i=\overline{0,n-r}, $$
i.e. the $i^{th}$"""
        ]
        for case in cases:
            self.assertIn(case, markdown2html(case))

    @dec.onlyif_cmds_exist('pandoc')
    def test_markdown2rst(self):
        """markdown2rst test"""
        #Modify token array for rst, escape asterik
        tokens = copy(self.tokens)
        tokens[0] = r'\*test'
        tokens[1] = r'\*\*test'
        for index, test in enumerate(self.tests):
            self._try_markdown(partial(convert_pandoc, from_format='markdown',
                                       to_format='rst'), test, tokens[index])

    def _try_markdown(self, method, test, tokens):
        """Assert that every token (substring) appears in method(test)."""
        results = method(test)
        if isinstance(tokens, string_types):
            self.assertIn(tokens, results)
        else:
            for token in tokens:
                self.assertIn(token, results)
| mit |
yannickcr/CouchPotatoServer | libs/subliminal/tasks.py | 170 | 2328 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Task', 'ListTask', 'DownloadTask', 'StopTask']
class Task(object):
    """Base class for tasks to use in subliminal"""
    # Marker base class: concrete tasks (list/download/stop) subclass this
    # so workers can dispatch on type.
    pass
class ListTask(Task):
    """Ask a worker to search one service for subtitles.

    :param video: the video to find subtitles for
    :type video: :class:`~subliminal.videos.Video`
    :param list languages: candidate languages to search for
    :param string service: name of the service to query
    :param config: the service's configuration
    :type config: :class:`~subliminal.services.ServiceConfig`

    """
    def __init__(self, video, languages, service, config):
        super(ListTask, self).__init__()
        self.config = config
        self.languages = languages
        self.service = service
        self.video = video

    def __repr__(self):
        fields = (self.video, self.languages, self.service, self.config)
        return 'ListTask(%r, %r, %s, %r)' % fields
class DownloadTask(Task):
    """Ask a worker to download subtitles for a video.

    :param video: the video to download subtitles for
    :type video: :class:`~subliminal.videos.Video`
    :param subtitles: candidate subtitles, best first
    :type subtitles: list of :class:`~subliminal.subtitles.Subtitle`

    """
    def __init__(self, video, subtitles):
        super(DownloadTask, self).__init__()
        self.subtitles = subtitles
        self.video = video

    def __repr__(self):
        return 'DownloadTask(%r, %r)' % (self.video, self.subtitles)
class StopTask(Task):
    """Stop task that will stop the worker"""
    # Sentinel task: carries no data, its type alone tells the worker to exit.
    pass
| gpl-3.0 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/Curve.py | 1 | 7072 | # encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
from DrawingArea import DrawingArea
class Curve(DrawingArea):
"""
Object GtkCurve
Signals from GtkCurve:
curve-type-changed ()
Properties from GtkCurve:
curve-type -> GtkCurveType: Curve type
Is this curve linear, spline interpolated, or free-form
min-x -> gfloat: Minimum X
Minimum possible value for X
max-x -> gfloat: Maximum X
Maximum possible X value
min-y -> gfloat: Minimum Y
Minimum possible value for Y
max-y -> gfloat: Maximum Y
Maximum possible value for Y
Signals from GtkWidget:
composited-changed ()
show ()
hide ()
map ()
unmap ()
realize ()
unrealize ()
size-request (GtkRequisition)
size-allocate (GdkRectangle)
state-changed (GtkStateType)
parent-set (GtkWidget)
hierarchy-changed (GtkWidget)
style-set (GtkStyle)
direction-changed (GtkTextDirection)
grab-notify (gboolean)
child-notify (GParam)
mnemonic-activate (gboolean) -> gboolean
grab-focus ()
focus (GtkDirectionType) -> gboolean
move-focus (GtkDirectionType)
event (GdkEvent) -> gboolean
event-after (GdkEvent)
button-press-event (GdkEvent) -> gboolean
button-release-event (GdkEvent) -> gboolean
scroll-event (GdkEvent) -> gboolean
motion-notify-event (GdkEvent) -> gboolean
keynav-failed (GtkDirectionType) -> gboolean
delete-event (GdkEvent) -> gboolean
destroy-event (GdkEvent) -> gboolean
expose-event (GdkEvent) -> gboolean
key-press-event (GdkEvent) -> gboolean
key-release-event (GdkEvent) -> gboolean
enter-notify-event (GdkEvent) -> gboolean
leave-notify-event (GdkEvent) -> gboolean
configure-event (GdkEvent) -> gboolean
focus-in-event (GdkEvent) -> gboolean
focus-out-event (GdkEvent) -> gboolean
map-event (GdkEvent) -> gboolean
unmap-event (GdkEvent) -> gboolean
property-notify-event (GdkEvent) -> gboolean
selection-clear-event (GdkEvent) -> gboolean
selection-request-event (GdkEvent) -> gboolean
selection-notify-event (GdkEvent) -> gboolean
selection-received (GtkSelectionData, guint)
selection-get (GtkSelectionData, guint, guint)
proximity-in-event (GdkEvent) -> gboolean
proximity-out-event (GdkEvent) -> gboolean
drag-leave (GdkDragContext, guint)
drag-begin (GdkDragContext)
drag-end (GdkDragContext)
drag-data-delete (GdkDragContext)
drag-failed (GdkDragContext, GtkDragResult) -> gboolean
drag-motion (GdkDragContext, gint, gint, guint) -> gboolean
drag-drop (GdkDragContext, gint, gint, guint) -> gboolean
drag-data-get (GdkDragContext, GtkSelectionData, guint, guint)
drag-data-received (GdkDragContext, gint, gint, GtkSelectionData, guint, guint)
visibility-notify-event (GdkEvent) -> gboolean
client-event (GdkEvent) -> gboolean
no-expose-event (GdkEvent) -> gboolean
window-state-event (GdkEvent) -> gboolean
damage-event (GdkEvent) -> gboolean
grab-broken-event (GdkEvent) -> gboolean
query-tooltip (gint, gint, gboolean, GtkTooltip) -> gboolean
popup-menu () -> gboolean
show-help (GtkWidgetHelpType) -> gboolean
accel-closures-changed ()
screen-changed (GdkScreen)
can-activate-accel (guint) -> gboolean
Properties from GtkWidget:
name -> gchararray: Widget name
The name of the widget
parent -> GtkContainer: Parent widget
The parent widget of this widget. Must be a Container widget
width-request -> gint: Width request
Override for width request of the widget, or -1 if natural request should be used
height-request -> gint: Height request
Override for height request of the widget, or -1 if natural request should be used
visible -> gboolean: Visible
Whether the widget is visible
sensitive -> gboolean: Sensitive
Whether the widget responds to input
app-paintable -> gboolean: Application paintable
Whether the application will paint directly on the widget
can-focus -> gboolean: Can focus
Whether the widget can accept the input focus
has-focus -> gboolean: Has focus
Whether the widget has the input focus
is-focus -> gboolean: Is focus
Whether the widget is the focus widget within the toplevel
can-default -> gboolean: Can default
Whether the widget can be the default widget
has-default -> gboolean: Has default
Whether the widget is the default widget
receives-default -> gboolean: Receives default
If TRUE, the widget will receive the default action when it is focused
composite-child -> gboolean: Composite child
Whether the widget is part of a composite widget
style -> GtkStyle: Style
The style of the widget, which contains information about how it will look (colors etc)
events -> GdkEventMask: Events
The event mask that decides what kind of GdkEvents this widget gets
extension-events -> GdkExtensionMode: Extension events
The mask that decides what kind of extension events this widget gets
no-show-all -> gboolean: No show all
Whether gtk_widget_show_all() should not affect this widget
has-tooltip -> gboolean: Has tooltip
Whether this widget has a tooltip
tooltip-markup -> gchararray: Tooltip markup
The contents of the tooltip for this widget
tooltip-text -> gchararray: Tooltip Text
The contents of the tooltip for this widget
window -> GdkWindow: Window
The widget's window if it is realized
double-buffered -> gboolean: Double Buffered
Whether or not the widget is double buffered
Signals from GtkObject:
destroy ()
Properties from GtkObject:
user-data -> gpointer: User Data
Anonymous User Data Pointer
Signals from GObject:
notify (GParam)
"""
    @classmethod
    def do_curve_type_changed(cls, *args, **kwargs): # real signature unknown
        """Auto-generated stub for the GTK 'curve-type-changed' default
        handler; the real implementation lives in the C extension."""
        pass
    def get_vector(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub; implemented in the C extension module."""
        pass
    def reset(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub; implemented in the C extension module."""
        pass
    def set_curve_type(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub; implemented in the C extension module."""
        pass
    def set_gamma(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub; implemented in the C extension module."""
        pass
    def set_range(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub; implemented in the C extension module."""
        pass
    def set_vector(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub; implemented in the C extension module."""
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        """Auto-generated stub constructor; implemented in the C extension."""
        pass
__gtype__ = None # (!) real value is ''
| gpl-2.0 |
popazerty/EG-2 | lib/python/Components/Ipkg.py | 5 | 5550 | import os
from enigma import eConsoleAppContainer
from Components.Harddisk import harddiskmanager
from boxbranding import getImageDistro
opkgDestinations = []
opkgStatusPath = ''
def opkgExtraDestinations():
    """Return the extra '--add-dest name:path' arguments for every
    registered OPKG destination, joined into a single string."""
    global opkgDestinations
    flags = [" --add-dest %s:%s" % (dest, dest) for dest in opkgDestinations]
    return ''.join(flags)
def opkgAddDestination(mountpoint):
    """Register `mountpoint` as an extra OPKG package destination.

    No-op if the mount point is already registered.
    """
    global opkgDestinations
    if mountpoint not in opkgDestinations:
        opkgDestinations.append(mountpoint)
        print "[Ipkg] Added to OPKG destinations:", mountpoint
def onPartitionChange(why, part):
    """Harddisk-manager hook: keep the OPKG destination list in sync with
    mounted partitions.

    `why` is 'add' or 'remove'; `part` is the partition object whose
    `mountpoint` attribute is inspected.
    """
    global opkgDestinations
    global opkgStatusPath
    mountpoint = os.path.normpath(part.mountpoint)
    # Network mounts under /media/net are never package destinations.
    if mountpoint and not mountpoint.startswith('/media/net'):
        if why == 'add':
            if opkgStatusPath == '':
                # Lazily detect where this opkg build keeps its status file.
                # recent opkg versions
                opkgStatusPath = 'var/lib/opkg/status'
                if not os.path.exists(os.path.join('/', opkgStatusPath)):
                    # older opkg versions
                    opkgStatusPath = 'usr/lib/opkg/status'
            # Only partitions that already contain an opkg status file
            # are valid destinations.
            if os.path.exists(os.path.join(mountpoint, opkgStatusPath)):
                opkgAddDestination(mountpoint)
        elif why == 'remove':
            try:
                opkgDestinations.remove(mountpoint)
                print "[Ipkg] Removed from OPKG destinations:", mountpoint
            except:
                # Not registered: nothing to do (best-effort removal).
                pass
# Subscribe to future mount changes and seed the destination list with
# whatever is mounted right now (module import side effect).
harddiskmanager.on_partition_list_change.append(onPartitionChange)
for part in harddiskmanager.getMountedPartitions():
    onPartitionChange('add', part)
class IpkgComponent:
    """Asynchronous wrapper around the opkg/ipkg package manager.

    Commands run through an eConsoleAppContainer; their output is parsed
    line by line and reported to registered callbacks as EVENT_* codes.
    """

    # Event codes delivered to callbacks registered via addCallback().
    EVENT_INSTALL = 0
    EVENT_DOWNLOAD = 1
    EVENT_INFLATING = 2
    EVENT_CONFIGURING = 3
    EVENT_REMOVE = 4
    EVENT_UPGRADE = 5
    EVENT_LISTITEM = 9
    EVENT_DONE = 10
    EVENT_ERROR = 11
    EVENT_MODIFIED = 12

    # Command codes accepted by startCmd().
    CMD_INSTALL = 0
    CMD_LIST = 1
    CMD_REMOVE = 2
    CMD_UPDATE = 3
    CMD_UPGRADE = 4
    CMD_UPGRADE_LIST = 5

    def __init__(self, ipkg = 'opkg'):
        # `ipkg` is the package-manager binary to invoke.
        self.ipkg = ipkg
        self.cmd = eConsoleAppContainer()
        # Buffers a trailing partial line between data chunks.
        self.cache = None
        self.callbackList = []
        self.setCurrentCommand()

    def setCurrentCommand(self, command = None):
        """Remember which CMD_* is running (parseLine depends on it)."""
        self.currentCommand = command

    def runCmdEx(self, cmd):
        # Like runCmd(), but with the extra --add-dest destinations.
        self.runCmd(opkgExtraDestinations() + ' ' + cmd)

    def runCmd(self, cmd):
        """Spawn `<ipkg> <cmd>` asynchronously.

        A non-zero result from execute() means the process could not be
        started, which is reported as an immediate finish.
        """
        print "executing", self.ipkg, cmd
        self.cmd.appClosed.append(self.cmdFinished)
        self.cmd.dataAvail.append(self.cmdData)
        if self.cmd.execute(self.ipkg + " " + cmd):
            self.cmdFinished(-1)

    def startCmd(self, cmd, args = None):
        """Start one of the CMD_* operations.

        `args` carries per-command options: 'package' for install/remove,
        'test_only' for upgrade, 'installed_only' for list.
        """
        if cmd == self.CMD_UPDATE:
            # Remove stale feed files for the current image before updating.
            for fn in os.listdir('/var/lib/opkg'):
                if fn.startswith(getImageDistro()):
                    os.remove('/var/lib/opkg/'+fn)
            self.runCmdEx("update")
        elif cmd == self.CMD_UPGRADE:
            append = ""
            if args["test_only"]:
                append = " -test"
            self.runCmdEx("upgrade" + append)
        elif cmd == self.CMD_LIST:
            self.fetchedList = []
            if args['installed_only']:
                self.runCmdEx("list_installed")
            else:
                self.runCmd("list")
        elif cmd == self.CMD_INSTALL:
            self.runCmd("install " + args['package'])
        elif cmd == self.CMD_REMOVE:
            self.runCmd("remove " + args['package'])
        elif cmd == self.CMD_UPGRADE_LIST:
            self.fetchedList = []
            self.runCmdEx("list-upgradable")
        self.setCurrentCommand(cmd)

    def cmdFinished(self, retval):
        # Notify listeners, then detach from the console container.
        self.callCallbacks(self.EVENT_DONE)
        self.cmd.appClosed.remove(self.cmdFinished)
        self.cmd.dataAvail.remove(self.cmdData)

    def cmdData(self, data):
        # print "data:", data
        # Accumulate chunks until complete lines are available; keep any
        # trailing partial line in self.cache for the next chunk.
        if self.cache is None:
            self.cache = data
        else:
            self.cache += data
        if '\n' in data:
            splitcache = self.cache.split('\n')
            if self.cache[-1] == '\n':
                iteration = splitcache
                self.cache = None
            else:
                iteration = splitcache[:-1]
                self.cache = splitcache[-1]
            for mydata in iteration:
                if mydata != '':
                    self.parseLine(mydata)

    def parseLine(self, data):
        """Translate one line of opkg output into an EVENT_* callback."""
        if self.currentCommand in (self.CMD_LIST, self.CMD_UPGRADE_LIST):
            # List output: 'name - version[ - description]'.
            item = data.split(' - ', 2)
            self.fetchedList.append(item)
            self.callCallbacks(self.EVENT_LISTITEM, item)
            return
        try:
            if data.startswith('Downloading'):
                self.callCallbacks(self.EVENT_DOWNLOAD, data.split(' ', 5)[1].strip())
            elif data.startswith('Upgrading'):
                self.callCallbacks(self.EVENT_UPGRADE, data.split(' ', 2)[1])
            elif data.startswith('Installing'):
                self.callCallbacks(self.EVENT_INSTALL, data.split(' ', 2)[1])
            elif data.startswith('Removing'):
                self.callCallbacks(self.EVENT_REMOVE, data.split(' ', 3)[2])
            elif data.startswith('Configuring'):
                self.callCallbacks(self.EVENT_CONFIGURING, data.split(' ', 2)[1])
            elif data.startswith('An error occurred'):
                self.callCallbacks(self.EVENT_ERROR, None)
            elif data.startswith('Failed to download'):
                self.callCallbacks(self.EVENT_ERROR, None)
            elif data.startswith('ipkg_download: ERROR:'):
                self.callCallbacks(self.EVENT_ERROR, None)
            elif data.find('Configuration file \'') >= 0:
                # Note: the config file update question doesn't end with a newline, so
                # if we get multiple config file update questions, the next ones
                # don't necessarily start at the beginning of a line
                self.callCallbacks(self.EVENT_MODIFIED, data.split(' \'', 3)[1][:-1])
        except Exception, ex:
            # Never let a malformed line kill the data pump; just log it.
            print "[Ipkg] Failed to parse: '%s'" % data
            print "[Ipkg]", ex

    def callCallbacks(self, event, param = None):
        """Fan an event out to every registered callback."""
        for callback in self.callbackList:
            callback(event, param)

    def addCallback(self, callback):
        self.callbackList.append(callback)

    def removeCallback(self, callback):
        self.callbackList.remove(callback)

    def getFetchedList(self):
        # Result of the last CMD_LIST / CMD_UPGRADE_LIST run.
        return self.fetchedList

    def stop(self):
        """Kill the running command, if any."""
        self.cmd.kill()

    def isRunning(self):
        return self.cmd.running()

    def write(self, what):
        """Send a reply to an interactive prompt of the running command."""
        if what:
            # We expect unterminated commands, so terminate them here.
            what += "\n"
            self.cmd.write(what, len(what))
| gpl-2.0 |
jimkmc/micropython | tests/bytecode/pylib-tests/pipes.py | 172 | 8916 | """Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
""" # '
import re
import os
import tempfile
# we import the quote function rather than the module for backward compat
# (quote used to be an undocumented but used function in pipes)
from shlex import quote
__all__ = ["Template"]
# Conversion step kinds.  The two characters encode the step's input and
# output requirements: 'f' = needs a real file, '-' = can use a pipe,
# '.' = no stream at that end.
FILEIN_FILEOUT = 'ff'                # Must read & write real files
STDIN_FILEOUT  = '-f'                # Must write a real file
FILEIN_STDOUT  = 'f-'                # Must read a real file
STDIN_STDOUT   = '--'                # Normal pipeline element
SOURCE         = '.-'                # Must be first, writes stdout
SINK           = '-.'                # Must be last, reads stdin

# All kinds accepted by Template.append()/prepend().
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
             SOURCE, SINK]
class Template:
    """Class representing a pipeline template.

    A template holds an ordered list of (command, kind) steps; see the
    module docstring for the meaning of the two-character kind codes.
    """

    def __init__(self):
        """Template() returns a fresh pipeline template."""
        self.debugging = 0
        self.reset()

    def __repr__(self):
        """t.__repr__() implements repr(t)."""
        return '<Template instance, steps=%r>' % (self.steps,)

    def reset(self):
        """t.reset() restores a pipeline template to its initial state."""
        self.steps = []

    def clone(self):
        """t.clone() returns a new pipeline template with identical
        initial state as the current one."""
        t = Template()
        t.steps = self.steps[:]
        t.debugging = self.debugging
        return t

    def debug(self, flag):
        """t.debug(flag) turns debugging on or off."""
        self.debugging = flag

    def append(self, cmd, kind):
        """t.append(cmd, kind) adds a new step at the end.

        Raises TypeError for a non-string command and ValueError for an
        unknown kind, a misplaced SOURCE/SINK, or a missing $IN/$OUT.
        """
        if type(cmd) is not type(''):
            raise TypeError('Template.append: cmd must be a string')
        if kind not in stepkinds:
            raise ValueError('Template.append: bad kind %r' % (kind,))
        if kind == SOURCE:
            raise ValueError('Template.append: SOURCE can only be prepended')
        if self.steps and self.steps[-1][1] == SINK:
            raise ValueError('Template.append: already ends with SINK')
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError('Template.append: missing $IN in cmd')
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError('Template.append: missing $OUT in cmd')
        self.steps.append((cmd, kind))

    def prepend(self, cmd, kind):
        """t.prepend(cmd, kind) adds a new step at the front."""
        if type(cmd) is not type(''):
            raise TypeError('Template.prepend: cmd must be a string')
        if kind not in stepkinds:
            raise ValueError('Template.prepend: bad kind %r' % (kind,))
        if kind == SINK:
            raise ValueError('Template.prepend: SINK can only be appended')
        if self.steps and self.steps[0][1] == SOURCE:
            raise ValueError('Template.prepend: already begins with SOURCE')
        if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
            raise ValueError('Template.prepend: missing $IN in cmd')
        if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
            raise ValueError('Template.prepend: missing $OUT in cmd')
        self.steps.insert(0, (cmd, kind))

    def open(self, file, rw):
        """t.open(file, rw) returns a pipe or file object open for
        reading or writing; the file is the other end of the pipeline."""
        if rw == 'r':
            return self.open_r(file)
        if rw == 'w':
            return self.open_w(file)
        raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r'
                         % (rw,))

    def open_r(self, file):
        """t.open_r(file) and t.open_w(file) implement
        t.open(file, 'r') and t.open(file, 'w') respectively."""
        if not self.steps:
            return open(file, 'r')
        if self.steps[-1][1] == SINK:
            # BUGFIX: the message used to read "ends width SINK".
            raise ValueError('Template.open_r: pipeline ends with SINK')
        cmd = self.makepipeline(file, '')
        return os.popen(cmd, 'r')

    def open_w(self, file):
        if not self.steps:
            return open(file, 'w')
        if self.steps[0][1] == SOURCE:
            raise ValueError('Template.open_w: pipeline begins with SOURCE')
        cmd = self.makepipeline('', file)
        return os.popen(cmd, 'w')

    def copy(self, infile, outfile):
        """Run the pipeline as a shell command; return its exit status."""
        return os.system(self.makepipeline(infile, outfile))

    def makepipeline(self, infile, outfile):
        """Return the shell command text for this pipeline."""
        cmd = makepipeline(infile, self.steps, outfile)
        if self.debugging:
            print(cmd)
            cmd = 'set -x; ' + cmd
        return cmd
def makepipeline(infile, steps, outfile):
    """Build the shell command text for a conversion pipeline.

    `steps` is a list of (command, kind) pairs as stored in
    Template.steps; `infile`/`outfile` are file names, or '' to mean
    standard input/output.  Returns the complete /bin/sh command string.
    """
    # Build a plan with, for each command:
    #   [input filename or '', command string, kind, output filename or '']
    # (Renamed from `list`, which shadowed the builtin.)
    plan = [['', cmd, kind, ''] for cmd, kind in steps]
    #
    # Make sure there is at least one step
    #
    if not plan:
        plan.append(['', 'cat', '--', ''])
    #
    # Take care of the input and output ends
    #
    [cmd, kind] = plan[0][1:3]
    if kind[0] == 'f' and not infile:
        # First step needs a real input file but none was given: feed it
        # through an extra 'cat' reading stdin.
        plan.insert(0, ['', 'cat', '--', ''])
    plan[0][0] = infile
    #
    [cmd, kind] = plan[-1][1:3]
    if kind[1] == 'f' and not outfile:
        plan.append(['', 'cat', '--', ''])
    plan[-1][-1] = outfile
    #
    # Invent temporary files to connect stages that need files
    #
    garbage = []
    for i in range(1, len(plan)):
        lkind = plan[i-1][2]
        rkind = plan[i][2]
        if lkind[1] == 'f' or rkind[0] == 'f':
            (fd, temp) = tempfile.mkstemp()
            os.close(fd)
            garbage.append(temp)
            plan[i-1][-1] = plan[i][0] = temp
    #
    # Decorate each command with $IN/$OUT assignments and redirections
    # as its kind requires.
    for item in plan:
        [inf, cmd, kind, outf] = item
        if kind[1] == 'f':
            cmd = 'OUT=' + quote(outf) + '; ' + cmd
        if kind[0] == 'f':
            cmd = 'IN=' + quote(inf) + '; ' + cmd
        if kind[0] == '-' and inf:
            cmd = cmd + ' <' + quote(inf)
        if kind[1] == '-' and outf:
            cmd = cmd + ' >' + quote(outf)
        item[1] = cmd
    #
    # Join the stages: '|' when the next stage reads from the pipe,
    # plain sequencing otherwise (a temp file carries the data).
    cmdlist = plan[0][1]
    for item in plan[1:]:
        [cmd, kind] = item[1:3]
        if item[0] == '':
            if 'f' in kind:
                # Group so redirections apply to the whole command.
                cmd = '{ ' + cmd + '; }'
            cmdlist = cmdlist + ' |\n' + cmd
        else:
            cmdlist = cmdlist + '\n' + cmd
    #
    # Clean up any temporary files on exit or on common signals.
    if garbage:
        rmcmd = 'rm -f'
        for file in garbage:
            rmcmd = rmcmd + ' ' + quote(file)
        trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
        cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
    #
    return cmdlist
| mit |
klahnakoski/ActiveData | vendor/jx_base/expressions/not_op.py | 1 | 1316 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions.expression import Expression
from jx_base.language import is_op
from mo_imports import export
from mo_json import BOOLEAN
class NotOp(Expression):
    """Boolean negation of a wrapped expression (JSON form: {"not": ...})."""

    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, term)
        self.term = term

    def __data__(self):
        return {"not": self.term.__data__()}

    def __eq__(self, other):
        # Equal only to another NotOp wrapping an equal term.
        return is_op(other, NotOp) and self.term == other.term

    def vars(self):
        return self.term.vars()

    def map(self, map_):
        return NotOp(self.term.map(map_))

    def missing(self, lang):
        # A negation is missing exactly when its operand is missing.
        return self.term.missing(lang)

    def invert(self, lang):
        # not(not(x)) simplifies to x.
        return self.term.partial_eval(lang)

    def partial_eval(self, lang):
        return self.term.invert(lang)
# Register NotOp with sibling modules (deferred to break import cycles).
export("jx_base.expressions.and_op", NotOp)
export("jx_base.expressions.exists_op", NotOp)
export("jx_base.expressions.expression", NotOp)
| mpl-2.0 |
numerigraphe/odoomrp-wip | quality_control_claim/__openerp__.py | 2 | 1574 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2015 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the quality_control_claim addon.
{
    "name": "Quality control claim",
    "version": "1.0",
    "author": "OdooMRP team",
    "website": "http://www.odoomrp.com",
    "contributors": [
        # Fixed: the closing '>' was missing from this e-mail address.
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <ajuaristo@gmail.com>",
        "Alfredo de la Fuente <alfredodelafuente@avanzosc.es>",
    ],
    "category": "Quality control",
    "depends": [
        'quality_control',
        'crm_claim',
    ],
    "data": [
        'views/qc_test_view.xml',
        'views/qc_inspection_view.xml',
    ],
    "installable": True,
}
| agpl-3.0 |
zelotoj/ZLCommon | ZL/web/session.py | 11 | 10789 | """
Session Management
(from web.py)
"""
import os, time, datetime, random, base64
import os.path
from copy import deepcopy
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import hashlib
sha1 = hashlib.sha1
except ImportError:
import sha
sha1 = sha.new
import utils
import webapi as web
# Public API of this module.
__all__ = [
    'Session', 'SessionExpired',
    'Store', 'DiskStore', 'DBStore',
]

# Default session configuration.  Applications may override any entry in
# web.config.session_parameters before creating a Session.
# NOTE(review): the shipped 'secret_key' default must be overridden in
# production — it is public knowledge.
web.config.session_parameters = utils.storage({
    'cookie_name': 'webpy_session_id',
    'cookie_domain': None,
    'cookie_path' : None,
    'timeout': 86400, #24 * 60 * 60, # 24 hours in seconds
    'ignore_expiry': True,
    'ignore_change_ip': True,
    'secret_key': 'fLjUfxqXtfNoIldA0A0J',
    'expired_message': 'Session expired',
    'httponly': True,
    'secure': False
})
class SessionExpired(web.HTTPError):
    """Raised when an expired session is accessed; rendered as a
    '200 OK' response whose body is the configured expired_message."""
    def __init__(self, message):
        web.HTTPError.__init__(self, '200 OK', {}, data=message)
class Session(object):
    """Session management for web.py.

    Wraps a thread-local data dict, persists it through a `Store`, and
    tracks the session id in a browser cookie.  Attribute and item access
    are both delegated to the per-request data dict.
    """
    __slots__ = [
        "store", "_initializer", "_last_cleanup_time", "_config", "_data",
        "__getitem__", "__setitem__", "__delitem__"
    ]

    def __init__(self, app, store, initializer=None):
        self.store = store
        # `initializer` is a dict of defaults or a callable run for new sessions.
        self._initializer = initializer
        self._last_cleanup_time = 0
        self._config = utils.storage(web.config.session_parameters)
        self._data = utils.threadeddict()
        # Delegate item access directly to the thread-local dict.
        self.__getitem__ = self._data.__getitem__
        self.__setitem__ = self._data.__setitem__
        self.__delitem__ = self._data.__delitem__
        if app:
            app.add_processor(self._processor)

    def __contains__(self, name):
        return name in self._data

    def __getattr__(self, name):
        return getattr(self._data, name)

    def __setattr__(self, name, value):
        # Real attributes live in __slots__; everything else is session data.
        if name in self.__slots__:
            object.__setattr__(self, name, value)
        else:
            setattr(self._data, name, value)

    def __delattr__(self, name):
        delattr(self._data, name)

    def _processor(self, handler):
        """Application processor to set up the session for every request."""
        self._cleanup()
        self._load()
        try:
            return handler()
        finally:
            # Persist the session even if the handler raised.
            self._save()

    def _load(self):
        """Load the session from the store, by the id from the cookie."""
        # NOTE(review): the cookie_* locals below are unused here; the
        # cookie attributes are applied in _setcookie().
        cookie_name = self._config.cookie_name
        cookie_domain = self._config.cookie_domain
        cookie_path = self._config.cookie_path
        httponly = self._config.httponly
        self.session_id = web.cookies().get(cookie_name)
        # protection against session_id tampering
        if self.session_id and not self._valid_session_id(self.session_id):
            self.session_id = None
        self._check_expiry()
        if self.session_id:
            d = self.store[self.session_id]
            self.update(d)
            self._validate_ip()
        if not self.session_id:
            # Brand-new session: generate an id and apply the initializer.
            self.session_id = self._generate_session_id()
            if self._initializer:
                if isinstance(self._initializer, dict):
                    self.update(deepcopy(self._initializer))
                elif hasattr(self._initializer, '__call__'):
                    self._initializer()
        self.ip = web.ctx.ip

    def _check_expiry(self):
        # check for expiry
        if self.session_id and self.session_id not in self.store:
            if self._config.ignore_expiry:
                self.session_id = None
            else:
                return self.expired()

    def _validate_ip(self):
        # check for change of IP
        if self.session_id and self.get('ip', None) != web.ctx.ip:
            if not self._config.ignore_change_ip:
                return self.expired()

    def _save(self):
        if not self.get('_killed'):
            self._setcookie(self.session_id)
            self.store[self.session_id] = dict(self._data)
        else:
            # Killed session: expire the cookie immediately.
            self._setcookie(self.session_id, expires=-1)

    def _setcookie(self, session_id, expires='', **kw):
        cookie_name = self._config.cookie_name
        cookie_domain = self._config.cookie_domain
        cookie_path = self._config.cookie_path
        httponly = self._config.httponly
        secure = self._config.secure
        web.setcookie(cookie_name, session_id, expires=expires, domain=cookie_domain, httponly=httponly, secure=secure, path=cookie_path)

    def _generate_session_id(self):
        """Generate a random id for the session (SHA1 hex, collision-checked
        against the store)."""
        while True:
            rand = os.urandom(16)
            now = time.time()
            secret_key = self._config.secret_key
            session_id = sha1("%s%s%s%s" %(rand, now, utils.safestr(web.ctx.ip), secret_key))
            session_id = session_id.hexdigest()
            if session_id not in self.store:
                break
        return session_id

    def _valid_session_id(self, session_id):
        # Session ids are hex digests; anything else is tampering.
        rx = utils.re_compile('^[0-9a-fA-F]+$')
        return rx.match(session_id)

    def _cleanup(self):
        """Clean up the stored sessions, at most once per timeout period."""
        current_time = time.time()
        timeout = self._config.timeout
        if current_time - self._last_cleanup_time > timeout:
            self.store.cleanup(timeout)
            self._last_cleanup_time = current_time

    def expired(self):
        """Called when an expired session is accessed."""
        self._killed = True
        self._save()
        raise SessionExpired(self._config.expired_message)

    def kill(self):
        """Kill the session, make it no longer available."""
        del self.store[self.session_id]
        self._killed = True
class Store:
    """Base class for session stores.

    Subclasses provide dict-like persistence for session dicts plus a
    cleanup() policy; encode()/decode() define the wire format.
    """

    def __contains__(self, key):
        raise NotImplementedError

    def __getitem__(self, key):
        raise NotImplementedError

    def __setitem__(self, key, value):
        raise NotImplementedError

    def cleanup(self, timeout):
        """removes all the expired sessions"""
        raise NotImplementedError

    def encode(self, session_dict):
        """encodes session dict as a string"""
        pickled = pickle.dumps(session_dict)
        return base64.encodestring(pickled)

    def decode(self, session_data):
        """decodes the data to get back the session dict"""
        # NOTE(review): unpickling store contents is only safe as long as
        # the backing store cannot be written by untrusted parties.
        pickled = base64.decodestring(session_data)
        return pickle.loads(pickled)
class DiskStore(Store):
    """
    Store for saving a session on disk.

    >>> import tempfile
    >>> root = tempfile.mkdtemp()
    >>> s = DiskStore(root)
    >>> s['a'] = 'foo'
    >>> s['a']
    'foo'
    >>> time.sleep(0.01)
    >>> s.cleanup(0.01)
    >>> s['a']
    Traceback (most recent call last):
        ...
    KeyError: 'a'
    """
    def __init__(self, root):
        # if the storage root doesn't exist, create it.
        if not os.path.exists(root):
            os.makedirs(
                os.path.abspath(root)
            )
        self.root = root

    def _get_path(self, key):
        # Keys become file names; reject separators to block traversal.
        if os.path.sep in key:
            raise ValueError, "Bad key: %s" % repr(key)
        return os.path.join(self.root, key)

    def __contains__(self, key):
        path = self._get_path(key)
        return os.path.exists(path)

    def __getitem__(self, key):
        path = self._get_path(key)
        if os.path.exists(path):
            pickled = open(path).read()
            return self.decode(pickled)
        else:
            raise KeyError, key

    def __setitem__(self, key, value):
        path = self._get_path(key)
        pickled = self.encode(value)
        try:
            f = open(path, 'w')
            try:
                f.write(pickled)
            finally:
                f.close()
        except IOError:
            # Best-effort write: a failure silently drops the session data.
            pass

    def __delitem__(self, key):
        path = self._get_path(key)
        if os.path.exists(path):
            os.remove(path)

    def cleanup(self, timeout):
        # Expire sessions whose files have not been accessed for `timeout`
        # seconds (relies on the filesystem updating st_atime).
        now = time.time()
        for f in os.listdir(self.root):
            path = self._get_path(f)
            atime = os.stat(path).st_atime
            if now - atime > timeout :
                os.remove(path)
class DBStore(Store):
    """Store for saving a session in a database.

    Needs a table with the following columns:

        session_id CHAR(128) UNIQUE NOT NULL,
        atime DATETIME NOT NULL default current_timestamp,
        data TEXT
    """
    def __init__(self, db, table_name):
        self.db = db
        self.table = table_name

    def __contains__(self, key):
        data = self.db.select(self.table, where="session_id=$key", vars=locals())
        return bool(list(data))

    def __getitem__(self, key):
        now = datetime.datetime.now()
        try:
            s = self.db.select(self.table, where="session_id=$key", vars=locals())[0]
            # Touch the access time on every successful read.
            self.db.update(self.table, where="session_id=$key", atime=now, vars=locals())
        except IndexError:
            raise KeyError
        else:
            return self.decode(s.data)

    def __setitem__(self, key, value):
        pickled = self.encode(value)
        now = datetime.datetime.now()
        # Upsert: update the row if present, insert otherwise.
        if key in self:
            self.db.update(self.table, where="session_id=$key", data=pickled,atime=now, vars=locals())
        else:
            self.db.insert(self.table, False, session_id=key, atime=now, data=pickled )

    def __delitem__(self, key):
        self.db.delete(self.table, where="session_id=$key", vars=locals())

    def cleanup(self, timeout):
        timeout = datetime.timedelta(timeout/(24.0*60*60)) #timedelta takes numdays as arg
        last_allowed_time = datetime.datetime.now() - timeout
        self.db.delete(self.table, where="$last_allowed_time > atime", vars=locals())
class ShelfStore:
    """Session store backed by the `shelve` module.

        import shelve
        store = ShelfStore(shelve.open('session.shelf'))

    XXX: is shelve thread-safe?
    """

    def __init__(self, shelf):
        self.shelf = shelf

    def __contains__(self, key):
        return key in self.shelf

    def __getitem__(self, key):
        _atime, value = self.shelf[key]
        # Re-store the value so the access time is refreshed.
        self[key] = value
        return value

    def __setitem__(self, key, value):
        # Each entry carries its last-access timestamp.
        self.shelf[key] = (time.time(), value)

    def __delitem__(self, key):
        try:
            del self.shelf[key]
        except KeyError:
            # Deleting a missing key is a silent no-op.
            pass

    def cleanup(self, timeout):
        now = time.time()
        for key in self.shelf.keys():
            atime, _value = self.shelf[key]
            if now - atime > timeout:
                del self[key]
# Run the embedded doctests when executed directly.
if __name__ == '__main__' :
    import doctest
    doctest.testmod()
| gpl-2.0 |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/autobahn/wamp/test/test_exception.py | 2 | 2120 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from unittest2 import TestCase
from six import PY3
from autobahn.wamp.exception import ApplicationError
class ApplicationErrorTestCase(TestCase):
    """Regression tests: ApplicationError must tolerate non-ASCII
    (unicode) arguments in its string representations."""

    def test_unicode_str(self):
        """
        Unicode arguments in ApplicationError will not raise an exception when
        str()'d.
        """
        error = ApplicationError(u"some.url", u"\u2603")
        if PY3:
            self.assertIn(u"\u2603", str(error))
        else:
            # On Python 2 str() repr()s the arguments, so the character
            # appears as an escape sequence instead of the raw glyph.
            self.assertIn("\\u2603", str(error))

    def test_unicode_errormessage(self):
        """
        Unicode arguments in ApplicationError will not raise an exception when
        the error_message method is called.
        """
        error = ApplicationError(u"some.url", u"\u2603")
        print(error.error_message())
        self.assertIn(u"\u2603", error.error_message())
| gpl-3.0 |
powlo/script.module.pydevd | lib/pydevd_attach_to_process/winappdbg/breakpoint.py | 102 | 168168 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Breakpoints.
@group Breakpoints:
Breakpoint, CodeBreakpoint, PageBreakpoint, HardwareBreakpoint,
BufferWatch, Hook, ApiHook
@group Warnings:
BreakpointWarning, BreakpointCallbackWarning
"""
__revision__ = "$Id$"

# Public names exported by `from winappdbg.breakpoint import *`.
__all__ = [
    # Base class for breakpoints
    'Breakpoint',

    # Breakpoint implementations
    'CodeBreakpoint',
    'PageBreakpoint',
    'HardwareBreakpoint',

    # Hooks and watches
    'Hook',
    'ApiHook',
    'BufferWatch',

    # Warnings
    'BreakpointWarning',
    'BreakpointCallbackWarning',
]
from winappdbg import win32
from winappdbg import compat
import sys
from winappdbg.process import Process, Thread
from winappdbg.util import DebugRegister, MemoryAddresses
from winappdbg.textio import HexDump
import ctypes
import warnings
import traceback
#==============================================================================
class BreakpointWarning(UserWarning):
    """
    Warning category for non-fatal errors related to breakpoints.
    """
class BreakpointCallbackWarning(RuntimeWarning):
    """
    Warning category for uncaught exceptions raised by a breakpoint's
    user-defined callback.
    """
#==============================================================================
class Breakpoint (object):
"""
Base class for breakpoints.
Here's the breakpoints state machine.
@see: L{CodeBreakpoint}, L{PageBreakpoint}, L{HardwareBreakpoint}
@group Breakpoint states:
DISABLED, ENABLED, ONESHOT, RUNNING
@group State machine:
hit, disable, enable, one_shot, running,
is_disabled, is_enabled, is_one_shot, is_running,
get_state, get_state_name
@group Information:
get_address, get_size, get_span, is_here
@group Conditional breakpoints:
is_conditional, is_unconditional,
get_condition, set_condition, eval_condition
@group Automatic breakpoints:
is_automatic, is_interactive,
get_action, set_action, run_action
@cvar DISABLED: I{Disabled} S{->} Enabled, OneShot
@cvar ENABLED: I{Enabled} S{->} I{Running}, Disabled
@cvar ONESHOT: I{OneShot} S{->} I{Disabled}
@cvar RUNNING: I{Running} S{->} I{Enabled}, Disabled
@type DISABLED: int
@type ENABLED: int
@type ONESHOT: int
@type RUNNING: int
@type stateNames: dict E{lb} int S{->} str E{rb}
@cvar stateNames: User-friendly names for each breakpoint state.
@type typeName: str
@cvar typeName: User friendly breakpoint type string.
"""
# I don't think transitions Enabled <-> OneShot should be allowed... plus
# it would require special handling to avoid setting the same bp twice
DISABLED = 0
ENABLED = 1
ONESHOT = 2
RUNNING = 3
typeName = 'breakpoint'
stateNames = {
DISABLED : 'disabled',
ENABLED : 'enabled',
ONESHOT : 'one shot',
RUNNING : 'running',
}
def __init__(self, address, size = 1, condition = True, action = None):
"""
Breakpoint object.
@type address: int
@param address: Memory address for breakpoint.
@type size: int
@param size: Size of breakpoint in bytes (defaults to 1).
@type condition: function
@param condition: (Optional) Condition callback function.
The callback signature is::
def condition_callback(event):
return True # returns True or False
Where B{event} is an L{Event} object,
and the return value is a boolean
(C{True} to dispatch the event, C{False} otherwise).
@type action: function
@param action: (Optional) Action callback function.
If specified, the event is handled by this callback instead of
being dispatched normally.
The callback signature is::
def action_callback(event):
pass # no return value
Where B{event} is an L{Event} object.
"""
self.__address = address
self.__size = size
self.__state = self.DISABLED
self.set_condition(condition)
self.set_action(action)
def __repr__(self):
if self.is_disabled():
state = 'Disabled'
else:
state = 'Active (%s)' % self.get_state_name()
if self.is_conditional():
condition = 'conditional'
else:
condition = 'unconditional'
name = self.typeName
size = self.get_size()
if size == 1:
address = HexDump.address( self.get_address() )
else:
begin = self.get_address()
end = begin + size
begin = HexDump.address(begin)
end = HexDump.address(end)
address = "range %s-%s" % (begin, end)
msg = "<%s %s %s at remote address %s>"
msg = msg % (state, condition, name, address)
return msg
#------------------------------------------------------------------------------
def is_disabled(self):
"""
@rtype: bool
@return: C{True} if the breakpoint is in L{DISABLED} state.
"""
return self.get_state() == self.DISABLED
def is_enabled(self):
"""
@rtype: bool
@return: C{True} if the breakpoint is in L{ENABLED} state.
"""
return self.get_state() == self.ENABLED
def is_one_shot(self):
"""
@rtype: bool
@return: C{True} if the breakpoint is in L{ONESHOT} state.
"""
return self.get_state() == self.ONESHOT
def is_running(self):
"""
@rtype: bool
@return: C{True} if the breakpoint is in L{RUNNING} state.
"""
return self.get_state() == self.RUNNING
def is_here(self, address):
"""
@rtype: bool
@return: C{True} if the address is within the range of the breakpoint.
"""
begin = self.get_address()
end = begin + self.get_size()
return begin <= address < end
def get_address(self):
"""
@rtype: int
@return: The target memory address for the breakpoint.
"""
return self.__address
def get_size(self):
"""
@rtype: int
@return: The size in bytes of the breakpoint.
"""
return self.__size
def get_span(self):
"""
@rtype: tuple( int, int )
@return:
Starting and ending address of the memory range
covered by the breakpoint.
"""
address = self.get_address()
size = self.get_size()
return ( address, address + size )
def get_state(self):
"""
@rtype: int
@return: The current state of the breakpoint
(L{DISABLED}, L{ENABLED}, L{ONESHOT}, L{RUNNING}).
"""
return self.__state
def get_state_name(self):
"""
@rtype: str
@return: The name of the current state of the breakpoint.
"""
return self.stateNames[ self.get_state() ]
#------------------------------------------------------------------------------
def is_conditional(self):
"""
@see: L{__init__}
@rtype: bool
@return: C{True} if the breakpoint has a condition callback defined.
"""
# Do not evaluate as boolean! Test for identity with True instead.
return self.__condition is not True
def is_unconditional(self):
"""
@rtype: bool
@return: C{True} if the breakpoint doesn't have a condition callback defined.
"""
# Do not evaluate as boolean! Test for identity with True instead.
return self.__condition is True
def get_condition(self):
"""
@rtype: bool, function
@return: Returns the condition callback for conditional breakpoints.
Returns C{True} for unconditional breakpoints.
"""
return self.__condition
def set_condition(self, condition = True):
"""
Sets a new condition callback for the breakpoint.
@see: L{__init__}
@type condition: function
@param condition: (Optional) Condition callback function.
"""
if condition is None:
self.__condition = True
else:
self.__condition = condition
def eval_condition(self, event):
"""
Evaluates the breakpoint condition, if any was set.
@type event: L{Event}
@param event: Debug event triggered by the breakpoint.
@rtype: bool
@return: C{True} to dispatch the event, C{False} otherwise.
"""
condition = self.get_condition()
if condition is True: # shortcut for unconditional breakpoints
return True
if callable(condition):
try:
return bool( condition(event) )
except Exception:
e = sys.exc_info()[1]
msg = ("Breakpoint condition callback %r"
" raised an exception: %s")
msg = msg % (condition, traceback.format_exc(e))
warnings.warn(msg, BreakpointCallbackWarning)
return False
return bool( condition ) # force evaluation now
#------------------------------------------------------------------------------
def is_automatic(self):
"""
@rtype: bool
@return: C{True} if the breakpoint has an action callback defined.
"""
return self.__action is not None
def is_interactive(self):
"""
@rtype: bool
@return:
C{True} if the breakpoint doesn't have an action callback defined.
"""
return self.__action is None
def get_action(self):
"""
@rtype: bool, function
@return: Returns the action callback for automatic breakpoints.
Returns C{None} for interactive breakpoints.
"""
return self.__action
def set_action(self, action = None):
"""
Sets a new action callback for the breakpoint.
@type action: function
@param action: (Optional) Action callback function.
"""
self.__action = action
def run_action(self, event):
"""
Executes the breakpoint action callback, if any was set.
@type event: L{Event}
@param event: Debug event triggered by the breakpoint.
"""
action = self.get_action()
if action is not None:
try:
return bool( action(event) )
except Exception:
e = sys.exc_info()[1]
msg = ("Breakpoint action callback %r"
" raised an exception: %s")
msg = msg % (action, traceback.format_exc(e))
warnings.warn(msg, BreakpointCallbackWarning)
return False
return True
#------------------------------------------------------------------------------
def __bad_transition(self, state):
"""
Raises an C{AssertionError} exception for an invalid state transition.
@see: L{stateNames}
@type state: int
@param state: Intended breakpoint state.
@raise Exception: Always.
"""
statemsg = ""
oldState = self.stateNames[ self.get_state() ]
newState = self.stateNames[ state ]
msg = "Invalid state transition (%s -> %s)" \
" for breakpoint at address %s"
msg = msg % (oldState, newState, HexDump.address(self.get_address()))
raise AssertionError(msg)
def disable(self, aProcess, aThread):
"""
Transition to L{DISABLED} state.
- When hit: OneShot S{->} Disabled
- Forced by user: Enabled, OneShot, Running S{->} Disabled
- Transition from running state may require special handling
by the breakpoint implementation class.
@type aProcess: L{Process}
@param aProcess: Process object.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
## if self.__state not in (self.ENABLED, self.ONESHOT, self.RUNNING):
## self.__bad_transition(self.DISABLED)
self.__state = self.DISABLED
def enable(self, aProcess, aThread):
"""
Transition to L{ENABLED} state.
- When hit: Running S{->} Enabled
- Forced by user: Disabled, Running S{->} Enabled
- Transition from running state may require special handling
by the breakpoint implementation class.
@type aProcess: L{Process}
@param aProcess: Process object.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
## if self.__state not in (self.DISABLED, self.RUNNING):
## self.__bad_transition(self.ENABLED)
self.__state = self.ENABLED
def one_shot(self, aProcess, aThread):
"""
Transition to L{ONESHOT} state.
- Forced by user: Disabled S{->} OneShot
@type aProcess: L{Process}
@param aProcess: Process object.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
## if self.__state != self.DISABLED:
## self.__bad_transition(self.ONESHOT)
self.__state = self.ONESHOT
def running(self, aProcess, aThread):
"""
Transition to L{RUNNING} state.
- When hit: Enabled S{->} Running
@type aProcess: L{Process}
@param aProcess: Process object.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
if self.__state != self.ENABLED:
self.__bad_transition(self.RUNNING)
self.__state = self.RUNNING
def hit(self, event):
"""
Notify a breakpoint that it's been hit.
This triggers the corresponding state transition and sets the
C{breakpoint} property of the given L{Event} object.
@see: L{disable}, L{enable}, L{one_shot}, L{running}
@type event: L{Event}
@param event: Debug event to handle (depends on the breakpoint type).
@raise AssertionError: Disabled breakpoints can't be hit.
"""
aProcess = event.get_process()
aThread = event.get_thread()
state = self.get_state()
event.breakpoint = self
if state == self.ENABLED:
self.running(aProcess, aThread)
elif state == self.RUNNING:
self.enable(aProcess, aThread)
elif state == self.ONESHOT:
self.disable(aProcess, aThread)
elif state == self.DISABLED:
# this should not happen
msg = "Hit a disabled breakpoint at address %s"
msg = msg % HexDump.address( self.get_address() )
warnings.warn(msg, BreakpointWarning)
#==============================================================================
# XXX TODO
# Check if the user is trying to set a code breakpoint on a memory mapped file,
# so we don't end up writing the int3 instruction in the file by accident.
class CodeBreakpoint (Breakpoint):
    """
    Code execution breakpoints (using an int3 opcode).

    @see: L{Debug.break_at}

    @type bpInstruction: str
    @cvar bpInstruction: Breakpoint instruction for the current processor.
    """

    typeName = 'code breakpoint'

    # The int3 opcode is only defined for the x86 processor family.
    if win32.arch in (win32.ARCH_I386, win32.ARCH_AMD64):
        bpInstruction = '\xCC'      # int 3

    def __init__(self, address, condition = True, action = None):
        """
        Code breakpoint object.

        @see: L{Breakpoint.__init__}

        @type  address: int
        @param address: Memory address for breakpoint.

        @type  condition: function
        @param condition: (Optional) Condition callback function.

        @type  action: function
        @param action: (Optional) Action callback function.
        """
        if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
            msg = "Code breakpoints not supported for %s" % win32.arch
            raise NotImplementedError(msg)
        Breakpoint.__init__(self, address, len(self.bpInstruction),
                            condition, action)
        # Remember the byte(s) we overwrite so they can be restored later.
        self.__previousValue = self.bpInstruction

    def __set_bp(self, aProcess):
        """
        Writes a breakpoint instruction at the target address.

        @type  aProcess: L{Process}
        @param aProcess: Process object.
        """
        address = self.get_address()
        # Save whatever is currently at the target address...
        self.__previousValue = aProcess.read(address, len(self.bpInstruction))
        if self.__previousValue == self.bpInstruction:
            msg = "Possible overlapping code breakpoints at %s"
            msg = msg % HexDump.address(address)
            warnings.warn(msg, BreakpointWarning)
        # ...then overwrite it with the breakpoint instruction.
        aProcess.write(address, self.bpInstruction)

    def __clear_bp(self, aProcess):
        """
        Restores the original byte at the target address.

        @type  aProcess: L{Process}
        @param aProcess: Process object.
        """
        address = self.get_address()
        currentValue = aProcess.read(address, len(self.bpInstruction))
        if currentValue == self.bpInstruction:
            # Only restore the previous value if the int3 is still there.
            aProcess.write(self.get_address(), self.__previousValue)
        else:
            # Someone else modified our breakpoint; remember the new value
            # instead of clobbering it.
            self.__previousValue = currentValue
            msg = "Overwritten code breakpoint at %s"
            msg = msg % HexDump.address(address)
            warnings.warn(msg, BreakpointWarning)

    def disable(self, aProcess, aThread):
        # Restore the original byte unless the breakpoint was never set
        # (DISABLED) or is temporarily removed already (RUNNING).
        if not (self.is_disabled() or self.is_running()):
            self.__clear_bp(aProcess)
        super(CodeBreakpoint, self).disable(aProcess, aThread)

    def enable(self, aProcess, aThread):
        # Write the int3 only if it isn't already in place.
        if not (self.is_enabled() or self.is_one_shot()):
            self.__set_bp(aProcess)
        super(CodeBreakpoint, self).enable(aProcess, aThread)

    def one_shot(self, aProcess, aThread):
        # Write the int3 only if it isn't already in place.
        if not (self.is_enabled() or self.is_one_shot()):
            self.__set_bp(aProcess)
        super(CodeBreakpoint, self).one_shot(aProcess, aThread)

    # FIXME race condition here (however unlikely)
    # If another thread runs on over the target address while
    # the breakpoint is in RUNNING state, we'll miss it. There
    # is a solution to this but it's somewhat complicated, so
    # I'm leaving it for another version of the debugger. :(
    def running(self, aProcess, aThread):
        if self.is_enabled():
            self.__clear_bp(aProcess)
            aThread.set_tf()
        super(CodeBreakpoint, self).running(aProcess, aThread)
#==============================================================================
# TODO:
# * If the original page was already a guard page, the exception should be
# passed to the debugee instead of being handled by the debugger.
# * If the original page was already a guard page, it should NOT be converted
# to a no-access page when disabling the breakpoint.
# * If the page permissions were modified after the breakpoint was enabled,
# no change should be done on them when disabling the breakpoint. For this
# we need to remember the original page permissions instead of blindly
# setting and clearing the guard page bit on them.
# * Some pages seem to be "magic" and resist all attempts at changing their
# protect bits (for example the pages where the PEB and TEB reside). Maybe
# a more descriptive error message could be shown in this case.
class PageBreakpoint (Breakpoint):
    """
    Page access breakpoint (using guard pages).

    @see: L{Debug.watch_buffer}

    @group Information:
        get_size_in_pages
    """

    typeName = 'page breakpoint'

#------------------------------------------------------------------------------

    def __init__(self, address, pages = 1, condition = True, action = None):
        """
        Page breakpoint object.

        @see: L{Breakpoint.__init__}

        @type  address: int
        @param address: Memory address for breakpoint.

        @type  pages: int
        @param pages: Size of breakpoint in pages.

        @type  condition: function
        @param condition: (Optional) Condition callback function.

        @type  action: function
        @param action: (Optional) Action callback function.

        @raise ValueError: The address is not page aligned.
        """
        Breakpoint.__init__(self, address, pages * MemoryAddresses.pageSize,
                            condition, action)
        # FIX: the old alignment check compared an integer floor division
        # against a floating point true division; float rounding makes that
        # unreliable for addresses above 2**53. Integer modulo is exact for
        # arbitrarily large addresses. (Also fixed the docstring: the second
        # parameter documented as "address" is actually "pages".)
        if address % MemoryAddresses.pageSize:
            msg = "Address of page breakpoint " \
                  "must be aligned to a page size boundary " \
                  "(value %s received)" % HexDump.address(address)
            raise ValueError(msg)

    def get_size_in_pages(self):
        """
        @rtype:  int
        @return: The size in pages of the breakpoint.
        """
        # The size is always a multiple of the page size.
        return self.get_size() // MemoryAddresses.pageSize

    def __set_bp(self, aProcess):
        """
        Sets the target pages as guard pages.

        @type  aProcess: L{Process}
        @param aProcess: Process object.
        """
        lpAddress    = self.get_address()
        dwSize       = self.get_size()
        # Preserve the current protection bits, only adding PAGE_GUARD.
        flNewProtect = aProcess.mquery(lpAddress).Protect
        flNewProtect = flNewProtect | win32.PAGE_GUARD
        aProcess.mprotect(lpAddress, dwSize, flNewProtect)

    def __clear_bp(self, aProcess):
        """
        Restores the original permissions of the target pages.

        @type  aProcess: L{Process}
        @param aProcess: Process object.
        """
        lpAddress    = self.get_address()
        # Clear only the PAGE_GUARD bit, keeping the other protection bits.
        flNewProtect = aProcess.mquery(lpAddress).Protect
        flNewProtect = flNewProtect & (0xFFFFFFFF ^ win32.PAGE_GUARD)  # DWORD
        aProcess.mprotect(lpAddress, self.get_size(), flNewProtect)

    def disable(self, aProcess, aThread):
        if not self.is_disabled():
            self.__clear_bp(aProcess)
        super(PageBreakpoint, self).disable(aProcess, aThread)

    def enable(self, aProcess, aThread):
        # Re-arming a guard page after every hit requires single-stepping,
        # which is only implemented for x86/x64 here.
        if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
            msg = "Only one-shot page breakpoints are supported for %s"
            raise NotImplementedError(msg % win32.arch)
        if not self.is_enabled() and not self.is_one_shot():
            self.__set_bp(aProcess)
        super(PageBreakpoint, self).enable(aProcess, aThread)

    def one_shot(self, aProcess, aThread):
        if not self.is_enabled() and not self.is_one_shot():
            self.__set_bp(aProcess)
        super(PageBreakpoint, self).one_shot(aProcess, aThread)

    def running(self, aProcess, aThread):
        # The guard page exception already cleared the guard bit; single
        # step over the faulting instruction to re-arm the breakpoint.
        aThread.set_tf()
        super(PageBreakpoint, self).running(aProcess, aThread)
#==============================================================================
class HardwareBreakpoint (Breakpoint):
    """
    Hardware breakpoint (using debug registers).

    @see: L{Debug.watch_variable}

    @group Information:
        get_slot, get_trigger, get_watch

    @group Trigger flags:
        BREAK_ON_EXECUTION, BREAK_ON_WRITE, BREAK_ON_ACCESS

    @group Watch size flags:
        WATCH_BYTE, WATCH_WORD, WATCH_DWORD, WATCH_QWORD

    @type BREAK_ON_EXECUTION: int
    @cvar BREAK_ON_EXECUTION: Break on execution.

    @type BREAK_ON_WRITE: int
    @cvar BREAK_ON_WRITE: Break on write.

    @type BREAK_ON_ACCESS: int
    @cvar BREAK_ON_ACCESS: Break on read or write.

    @type WATCH_BYTE: int
    @cvar WATCH_BYTE: Watch a byte.

    @type WATCH_WORD: int
    @cvar WATCH_WORD: Watch a word (2 bytes).

    @type WATCH_DWORD: int
    @cvar WATCH_DWORD: Watch a double word (4 bytes).

    @type WATCH_QWORD: int
    @cvar WATCH_QWORD: Watch one quad word (8 bytes).

    @type validTriggers: tuple
    @cvar validTriggers: Valid trigger flag values.

    @type validWatchSizes: tuple
    @cvar validWatchSizes: Valid watch flag values.
    """

    typeName = 'hardware breakpoint'

    # Trigger and watch size flags, delegated to the DebugRegister helper.
    BREAK_ON_EXECUTION  = DebugRegister.BREAK_ON_EXECUTION
    BREAK_ON_WRITE      = DebugRegister.BREAK_ON_WRITE
    BREAK_ON_ACCESS     = DebugRegister.BREAK_ON_ACCESS

    WATCH_BYTE  = DebugRegister.WATCH_BYTE
    WATCH_WORD  = DebugRegister.WATCH_WORD
    WATCH_DWORD = DebugRegister.WATCH_DWORD
    WATCH_QWORD = DebugRegister.WATCH_QWORD

    validTriggers = (
        BREAK_ON_EXECUTION,
        BREAK_ON_WRITE,
        BREAK_ON_ACCESS,
    )

    validWatchSizes = (
        WATCH_BYTE,
        WATCH_WORD,
        WATCH_DWORD,
        WATCH_QWORD,
    )

    def __init__(self, address, triggerFlag = BREAK_ON_ACCESS,
                                   sizeFlag = WATCH_DWORD,
                                  condition = True,
                                     action = None):
        """
        Hardware breakpoint object.

        @see: L{Breakpoint.__init__}

        @type  address: int
        @param address: Memory address for breakpoint.

        @type  triggerFlag: int
        @param triggerFlag: Trigger of breakpoint. Must be one of the following:

             - L{BREAK_ON_EXECUTION}

               Break on code execution.

             - L{BREAK_ON_WRITE}

               Break on memory write.

             - L{BREAK_ON_ACCESS}

               Break on memory read or write.

        @type  sizeFlag: int
        @param sizeFlag: Size of breakpoint. Must be one of the following:

             - L{WATCH_BYTE}

               One (1) byte in size.

             - L{WATCH_WORD}

               Two (2) bytes in size.

             - L{WATCH_DWORD}

               Four (4) bytes in size.

             - L{WATCH_QWORD}

               Eight (8) bytes in size.

        @type  condition: function
        @param condition: (Optional) Condition callback function.

        @type  action: function
        @param action: (Optional) Action callback function.
        """
        # NOTE(review): the original docstring had the descriptions of
        # BREAK_ON_WRITE and BREAK_ON_ACCESS swapped; fixed above to match
        # the class docstring and the DR7 register semantics.
        if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
            msg = "Hardware breakpoints not supported for %s" % win32.arch
            raise NotImplementedError(msg)
        # Translate the watch size flag into a byte count.
        if sizeFlag == self.WATCH_BYTE:
            size = 1
        elif sizeFlag == self.WATCH_WORD:
            size = 2
        elif sizeFlag == self.WATCH_DWORD:
            size = 4
        elif sizeFlag == self.WATCH_QWORD:
            size = 8
        else:
            msg = "Invalid size flag for hardware breakpoint (%s)"
            msg = msg % repr(sizeFlag)
            raise ValueError(msg)
        if triggerFlag not in self.validTriggers:
            msg = "Invalid trigger flag for hardware breakpoint (%s)"
            msg = msg % repr(triggerFlag)
            raise ValueError(msg)
        Breakpoint.__init__(self, address, size, condition, action)
        self.__trigger = triggerFlag
        self.__watch   = sizeFlag
        # Debug register slot (0-3) used by this breakpoint, or None when
        # the breakpoint is not currently set in any thread context.
        self.__slot    = None

    def __clear_bp(self, aThread):
        """
        Clears this breakpoint from the debug registers.

        @type  aThread: L{Thread}
        @param aThread: Thread object.
        """
        if self.__slot is not None:
            # The thread must be suspended while its context is modified.
            aThread.suspend()
            try:
                ctx = aThread.get_context(win32.CONTEXT_DEBUG_REGISTERS)
                DebugRegister.clear_bp(ctx, self.__slot)
                aThread.set_context(ctx)
                self.__slot = None
            finally:
                aThread.resume()

    def __set_bp(self, aThread):
        """
        Sets this breakpoint in the debug registers.

        @type  aThread: L{Thread}
        @param aThread: Thread object.

        @raise RuntimeError: No debug register slot is available.
        """
        if self.__slot is None:
            # The thread must be suspended while its context is modified.
            aThread.suspend()
            try:
                ctx = aThread.get_context(win32.CONTEXT_DEBUG_REGISTERS)
                self.__slot = DebugRegister.find_slot(ctx)
                if self.__slot is None:
                    msg = "No available hardware breakpoint slots for thread ID %d"
                    msg = msg % aThread.get_tid()
                    raise RuntimeError(msg)
                DebugRegister.set_bp(ctx, self.__slot, self.get_address(),
                                     self.__trigger, self.__watch)
                aThread.set_context(ctx)
            finally:
                aThread.resume()

    def get_slot(self):
        """
        @rtype:  int
        @return: The debug register number used by this breakpoint,
            or C{None} if the breakpoint is not active.
        """
        return self.__slot

    def get_trigger(self):
        """
        @see: L{validTriggers}
        @rtype:  int
        @return: The breakpoint trigger flag.
        """
        return self.__trigger

    def get_watch(self):
        """
        @see: L{validWatchSizes}
        @rtype:  int
        @return: The breakpoint watch flag.
        """
        return self.__watch

    def disable(self, aProcess, aThread):
        if not self.is_disabled():
            self.__clear_bp(aThread)
        super(HardwareBreakpoint, self).disable(aProcess, aThread)

    def enable(self, aProcess, aThread):
        if not self.is_enabled() and not self.is_one_shot():
            self.__set_bp(aThread)
        super(HardwareBreakpoint, self).enable(aProcess, aThread)

    def one_shot(self, aProcess, aThread):
        if not self.is_enabled() and not self.is_one_shot():
            self.__set_bp(aThread)
        super(HardwareBreakpoint, self).one_shot(aProcess, aThread)

    def running(self, aProcess, aThread):
        # Remove the breakpoint from the debug registers and single step
        # over the instruction so it can be set again afterwards.
        self.__clear_bp(aThread)
        super(HardwareBreakpoint, self).running(aProcess, aThread)
        aThread.set_tf()
#==============================================================================
# XXX FIXME
#
# The implementation of function hooks is very simple. A breakpoint is set at
# the entry point. Each time it's hit the "pre" callback is executed. If a
# "post" callback was defined, a one-shot breakpoint is set at the return
# address - and when that breakpoint hits, the "post" callback is executed.
#
# Functions hooks, as they are implemented now, don't work correctly for
# recursive functions. The problem is we don't know when to remove the
# breakpoint at the return address. Also there could be more than one return
# address.
#
# One possible solution would involve a dictionary of lists, where the key
# would be the thread ID and the value a stack of return addresses. But we
# still don't know what to do if the "wrong" return address is hit for some
# reason (maybe check the stack pointer?). Or if both a code and a hardware
# breakpoint are hit simultaneously.
#
# For now, the workaround for the user is to set only the "pre" callback for
# functions that are known to be recursive.
#
# If an exception is thrown by a hooked function and caught by one of its
# parent functions, the "post" callback won't be called and weird stuff may
# happen. A possible solution is to put a breakpoint in the system call that
# unwinds the stack, to detect this case and remove the "post" breakpoint.
#
# Hooks may also behave oddly if the return address is overwritten by a buffer
# overflow bug (this is similar to the exception problem). But it's probably a
# minor issue since when you're fuzzing a function for overflows you're usually
# not interested in the return value anyway.
# TODO: an API to modify the hooked function's arguments
class Hook (object):
    """
    Factory class to produce hook objects. Used by L{Debug.hook_function} and
    L{Debug.stalk_function}.

    When you try to instance this class, one of the architecture specific
    implementations is returned instead.

    Instances act as an action callback for code breakpoints set at the
    beginning of a function. It automatically retrieves the parameters from
    the stack, sets a breakpoint at the return address and retrieves the
    return value from the function call.

    @see: L{_Hook_i386}, L{_Hook_amd64}

    @type useHardwareBreakpoints: bool
    @cvar useHardwareBreakpoints: C{True} to try to use hardware breakpoints,
        C{False} otherwise.
    """

    # This is a factory class that returns
    # the architecture specific implementation.
    def __new__(cls, *argv, **argd):
        # The 'arch' argument may come in as a keyword or as the fifth
        # positional argument (after preCB, postCB, paramCount, signature);
        # either way it's removed before delegating to the subclass.
        try:
            arch = argd['arch']
            del argd['arch']
        except KeyError:
            try:
                arch = argv[4]
                argv = argv[:4] + argv[5:]
            except IndexError:
                raise TypeError("Missing 'arch' argument!")
        if arch is None:
            arch = win32.arch
        if arch == win32.ARCH_I386:
            return _Hook_i386(*argv, **argd)
        if arch == win32.ARCH_AMD64:
            return _Hook_amd64(*argv, **argd)
        # NOTE(review): passing constructor arguments to object.__new__()
        # raises TypeError on Python 3 — presumably this fallback is never
        # reached for supported architectures; verify before porting.
        return object.__new__(cls, *argv, **argd)

    # XXX FIXME
    #
    # Hardware breakpoints don't work correctly (or at all) in old VirtualBox
    # versions (3.0 and below).
    #
    # Maybe there should be a way to autodetect the buggy VirtualBox versions
    # and tell Hook objects not to use hardware breakpoints?
    #
    # For now the workaround is to manually set this variable to True when
    # WinAppDbg is installed on a physical machine.
    #
    useHardwareBreakpoints = False

    def __init__(self, preCB = None, postCB = None,
                       paramCount = None, signature = None,
                       arch = None):
        """
        @type  preCB: function
        @param preCB: (Optional) Callback triggered on function entry.

            The signature for the callback should be something like this::

                def pre_LoadLibraryEx(event, ra, lpFilename, hFile, dwFlags):

                    # return address
                    ra = params[0]

                    # function arguments start from here...
                    szFilename = event.get_process().peek_string(lpFilename)

                    # (...)

            Note that all pointer types are treated like void pointers, so your
            callback won't get the string or structure pointed to by it, but
            the remote memory address instead. This is so to prevent the ctypes
            library from being "too helpful" and trying to dereference the
            pointer. To get the actual data being pointed to, use one of the
            L{Process.read} methods.

        @type  postCB: function
        @param postCB: (Optional) Callback triggered on function exit.

            The signature for the callback should be something like this::

                def post_LoadLibraryEx(event, return_value):

                    # (...)

        @type  paramCount: int
        @param paramCount:
            (Optional) Number of parameters for the C{preCB} callback,
            not counting the return address. Parameters are read from
            the stack and assumed to be DWORDs in 32 bits and QWORDs in 64.

            This is a faster way to pull stack parameters in 32 bits, but in 64
            bits (or with some odd APIs in 32 bits) it won't be useful, since
            not all arguments to the hooked function will be of the same size.

            For a more reliable and cross-platform way of hooking use the
            C{signature} argument instead.

        @type  signature: tuple
        @param signature:
            (Optional) Tuple of C{ctypes} data types that constitute the
            hooked function signature. When the function is called, this will
            be used to parse the arguments from the stack. Overrides the
            C{paramCount} argument.

        @type  arch: str
        @param arch: (Optional) Target architecture. Defaults to the current
            architecture. See: L{win32.arch}
        """
        self.__preCB    = preCB
        self.__postCB   = postCB
        # Per-thread stacks of captured arguments, so recursive or
        # interleaved calls from different threads don't mix parameters.
        self.__paramStack = dict()   # tid -> list of tuple( arg, arg, arg... )

        self._paramCount = paramCount

        if win32.arch != win32.ARCH_I386:
            self.useHardwareBreakpoints = False

        # In 64 bits a bare paramCount is converted into a QWORD signature.
        if win32.bits == 64 and paramCount and not signature:
            signature = (win32.QWORD,) * paramCount

        if signature:
            self._signature = self._calc_signature(signature)
        else:
            self._signature = None

    def _cast_signature_pointers_to_void(self, signature):
        """
        Replaces pointer types in the signature with C{void} pointers, so
        ctypes passes the raw remote address instead of dereferencing it.
        """
        c_void_p  = ctypes.c_void_p
        c_char_p  = ctypes.c_char_p
        c_wchar_p = ctypes.c_wchar_p
        _Pointer  = ctypes._Pointer
        cast      = ctypes.cast
        for i in compat.xrange(len(signature)):
            t = signature[i]
            if t is not c_void_p and (issubclass(t, _Pointer) \
                                      or t in [c_char_p, c_wchar_p]):
                signature[i] = cast(t, c_void_p)

    def _calc_signature(self, signature):
        # Overridden by the architecture specific subclasses.
        raise NotImplementedError(
            "Hook signatures are not supported for architecture: %s" \
            % win32.arch)

    def _get_return_address(self, aProcess, aThread):
        # Overridden by the architecture specific subclasses.
        return None

    def _get_function_arguments(self, aProcess, aThread):
        # Overridden by the architecture specific subclasses.
        if self._signature or self._paramCount:
            raise NotImplementedError(
                "Hook signatures are not supported for architecture: %s" \
                % win32.arch)
        return ()

    def _get_return_value(self, aThread):
        # Overridden by the architecture specific subclasses.
        return None

    # By using break_at() to set a process-wide breakpoint on the function's
    # return address, we might hit a race condition when more than one thread
    # is being debugged.
    #
    # Hardware breakpoints should be used instead. But since a thread can run
    # out of those, we need to fall back to this method when needed.

    def __call__(self, event):
        """
        Handles the breakpoint event on entry of the function.

        @type  event: L{ExceptionEvent}
        @param event: Breakpoint hit event.

        @raise WindowsError: An error occurred.
        """
        debug = event.debug

        dwProcessId = event.get_pid()
        dwThreadId  = event.get_tid()
        aProcess    = event.get_process()
        aThread     = event.get_thread()

        # Get the return address and function arguments.
        ra     = self._get_return_address(aProcess, aThread)
        params = self._get_function_arguments(aProcess, aThread)

        # Keep the function arguments for later use.
        self.__push_params(dwThreadId, params)

        # If we need to hook the return from the function...
        bHookedReturn = False
        if ra is not None and self.__postCB is not None:

            # Try to set a one shot hardware breakpoint at the return address.
            useHardwareBreakpoints = self.useHardwareBreakpoints
            if useHardwareBreakpoints:
                try:
                    debug.define_hardware_breakpoint(
                        dwThreadId,
                        ra,
                        event.debug.BP_BREAK_ON_EXECUTION,
                        event.debug.BP_WATCH_BYTE,
                        True,
                        self.__postCallAction_hwbp
                        )
                    debug.enable_one_shot_hardware_breakpoint(dwThreadId, ra)
                    bHookedReturn = True
                except Exception:
                    e = sys.exc_info()[1]
                    useHardwareBreakpoints = False
                    msg = ("Failed to set hardware breakpoint"
                           " at address %s for thread ID %d")
                    msg = msg % (HexDump.address(ra), dwThreadId)
                    warnings.warn(msg, BreakpointWarning)

            # If not possible, set a code breakpoint instead.
            if not useHardwareBreakpoints:
                try:
                    debug.break_at(dwProcessId, ra,
                                   self.__postCallAction_codebp)
                    bHookedReturn = True
                except Exception:
                    e = sys.exc_info()[1]
                    msg = ("Failed to set code breakpoint"
                           " at address %s for process ID %d")
                    msg = msg % (HexDump.address(ra), dwProcessId)
                    warnings.warn(msg, BreakpointWarning)

        # Call the "pre" callback.
        try:
            self.__callHandler(self.__preCB, event, ra, *params)

        # If no "post" callback is defined, forget the function arguments.
        finally:
            if not bHookedReturn:
                self.__pop_params(dwThreadId)

    def __postCallAction_hwbp(self, event):
        """
        Handles hardware breakpoint events on return from the function.

        @type  event: L{ExceptionEvent}
        @param event: Single step event.
        """

        # Remove the one shot hardware breakpoint
        # at the return address location in the stack.
        tid     = event.get_tid()
        address = event.breakpoint.get_address()
        event.debug.erase_hardware_breakpoint(tid, address)

        # Call the "post" callback.
        try:
            self.__postCallAction(event)

        # Forget the parameters.
        finally:
            self.__pop_params(tid)

    def __postCallAction_codebp(self, event):
        """
        Handles code breakpoint events on return from the function.

        @type  event: L{ExceptionEvent}
        @param event: Breakpoint hit event.
        """

        # If the breakpoint was accidentally hit by another thread,
        # pass it to the debugger instead of calling the "post" callback.
        #
        # XXX FIXME:
        # I suppose this check will fail under some weird conditions...
        #
        tid = event.get_tid()
        if tid not in self.__paramStack:
            return True

        # Remove the code breakpoint at the return address.
        pid     = event.get_pid()
        address = event.breakpoint.get_address()
        event.debug.dont_break_at(pid, address)

        # Call the "post" callback.
        try:
            self.__postCallAction(event)

        # Forget the parameters.
        finally:
            self.__pop_params(tid)

    def __postCallAction(self, event):
        """
        Calls the "post" callback.

        @type  event: L{ExceptionEvent}
        @param event: Breakpoint hit event.
        """
        aThread = event.get_thread()
        retval  = self._get_return_value(aThread)
        self.__callHandler(self.__postCB, event, retval)

    def __callHandler(self, callback, event, *params):
        """
        Calls a "pre" or "post" handler, if set.

        @type  callback: function
        @param callback: Callback function to call.

        @type  event: L{ExceptionEvent}
        @param event: Breakpoint hit event.

        @type  params: tuple
        @param params: Parameters for the callback function.
        """
        if callback is not None:
            # Expose this hook to the callback through the event object.
            event.hook = self
            callback(event, *params)

    def __push_params(self, tid, params):
        """
        Remembers the arguments tuple for the last call to the hooked function
        from this thread.

        @type  tid: int
        @param tid: Thread global ID.

        @type  params: tuple( arg, arg, arg... )
        @param params: Tuple of arguments.
        """
        stack = self.__paramStack.get( tid, [] )
        stack.append(params)
        self.__paramStack[tid] = stack

    def __pop_params(self, tid):
        """
        Forgets the arguments tuple for the last call to the hooked function
        from this thread.

        @type  tid: int
        @param tid: Thread global ID.
        """
        stack = self.__paramStack[tid]
        stack.pop()
        # Drop the per-thread entry entirely once its stack is empty, so
        # __postCallAction_codebp can detect "wrong thread" hits.
        if not stack:
            del self.__paramStack[tid]

    def get_params(self, tid):
        """
        Returns the parameters found in the stack when the hooked function
        was last called by this thread.

        @type  tid: int
        @param tid: Thread global ID.

        @rtype:  tuple( arg, arg, arg... )
        @return: Tuple of arguments.
        """
        try:
            params = self.get_params_stack(tid)[-1]
        except IndexError:
            msg = "Hooked function called from thread %d already returned"
            raise IndexError(msg % tid)
        return params

    def get_params_stack(self, tid):
        """
        Returns the parameters found in the stack each time the hooked function
        was called by this thread and hasn't returned yet.

        @type  tid: int
        @param tid: Thread global ID.

        @rtype:  list of tuple( arg, arg, arg... )
        @return: List of argument tuples.
        """
        try:
            stack = self.__paramStack[tid]
        except KeyError:
            msg = "Hooked function was not called from thread %d"
            raise KeyError(msg % tid)
        return stack

    def hook(self, debug, pid, address):
        """
        Installs the function hook at a given process and address.

        @see: L{unhook}

        @warning: Do not call from a function hook callback.

        @type  debug: L{Debug}
        @param debug: Debug object.

        @type  pid: int
        @param pid: Process ID.

        @type  address: int
        @param address: Function address.
        """
        return debug.break_at(pid, address, self)

    def unhook(self, debug, pid, address):
        """
        Removes the function hook at a given process and address.

        @see: L{hook}

        @warning: Do not call from a function hook callback.

        @type  debug: L{Debug}
        @param debug: Debug object.

        @type  pid: int
        @param pid: Process ID.

        @type  address: int
        @param address: Function address.
        """
        return debug.dont_break_at(pid, address)
class _Hook_i386 (Hook):
    """
    Implementation details for L{Hook} on the L{win32.ARCH_I386} architecture.
    """

    # We don't want to inherit the parent class __new__ method.
    __new__ = object.__new__

    def _calc_signature(self, signature):
        # Downgrade pointer types to void pointers so ctypes won't try
        # to dereference them.
        self._cast_signature_pointers_to_void(signature)

        # Arguments are pushed on the stack right to left, so list the
        # structure fields in reverse order.
        fields = [ ("arg_%s" % i, signature[i])
                   for i in compat.xrange(len(signature) - 1, -1, -1) ]

        class Arguments (ctypes.Structure):
            _fields_ = fields
        return Arguments

    def _get_return_address(self, aProcess, aThread):
        # On function entry the return address is at the top of the stack.
        return aProcess.read_pointer( aThread.get_sp() )

    def _get_function_arguments(self, aProcess, aThread):
        # Skip the return address, then read the arguments off the stack.
        offset = win32.sizeof(win32.LPVOID)
        if self._signature:
            return aThread.read_stack_structure(self._signature,
                                                offset = offset)
        if self._paramCount:
            return aThread.read_stack_dwords(self._paramCount,
                                             offset = offset)
        return ()

    def _get_return_value(self, aThread):
        # Integer return values are found in the EAX register.
        return aThread.get_context(win32.CONTEXT_INTEGER)['Eax']
class _Hook_amd64 (Hook):
    """
    Implementation details for L{Hook} on the L{win32.ARCH_AMD64} architecture.

    In this calling convention the first four arguments may travel in
    registers (integer in RCX/RDX/R8/R9, floating point in XMM0-XMM3)
    while a copy of every argument is also laid out on the stack.
    """
    # We don't want to inherit the parent class __new__ method.
    __new__ = object.__new__
    # Make a list of floating point types.
    __float_types = (
        ctypes.c_double,
        ctypes.c_float,
    )
    # Long doubles are not supported in old versions of ctypes!
    try:
        __float_types += (ctypes.c_longdouble,)
    except AttributeError:
        pass
    def _calc_signature(self, signature):
        """
        Builds the ctypes Structure subclasses needed to read the function
        arguments: one for integer register arguments, one for floating
        point register arguments, and one for the stack copy of all
        arguments.

        @type  signature: tuple
        @param signature: Tuple of ctypes data types.

        @rtype:  tuple
        @return: C{(argument count, RegisterArguments or None,
            FloatArguments or None, StackArguments or None)}
        """
        self._cast_signature_pointers_to_void(signature)
        float_types = self.__float_types
        c_sizeof = ctypes.sizeof
        reg_size = c_sizeof(ctypes.c_size_t)
        reg_int_sig = []
        reg_float_sig = []
        stack_sig = []
        for i in compat.xrange(len(signature)):
            arg = signature[i]
            name = "arg_%d" % i
            # Stack fields are listed in reverse (right to left) order.
            stack_sig.insert( 0, (name, arg) )
            # Only the first 4 arguments can live in registers.
            if i < 4:
                # NOTE(review): signature entries are ctypes type objects,
                # so type(arg) is the ctypes metaclass; this test may never
                # match -- confirm whether "arg in float_types" was intended.
                if type(arg) in float_types:
                    reg_float_sig.append( (name, arg) )
                elif c_sizeof(arg) <= reg_size:
                    reg_int_sig.append( (name, arg) )
                else:
                    msg = ("Hook signatures don't support structures"
                           " within the first 4 arguments of a function"
                           " for the %s architecture") % win32.arch
                    raise NotImplementedError(msg)
        if reg_int_sig:
            class RegisterArguments (ctypes.Structure):
                _fields_ = reg_int_sig
        else:
            RegisterArguments = None
        if reg_float_sig:
            class FloatArguments (ctypes.Structure):
                _fields_ = reg_float_sig
        else:
            FloatArguments = None
        if stack_sig:
            class StackArguments (ctypes.Structure):
                _fields_ = stack_sig
        else:
            StackArguments = None
        return (len(signature),
                RegisterArguments,
                FloatArguments,
                StackArguments)
    def _get_return_address(self, aProcess, aThread):
        # On function entry the return address is at the top of the stack.
        return aProcess.read_pointer( aThread.get_sp() )
    def _get_function_arguments(self, aProcess, aThread):
        """
        Reads the function arguments from the stack and from the thread
        context registers, combining both into a single tuple indexed by
        argument position.
        """
        if self._signature:
            (args_count,
             RegisterArguments,
             FloatArguments,
             StackArguments) = self._signature
            arguments = {}
            # Read the stack copy of the arguments, past the return address.
            if StackArguments:
                address = aThread.get_sp() + win32.sizeof(win32.LPVOID)
                stack_struct = aProcess.read_structure(address,
                                                       StackArguments)
                stack_args = dict(
                    [ (name, stack_struct.__getattribute__(name))
                      for (name, type) in stack_struct._fields_ ]
                )
                arguments.update(stack_args)
            # Only fetch the context flags we actually need.
            flags = 0
            if RegisterArguments:
                flags = flags | win32.CONTEXT_INTEGER
            if FloatArguments:
                flags = flags | win32.CONTEXT_MMX_REGISTERS
            if flags:
                ctx = aThread.get_context(flags)
                # Register values override the stack copies read above.
                if RegisterArguments:
                    buffer = (win32.QWORD * 4)(ctx['Rcx'], ctx['Rdx'],
                                               ctx['R8'], ctx['R9'])
                    reg_args = self._get_arguments_from_buffer(buffer,
                                                               RegisterArguments)
                    arguments.update(reg_args)
                if FloatArguments:
                    buffer = (win32.M128A * 4)(ctx['XMM0'], ctx['XMM1'],
                                               ctx['XMM2'], ctx['XMM3'])
                    float_args = self._get_arguments_from_buffer(buffer,
                                                                 FloatArguments)
                    arguments.update(float_args)
            # Rebuild the positional argument tuple.
            params = tuple( [ arguments["arg_%d" % i]
                              for i in compat.xrange(args_count) ] )
        else:
            params = ()
        return params
    def _get_arguments_from_buffer(self, buffer, structure):
        """
        Reinterprets a raw register buffer as the given ctypes Structure
        and returns a dict mapping field names to their values.
        """
        b_ptr = ctypes.pointer(buffer)
        v_ptr = ctypes.cast(b_ptr, ctypes.c_void_p)
        s_ptr = ctypes.cast(v_ptr, ctypes.POINTER(structure))
        struct = s_ptr.contents
        return dict(
            [ (name, struct.__getattribute__(name))
              for (name, type) in struct._fields_ ]
        )
    def _get_return_value(self, aThread):
        # Integer return values are found in the RAX register.
        ctx = aThread.get_context(win32.CONTEXT_INTEGER)
        return ctx['Rax']
#------------------------------------------------------------------------------
# This class acts as a factory of Hook objects, one per target process.
# Said objects are deleted by the unhook() method.
class ApiHook (object):
    """
    Used by L{EventHandler}.

    This class acts as an action callback for code breakpoints set at the
    beginning of a function. It automatically retrieves the parameters from
    the stack, sets a breakpoint at the return address and retrieves the
    return value from the function call.

    @see: L{EventHandler.apiHooks}

    @type modName: str
    @ivar modName: Module name.

    @type procName: str
    @ivar procName: Procedure name.
    """
    def __init__(self, eventHandler, modName, procName, paramCount = None,
                 signature = None):
        """
        @type  eventHandler: L{EventHandler}
        @param eventHandler: Event handler instance. This is where the hook
            callbacks are to be defined (see below).

        @type  modName: str
        @param modName: Module name.

        @type  procName: str
        @param procName: Procedure name.
            The pre and post callbacks will be deduced from it.

            For example, if the procedure is "LoadLibraryEx" the callback
            routines will be "pre_LoadLibraryEx" and "post_LoadLibraryEx".

            The signature for the callbacks should be something like this::

                def pre_LoadLibraryEx(self, event, ra, lpFilename, hFile, dwFlags):
                    # ra is the return address of the hooked call;
                    # the function arguments follow it.
                    szFilename = event.get_process().peek_string(lpFilename)
                    # (...)

                def post_LoadLibraryEx(self, event, return_value):
                    # (...)

            Note that all pointer types are treated like void pointers, so your
            callback won't get the string or structure pointed to by it, but
            the remote memory address instead. This is so to prevent the ctypes
            library from being "too helpful" and trying to dereference the
            pointer. To get the actual data being pointed to, use one of the
            L{Process.read} methods.

        @type  paramCount: int
        @param paramCount:
            (Optional) Number of parameters for the C{preCB} callback,
            not counting the return address. Parameters are read from
            the stack and assumed to be DWORDs in 32 bits and QWORDs in 64.

            This is a faster way to pull stack parameters in 32 bits, but in 64
            bits (or with some odd APIs in 32 bits) it won't be useful, since
            not all arguments to the hooked function will be of the same size.

            For a more reliable and cross-platform way of hooking use the
            C{signature} argument instead.

        @type  signature: tuple
        @param signature:
            (Optional) Tuple of C{ctypes} data types that constitute the
            hooked function signature. When the function is called, this will
            be used to parse the arguments from the stack. Overrides the
            C{paramCount} argument.
        """
        self.__modName = modName
        self.__procName = procName
        self.__paramCount = paramCount
        self.__signature = signature
        # Look up the "pre" and "post" callbacks on the event handler;
        # either or both may be missing.
        self.__preCB = getattr(eventHandler, 'pre_%s' % procName, None)
        self.__postCB = getattr(eventHandler, 'post_%s' % procName, None)
        # One Hook object is lazily created per target process.
        self.__hook = dict()
    def __call__(self, event):
        """
        Handles the breakpoint event on entry of the function.

        @type  event: L{ExceptionEvent}
        @param event: Breakpoint hit event.

        @raise WindowsError: An error occurred.
        """
        pid = event.get_pid()
        try:
            hook = self.__hook[pid]
        except KeyError:
            # First hit in this process: create its Hook object.
            hook = Hook(self.__preCB, self.__postCB,
                        self.__paramCount, self.__signature,
                        event.get_process().get_arch() )
            self.__hook[pid] = hook
        return hook(event)
    @property
    def modName(self):
        # Module name this hook applies to.
        return self.__modName
    @property
    def procName(self):
        # Procedure name this hook applies to.
        return self.__procName
    def hook(self, debug, pid):
        """
        Installs the API hook on a given process and module.

        @warning: Do not call from an API hook callback.

        @type  debug: L{Debug}
        @param debug: Debug object.

        @type  pid: int
        @param pid: Process ID.
        """
        label = "%s!%s" % (self.__modName, self.__procName)
        try:
            hook = self.__hook[pid]
        except KeyError:
            try:
                aProcess = debug.system.get_process(pid)
            except KeyError:
                aProcess = Process(pid)
            hook = Hook(self.__preCB, self.__postCB,
                        self.__paramCount, self.__signature,
                        aProcess.get_arch() )
            self.__hook[pid] = hook
        hook.hook(debug, pid, label)
    def unhook(self, debug, pid):
        """
        Removes the API hook from the given process and module.

        @warning: Do not call from an API hook callback.

        @type  debug: L{Debug}
        @param debug: Debug object.

        @type  pid: int
        @param pid: Process ID.
        """
        try:
            hook = self.__hook[pid]
        except KeyError:
            # Nothing to do if this process was never hooked.
            return
        label = "%s!%s" % (self.__modName, self.__procName)
        hook.unhook(debug, pid, label)
        del self.__hook[pid]
#==============================================================================
class BufferWatch (object):
    """
    Returned by L{Debug.watch_buffer}.

    Uniquely identifies one particular buffer watch, even when several
    watches cover exactly the same memory region.

    @type pid: int
    @ivar pid: Process ID.
    @type start: int
    @ivar start: Memory address of the start of the buffer.
    @type end: int
    @ivar end: Memory address of the end of the buffer.
    @type action: callable
    @ivar action: Action callback.
    @type oneshot: bool
    @ivar oneshot: C{True} for one shot breakpoints, C{False} otherwise.
    """

    def __init__(self, pid, start, end, action = None, oneshot = False):
        self.__pid     = pid
        self.__start   = start
        self.__end     = end
        self.__action  = action
        self.__oneshot = oneshot

    @property
    def pid(self):
        "Process global ID."
        return self.__pid

    @property
    def start(self):
        "Address where the watched buffer begins."
        return self.__start

    @property
    def end(self):
        "Address just past the watched buffer (exclusive bound)."
        return self.__end

    @property
    def action(self):
        "Action callback, or C{None}."
        return self.__action

    @property
    def oneshot(self):
        "Whether this is a one shot watch."
        return self.__oneshot

    def match(self, address):
        """
        Determine if the given memory address lies within the watched buffer.

        @rtype:  bool
        @return: C{True} if the given memory address lies within the watched
            buffer, C{False} otherwise.
        """
        return address >= self.__start and address < self.__end
#==============================================================================
class _BufferWatchCondition (object):
    """
    Used by L{Debug.watch_buffer}.

    This class acts as a condition callback for page breakpoints.
    It emulates page breakpoints that can overlap and/or take up less
    than a page's size.
    """

    def __init__(self):
        self.__ranges = list()  # list of BufferWatch in definition order

    def add(self, bw):
        """
        Adds a buffer watch identifier.

        @type  bw: L{BufferWatch}
        @param bw: Buffer watch identifier.
        """
        self.__ranges.append(bw)

    def remove(self, bw):
        """
        Removes a buffer watch identifier.

        @type  bw: L{BufferWatch}
        @param bw: Buffer watch identifier.

        @raise ValueError: The buffer watch identifier was already removed.
        """
        try:
            self.__ranges.remove(bw)
        except ValueError:
            # BUGFIX: list.remove() raises ValueError, not KeyError, so the
            # original "except KeyError" never triggered and a one shot
            # watch that had already removed itself crashed here instead
            # of being silently ignored as intended.
            if not bw.oneshot:
                raise

    def remove_last_match(self, address, size):
        """
        Removes the last buffer from the watch object
        to match the given address and size.

        @type  address: int
        @param address: Memory address of buffer to stop watching.

        @type  size: int
        @param size: Size in bytes of buffer to stop watching.

        @rtype:  int
        @return: Number of matching elements found. Only the last one to be
            added is actually deleted upon calling this method.

            This counter allows you to know if there are more matching
            elements and how many.
        """
        count = 0
        start = address
        end = address + size - 1
        matched = None
        for item in self.__ranges:
            if item.match(start) and item.match(end):
                matched = item
                count += 1
        # BUGFIX: only remove when there was at least one match;
        # calling list.remove(None) would raise ValueError otherwise.
        if matched is not None:
            self.__ranges.remove(matched)
        return count

    def count(self):
        """
        @rtype:  int
        @return: Number of buffers being watched.
        """
        return len(self.__ranges)

    def __call__(self, event):
        """
        Breakpoint condition callback.

        This method will also call the action callbacks for each
        buffer being watched.

        @type  event: L{ExceptionEvent}
        @param event: Guard page exception event.

        @rtype:  bool
        @return: C{True} if the address being accessed belongs
            to at least one of the buffers that was being watched
            and had no action callback.
        """
        # Address that was accessed when the guard page exception fired.
        address = event.get_exception_information(1)
        bCondition = False
        # BUGFIX: iterate over a snapshot, because one shot watches are
        # removed from self.__ranges while this loop is running.
        for bw in list(self.__ranges):
            bMatched = bw.match(address)
            try:
                action = bw.action
                if bMatched and action is not None:
                    try:
                        action(event)
                    except Exception:
                        e = sys.exc_info()[1]
                        msg = ("Breakpoint action callback %r"
                               " raised an exception: %s")
                        # BUGFIX: traceback.format_exc() takes no exception
                        # argument; passing one misused the "limit" parameter.
                        msg = msg % (action, traceback.format_exc())
                        warnings.warn(msg, BreakpointCallbackWarning)
                else:
                    bCondition = bCondition or bMatched
            finally:
                # One shot watches go away after the first hit, whether or
                # not they had an action callback.
                if bMatched and bw.oneshot:
                    event.debug.dont_watch_buffer(bw)
        return bCondition
#==============================================================================
class _BreakpointContainer (object):
"""
Encapsulates the capability to contain Breakpoint objects.
@group Breakpoints:
break_at, watch_variable, watch_buffer, hook_function,
dont_break_at, dont_watch_variable, dont_watch_buffer,
dont_hook_function, unhook_function,
break_on_error, dont_break_on_error
@group Stalking:
stalk_at, stalk_variable, stalk_buffer, stalk_function,
dont_stalk_at, dont_stalk_variable, dont_stalk_buffer,
dont_stalk_function
@group Tracing:
is_tracing, get_traced_tids,
start_tracing, stop_tracing,
start_tracing_process, stop_tracing_process,
start_tracing_all, stop_tracing_all
@group Symbols:
resolve_label, resolve_exported_function
@group Advanced breakpoint use:
define_code_breakpoint,
define_page_breakpoint,
define_hardware_breakpoint,
has_code_breakpoint,
has_page_breakpoint,
has_hardware_breakpoint,
get_code_breakpoint,
get_page_breakpoint,
get_hardware_breakpoint,
erase_code_breakpoint,
erase_page_breakpoint,
erase_hardware_breakpoint,
enable_code_breakpoint,
enable_page_breakpoint,
enable_hardware_breakpoint,
enable_one_shot_code_breakpoint,
enable_one_shot_page_breakpoint,
enable_one_shot_hardware_breakpoint,
disable_code_breakpoint,
disable_page_breakpoint,
disable_hardware_breakpoint
@group Listing breakpoints:
get_all_breakpoints,
get_all_code_breakpoints,
get_all_page_breakpoints,
get_all_hardware_breakpoints,
get_process_breakpoints,
get_process_code_breakpoints,
get_process_page_breakpoints,
get_process_hardware_breakpoints,
get_thread_hardware_breakpoints,
get_all_deferred_code_breakpoints,
get_process_deferred_code_breakpoints
@group Batch operations on breakpoints:
enable_all_breakpoints,
enable_one_shot_all_breakpoints,
disable_all_breakpoints,
erase_all_breakpoints,
enable_process_breakpoints,
enable_one_shot_process_breakpoints,
disable_process_breakpoints,
erase_process_breakpoints
@group Breakpoint types:
BP_TYPE_ANY, BP_TYPE_CODE, BP_TYPE_PAGE, BP_TYPE_HARDWARE
@group Breakpoint states:
BP_STATE_DISABLED, BP_STATE_ENABLED, BP_STATE_ONESHOT, BP_STATE_RUNNING
@group Memory breakpoint trigger flags:
BP_BREAK_ON_EXECUTION, BP_BREAK_ON_WRITE, BP_BREAK_ON_ACCESS
@group Memory breakpoint size flags:
BP_WATCH_BYTE, BP_WATCH_WORD, BP_WATCH_DWORD, BP_WATCH_QWORD
@type BP_TYPE_ANY: int
@cvar BP_TYPE_ANY: To get all breakpoints
@type BP_TYPE_CODE: int
@cvar BP_TYPE_CODE: To get code breakpoints only
@type BP_TYPE_PAGE: int
@cvar BP_TYPE_PAGE: To get page breakpoints only
@type BP_TYPE_HARDWARE: int
@cvar BP_TYPE_HARDWARE: To get hardware breakpoints only
@type BP_STATE_DISABLED: int
@cvar BP_STATE_DISABLED: Breakpoint is disabled.
@type BP_STATE_ENABLED: int
@cvar BP_STATE_ENABLED: Breakpoint is enabled.
@type BP_STATE_ONESHOT: int
@cvar BP_STATE_ONESHOT: Breakpoint is enabled for one shot.
@type BP_STATE_RUNNING: int
@cvar BP_STATE_RUNNING: Breakpoint is running (recently hit).
@type BP_BREAK_ON_EXECUTION: int
@cvar BP_BREAK_ON_EXECUTION: Break on code execution.
@type BP_BREAK_ON_WRITE: int
@cvar BP_BREAK_ON_WRITE: Break on memory write.
@type BP_BREAK_ON_ACCESS: int
@cvar BP_BREAK_ON_ACCESS: Break on memory read or write.
"""
    # Breakpoint types (used to filter listings; ANY matches everything)
    BP_TYPE_ANY = 0 # to get all breakpoints
    BP_TYPE_CODE = 1
    BP_TYPE_PAGE = 2
    BP_TYPE_HARDWARE = 3
    # Breakpoint states (aliases of the L{Breakpoint} state machine values)
    BP_STATE_DISABLED = Breakpoint.DISABLED
    BP_STATE_ENABLED = Breakpoint.ENABLED
    BP_STATE_ONESHOT = Breakpoint.ONESHOT
    BP_STATE_RUNNING = Breakpoint.RUNNING
    # Memory breakpoint trigger flags (from L{HardwareBreakpoint})
    BP_BREAK_ON_EXECUTION = HardwareBreakpoint.BREAK_ON_EXECUTION
    BP_BREAK_ON_WRITE = HardwareBreakpoint.BREAK_ON_WRITE
    BP_BREAK_ON_ACCESS = HardwareBreakpoint.BREAK_ON_ACCESS
    # Memory breakpoint size flags (from L{HardwareBreakpoint})
    BP_WATCH_BYTE = HardwareBreakpoint.WATCH_BYTE
    BP_WATCH_WORD = HardwareBreakpoint.WATCH_WORD
    BP_WATCH_QWORD = HardwareBreakpoint.WATCH_QWORD
    BP_WATCH_DWORD = HardwareBreakpoint.WATCH_DWORD
    def __init__(self):
        """
        Initializes the (empty) internal breakpoint bookkeeping containers.
        """
        self.__codeBP = dict() # (pid, address) -> CodeBreakpoint
        self.__pageBP = dict() # (pid, address) -> PageBreakpoint
        self.__hardwareBP = dict() # tid -> [ HardwareBreakpoint ]
        self.__runningBP = dict() # tid -> set( Breakpoint )
        self.__tracing = set() # set( tid )
        self.__deferredBP = dict() # pid -> label -> (action, oneshot)
#------------------------------------------------------------------------------
# This operates on the dictionary of running breakpoints.
# Since the bps are meant to stay alive no cleanup is done here.
def __get_running_bp_set(self, tid):
"Auxiliary method."
return self.__runningBP.get(tid, ())
def __add_running_bp(self, tid, bp):
"Auxiliary method."
if tid not in self.__runningBP:
self.__runningBP[tid] = set()
self.__runningBP[tid].add(bp)
def __del_running_bp(self, tid, bp):
"Auxiliary method."
self.__runningBP[tid].remove(bp)
if not self.__runningBP[tid]:
del self.__runningBP[tid]
def __del_running_bp_from_all_threads(self, bp):
"Auxiliary method."
for (tid, bpset) in compat.iteritems(self.__runningBP):
if bp in bpset:
bpset.remove(bp)
self.system.get_thread(tid).clear_tf()
#------------------------------------------------------------------------------
# This is the cleanup code. Mostly called on response to exit/unload debug
# events. If possible it shouldn't raise exceptions on runtime errors.
# The main goal here is to avoid memory or handle leaks.
def __cleanup_breakpoint(self, event, bp):
"Auxiliary method."
try:
process = event.get_process()
thread = event.get_thread()
bp.disable(process, thread) # clear the debug regs / trap flag
except Exception:
pass
bp.set_condition(True) # break possible circular reference
bp.set_action(None) # break possible circular reference
def __cleanup_thread(self, event):
"""
Auxiliary method for L{_notify_exit_thread}
and L{_notify_exit_process}.
"""
tid = event.get_tid()
# Cleanup running breakpoints
try:
for bp in self.__runningBP[tid]:
self.__cleanup_breakpoint(event, bp)
del self.__runningBP[tid]
except KeyError:
pass
# Cleanup hardware breakpoints
try:
for bp in self.__hardwareBP[tid]:
self.__cleanup_breakpoint(event, bp)
del self.__hardwareBP[tid]
except KeyError:
pass
# Cleanup set of threads being traced
if tid in self.__tracing:
self.__tracing.remove(tid)
    def __cleanup_process(self, event):
        """
        Auxiliary method for L{_notify_exit_process}.

        Disables and forgets all code and page breakpoints belonging to a
        process that is going away, and drops its deferred breakpoints.
        """
        pid = event.get_pid()
        # NOTE(review): 'process' is unused below -- confirm the getter has
        # no needed side effect before removing this line.
        process = event.get_process()
        # Cleanup code breakpoints
        # (compat.keys presumably returns a snapshot, making deletion
        # during the loop safe -- verify against the compat module)
        for (bp_pid, bp_address) in compat.keys(self.__codeBP):
            if bp_pid == pid:
                bp = self.__codeBP[ (bp_pid, bp_address) ]
                self.__cleanup_breakpoint(event, bp)
                del self.__codeBP[ (bp_pid, bp_address) ]
        # Cleanup page breakpoints
        for (bp_pid, bp_address) in compat.keys(self.__pageBP):
            if bp_pid == pid:
                bp = self.__pageBP[ (bp_pid, bp_address) ]
                self.__cleanup_breakpoint(event, bp)
                del self.__pageBP[ (bp_pid, bp_address) ]
        # Cleanup deferred code breakpoints
        try:
            del self.__deferredBP[pid]
        except KeyError:
            pass
    def __cleanup_module(self, event):
        """
        Auxiliary method for L{_notify_unload_dll}.

        Disables and forgets every breakpoint whose address falls within
        the module being unloaded.
        """
        pid = event.get_pid()
        process = event.get_process()
        module = event.get_module()
        # Cleanup thread breakpoints on this module
        for tid in process.iter_thread_ids():
            # NOTE(review): 'thread' is unused -- confirm get_thread() has
            # no needed side effect before removing this line.
            thread = process.get_thread(tid)
            # Running breakpoints
            if tid in self.__runningBP:
                # Iterate over a copy, since we remove items as we go.
                bplist = list(self.__runningBP[tid])
                for bp in bplist:
                    bp_address = bp.get_address()
                    if process.get_module_at_address(bp_address) == module:
                        self.__cleanup_breakpoint(event, bp)
                        self.__runningBP[tid].remove(bp)
            # Hardware breakpoints
            if tid in self.__hardwareBP:
                # Iterate over a copy, since we remove items as we go.
                bplist = list(self.__hardwareBP[tid])
                for bp in bplist:
                    bp_address = bp.get_address()
                    if process.get_module_at_address(bp_address) == module:
                        self.__cleanup_breakpoint(event, bp)
                        self.__hardwareBP[tid].remove(bp)
        # Cleanup code breakpoints on this module
        for (bp_pid, bp_address) in compat.keys(self.__codeBP):
            if bp_pid == pid:
                if process.get_module_at_address(bp_address) == module:
                    bp = self.__codeBP[ (bp_pid, bp_address) ]
                    self.__cleanup_breakpoint(event, bp)
                    del self.__codeBP[ (bp_pid, bp_address) ]
        # Cleanup page breakpoints on this module
        for (bp_pid, bp_address) in compat.keys(self.__pageBP):
            if bp_pid == pid:
                if process.get_module_at_address(bp_address) == module:
                    bp = self.__pageBP[ (bp_pid, bp_address) ]
                    self.__cleanup_breakpoint(event, bp)
                    del self.__pageBP[ (bp_pid, bp_address) ]
#------------------------------------------------------------------------------
# Defining breakpoints.
# Code breakpoints.
    def define_code_breakpoint(self, dwProcessId, address, condition = True,
                               action = None):
        """
        Creates a disabled code breakpoint at the given address.

        @see:
            L{has_code_breakpoint},
            L{get_code_breakpoint},
            L{enable_code_breakpoint},
            L{enable_one_shot_code_breakpoint},
            L{disable_code_breakpoint},
            L{erase_code_breakpoint}

        @type  dwProcessId: int
        @param dwProcessId: Process global ID.

        @type  address: int
        @param address: Memory address of the code instruction to break at.

        @type  condition: function
        @param condition: (Optional) Condition callback function.

            The callback signature is::

                def condition_callback(event):
                    return True     # returns True or False

            Where B{event} is an L{Event} object,
            and the return value is a boolean
            (C{True} to dispatch the event, C{False} otherwise).

        @type  action: function
        @param action: (Optional) Action callback function.
            If specified, the event is handled by this callback instead of
            being dispatched normally.

            The callback signature is::

                def action_callback(event):
                    pass        # no return value

            Where B{event} is an L{Event} object.

        @rtype:  L{CodeBreakpoint}
        @return: The code breakpoint object.
        """
        # Presumably validates the process is being debugged; the returned
        # object itself is not used.
        process = self.system.get_process(dwProcessId)
        bp = CodeBreakpoint(address, condition, action)
        key = (dwProcessId, bp.get_address())
        # Refuse to define two code breakpoints at the same address.
        if key in self.__codeBP:
            msg = "Already exists (PID %d) : %r"
            raise KeyError(msg % (dwProcessId, self.__codeBP[key]))
        self.__codeBP[key] = bp
        return bp
# Page breakpoints.
    def define_page_breakpoint(self, dwProcessId, address, pages = 1,
                               condition = True,
                               action = None):
        """
        Creates a disabled page breakpoint at the given address.

        @see:
            L{has_page_breakpoint},
            L{get_page_breakpoint},
            L{enable_page_breakpoint},
            L{enable_one_shot_page_breakpoint},
            L{disable_page_breakpoint},
            L{erase_page_breakpoint}

        @type  dwProcessId: int
        @param dwProcessId: Process global ID.

        @type  address: int
        @param address: Memory address of the first page to watch.

        @type  pages: int
        @param pages: Number of pages to watch.

        @type  condition: function
        @param condition: (Optional) Condition callback function.

            The callback signature is::

                def condition_callback(event):
                    return True     # returns True or False

            Where B{event} is an L{Event} object,
            and the return value is a boolean
            (C{True} to dispatch the event, C{False} otherwise).

        @type  action: function
        @param action: (Optional) Action callback function.
            If specified, the event is handled by this callback instead of
            being dispatched normally.

            The callback signature is::

                def action_callback(event):
                    pass        # no return value

            Where B{event} is an L{Event} object.

        @rtype:  L{PageBreakpoint}
        @return: The page breakpoint object.
        """
        # Presumably validates the process is being debugged; the returned
        # object itself is not used.
        process = self.system.get_process(dwProcessId)
        bp = PageBreakpoint(address, pages, condition, action)
        begin = bp.get_address()
        end = begin + bp.get_size()
        # First pass: make sure no page in the range is already being
        # watched, so we never register a partially defined breakpoint.
        address = begin
        pageSize = MemoryAddresses.pageSize
        while address < end:
            key = (dwProcessId, address)
            if key in self.__pageBP:
                msg = "Already exists (PID %d) : %r"
                msg = msg % (dwProcessId, self.__pageBP[key])
                raise KeyError(msg)
            address = address + pageSize
        # Second pass: register the breakpoint under every page it spans.
        address = begin
        while address < end:
            key = (dwProcessId, address)
            self.__pageBP[key] = bp
            address = address + pageSize
        return bp
# Hardware breakpoints.
    def define_hardware_breakpoint(self, dwThreadId, address,
                                   triggerFlag = BP_BREAK_ON_ACCESS,
                                   sizeFlag = BP_WATCH_DWORD,
                                   condition = True,
                                   action = None):
        """
        Creates a disabled hardware breakpoint at the given address.

        @see:
            L{has_hardware_breakpoint},
            L{get_hardware_breakpoint},
            L{enable_hardware_breakpoint},
            L{enable_one_shot_hardware_breakpoint},
            L{disable_hardware_breakpoint},
            L{erase_hardware_breakpoint}

        @note:
            Hardware breakpoints do not seem to work properly on VirtualBox.
            See U{http://www.virtualbox.org/ticket/477}.

        @type  dwThreadId: int
        @param dwThreadId: Thread global ID.

        @type  address: int
        @param address: Memory address to watch.

        @type  triggerFlag: int
        @param triggerFlag: Trigger of breakpoint. Must be one of the following:

             - L{BP_BREAK_ON_EXECUTION}

               Break on code execution.

             - L{BP_BREAK_ON_WRITE}

               Break on memory write.

             - L{BP_BREAK_ON_ACCESS}

               Break on memory read or write.

        @type  sizeFlag: int
        @param sizeFlag: Size of breakpoint. Must be one of the following:

             - L{BP_WATCH_BYTE}

               One (1) byte in size.

             - L{BP_WATCH_WORD}

               Two (2) bytes in size.

             - L{BP_WATCH_DWORD}

               Four (4) bytes in size.

             - L{BP_WATCH_QWORD}

               Eight (8) bytes in size.

        @type  condition: function
        @param condition: (Optional) Condition callback function.

            The callback signature is::

                def condition_callback(event):
                    return True     # returns True or False

            Where B{event} is an L{Event} object,
            and the return value is a boolean
            (C{True} to dispatch the event, C{False} otherwise).

        @type  action: function
        @param action: (Optional) Action callback function.
            If specified, the event is handled by this callback instead of
            being dispatched normally.

            The callback signature is::

                def action_callback(event):
                    pass        # no return value

            Where B{event} is an L{Event} object.

        @rtype:  L{HardwareBreakpoint}
        @return: The hardware breakpoint object.
        """
        # Presumably validates the thread is known; the returned object
        # itself is not used.
        thread = self.system.get_thread(dwThreadId)
        bp = HardwareBreakpoint(address, triggerFlag, sizeFlag, condition,
                                action)
        begin = bp.get_address()
        end = begin + bp.get_size()
        # Refuse to define a hardware breakpoint overlapping an existing one
        # in the same thread.
        if dwThreadId in self.__hardwareBP:
            bpSet = self.__hardwareBP[dwThreadId]
            for oldbp in bpSet:
                old_begin = oldbp.get_address()
                old_end = old_begin + oldbp.get_size()
                if MemoryAddresses.do_ranges_intersect(begin, end, old_begin,
                                                       old_end):
                    msg = "Already exists (TID %d) : %r" % (dwThreadId, oldbp)
                    raise KeyError(msg)
        else:
            bpSet = set()
            self.__hardwareBP[dwThreadId] = bpSet
        bpSet.add(bp)
        return bp
#------------------------------------------------------------------------------
# Checking breakpoint definitions.
def has_code_breakpoint(self, dwProcessId, address):
"""
Checks if a code breakpoint is defined at the given address.
@see:
L{define_code_breakpoint},
L{get_code_breakpoint},
L{erase_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
@rtype: bool
@return: C{True} if the breakpoint is defined, C{False} otherwise.
"""
return (dwProcessId, address) in self.__codeBP
def has_page_breakpoint(self, dwProcessId, address):
"""
Checks if a page breakpoint is defined at the given address.
@see:
L{define_page_breakpoint},
L{get_page_breakpoint},
L{erase_page_breakpoint},
L{enable_page_breakpoint},
L{enable_one_shot_page_breakpoint},
L{disable_page_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
@rtype: bool
@return: C{True} if the breakpoint is defined, C{False} otherwise.
"""
return (dwProcessId, address) in self.__pageBP
def has_hardware_breakpoint(self, dwThreadId, address):
"""
Checks if a hardware breakpoint is defined at the given address.
@see:
L{define_hardware_breakpoint},
L{get_hardware_breakpoint},
L{erase_hardware_breakpoint},
L{enable_hardware_breakpoint},
L{enable_one_shot_hardware_breakpoint},
L{disable_hardware_breakpoint}
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
@rtype: bool
@return: C{True} if the breakpoint is defined, C{False} otherwise.
"""
if dwThreadId in self.__hardwareBP:
bpSet = self.__hardwareBP[dwThreadId]
for bp in bpSet:
if bp.get_address() == address:
return True
return False
#------------------------------------------------------------------------------
# Getting breakpoints.
def get_code_breakpoint(self, dwProcessId, address):
"""
Returns the internally used breakpoint object,
for the code breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint},
L{erase_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{CodeBreakpoint}
@return: The code breakpoint object.
"""
key = (dwProcessId, address)
if key not in self.__codeBP:
msg = "No breakpoint at process %d, address %s"
address = HexDump.address(address)
raise KeyError(msg % (dwProcessId, address))
return self.__codeBP[key]
def get_page_breakpoint(self, dwProcessId, address):
"""
Returns the internally used breakpoint object,
for the page breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_page_breakpoint},
L{has_page_breakpoint},
L{enable_page_breakpoint},
L{enable_one_shot_page_breakpoint},
L{disable_page_breakpoint},
L{erase_page_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{PageBreakpoint}
@return: The page breakpoint object.
"""
key = (dwProcessId, address)
if key not in self.__pageBP:
msg = "No breakpoint at process %d, address %s"
address = HexDump.addresS(address)
raise KeyError(msg % (dwProcessId, address))
return self.__pageBP[key]
def get_hardware_breakpoint(self, dwThreadId, address):
"""
Returns the internally used breakpoint object,
for the code breakpoint defined at the given address.
@warning: It's usually best to call the L{Debug} methods
instead of accessing the breakpoint objects directly.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_code_breakpoint},
L{enable_hardware_breakpoint},
L{enable_one_shot_hardware_breakpoint},
L{disable_hardware_breakpoint},
L{erase_hardware_breakpoint}
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address where the breakpoint is defined.
@rtype: L{HardwareBreakpoint}
@return: The hardware breakpoint object.
"""
if dwThreadId not in self.__hardwareBP:
msg = "No hardware breakpoints set for thread %d"
raise KeyError(msg % dwThreadId)
for bp in self.__hardwareBP[dwThreadId]:
if bp.is_here(address):
return bp
msg = "No hardware breakpoint at thread %d, address %s"
raise KeyError(msg % (dwThreadId, HexDump.address(address)))
#------------------------------------------------------------------------------
# Enabling and disabling breakpoints.
def enable_code_breakpoint(self, dwProcessId, address):
"""
Enables the code breakpoint at the given address.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint}
L{erase_code_breakpoint},
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
p = self.system.get_process(dwProcessId)
bp = self.get_code_breakpoint(dwProcessId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.enable(p, None) # XXX HACK thread is not used
def enable_page_breakpoint(self, dwProcessId, address):
"""
Enables the page breakpoint at the given address.
@see:
L{define_page_breakpoint},
L{has_page_breakpoint},
L{get_page_breakpoint},
L{enable_one_shot_page_breakpoint},
L{disable_page_breakpoint}
L{erase_page_breakpoint},
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
p = self.system.get_process(dwProcessId)
bp = self.get_page_breakpoint(dwProcessId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.enable(p, None) # XXX HACK thread is not used
def enable_hardware_breakpoint(self, dwThreadId, address):
"""
Enables the hardware breakpoint at the given address.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_one_shot_hardware_breakpoint},
L{disable_hardware_breakpoint}
L{erase_hardware_breakpoint},
@note: Do not set hardware breakpoints while processing the system
breakpoint event.
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
t = self.system.get_thread(dwThreadId)
bp = self.get_hardware_breakpoint(dwThreadId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.enable(None, t) # XXX HACK process is not used
def enable_one_shot_code_breakpoint(self, dwProcessId, address):
"""
Enables the code breakpoint at the given address for only one shot.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{get_code_breakpoint},
L{enable_code_breakpoint},
L{disable_code_breakpoint}
L{erase_code_breakpoint},
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
p = self.system.get_process(dwProcessId)
bp = self.get_code_breakpoint(dwProcessId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.one_shot(p, None) # XXX HACK thread is not used
def enable_one_shot_page_breakpoint(self, dwProcessId, address):
"""
Enables the page breakpoint at the given address for only one shot.
@see:
L{define_page_breakpoint},
L{has_page_breakpoint},
L{get_page_breakpoint},
L{enable_page_breakpoint},
L{disable_page_breakpoint}
L{erase_page_breakpoint},
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
p = self.system.get_process(dwProcessId)
bp = self.get_page_breakpoint(dwProcessId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.one_shot(p, None) # XXX HACK thread is not used
def enable_one_shot_hardware_breakpoint(self, dwThreadId, address):
"""
Enables the hardware breakpoint at the given address for only one shot.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint},
L{disable_hardware_breakpoint}
L{erase_hardware_breakpoint},
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
t = self.system.get_thread(dwThreadId)
bp = self.get_hardware_breakpoint(dwThreadId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.one_shot(None, t) # XXX HACK process is not used
def disable_code_breakpoint(self, dwProcessId, address):
"""
Disables the code breakpoint at the given address.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{get_code_breakpoint},
L{enable_code_breakpoint}
L{enable_one_shot_code_breakpoint},
L{erase_code_breakpoint},
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
p = self.system.get_process(dwProcessId)
bp = self.get_code_breakpoint(dwProcessId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.disable(p, None) # XXX HACK thread is not used
def disable_page_breakpoint(self, dwProcessId, address):
"""
Disables the page breakpoint at the given address.
@see:
L{define_page_breakpoint},
L{has_page_breakpoint},
L{get_page_breakpoint},
L{enable_page_breakpoint}
L{enable_one_shot_page_breakpoint},
L{erase_page_breakpoint},
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
p = self.system.get_process(dwProcessId)
bp = self.get_page_breakpoint(dwProcessId, address)
if bp.is_running():
self.__del_running_bp_from_all_threads(bp)
bp.disable(p, None) # XXX HACK thread is not used
def disable_hardware_breakpoint(self, dwThreadId, address):
"""
Disables the hardware breakpoint at the given address.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint}
L{enable_one_shot_hardware_breakpoint},
L{erase_hardware_breakpoint},
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
t = self.system.get_thread(dwThreadId)
p = t.get_process()
bp = self.get_hardware_breakpoint(dwThreadId, address)
if bp.is_running():
self.__del_running_bp(dwThreadId, bp)
bp.disable(p, t)
#------------------------------------------------------------------------------
# Undefining (erasing) breakpoints.
def erase_code_breakpoint(self, dwProcessId, address):
"""
Erases the code breakpoint at the given address.
@see:
L{define_code_breakpoint},
L{has_code_breakpoint},
L{get_code_breakpoint},
L{enable_code_breakpoint},
L{enable_one_shot_code_breakpoint},
L{disable_code_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
bp = self.get_code_breakpoint(dwProcessId, address)
if not bp.is_disabled():
self.disable_code_breakpoint(dwProcessId, address)
del self.__codeBP[ (dwProcessId, address) ]
def erase_page_breakpoint(self, dwProcessId, address):
"""
Erases the page breakpoint at the given address.
@see:
L{define_page_breakpoint},
L{has_page_breakpoint},
L{get_page_breakpoint},
L{enable_page_breakpoint},
L{enable_one_shot_page_breakpoint},
L{disable_page_breakpoint}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
bp = self.get_page_breakpoint(dwProcessId, address)
begin = bp.get_address()
end = begin + bp.get_size()
if not bp.is_disabled():
self.disable_page_breakpoint(dwProcessId, address)
address = begin
pageSize = MemoryAddresses.pageSize
while address < end:
del self.__pageBP[ (dwProcessId, address) ]
address = address + pageSize
def erase_hardware_breakpoint(self, dwThreadId, address):
"""
Erases the hardware breakpoint at the given address.
@see:
L{define_hardware_breakpoint},
L{has_hardware_breakpoint},
L{get_hardware_breakpoint},
L{enable_hardware_breakpoint},
L{enable_one_shot_hardware_breakpoint},
L{disable_hardware_breakpoint}
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@type address: int
@param address: Memory address of breakpoint.
"""
bp = self.get_hardware_breakpoint(dwThreadId, address)
if not bp.is_disabled():
self.disable_hardware_breakpoint(dwThreadId, address)
bpSet = self.__hardwareBP[dwThreadId]
bpSet.remove(bp)
if not bpSet:
del self.__hardwareBP[dwThreadId]
#------------------------------------------------------------------------------
# Listing breakpoints.
def get_all_breakpoints(self):
"""
Returns all breakpoint objects as a list of tuples.
Each tuple contains:
- Process global ID to which the breakpoint applies.
- Thread global ID to which the breakpoint applies, or C{None}.
- The L{Breakpoint} object itself.
@note: If you're only interested in a specific breakpoint type, or in
breakpoints for a specific process or thread, it's probably faster
to call one of the following methods:
- L{get_all_code_breakpoints}
- L{get_all_page_breakpoints}
- L{get_all_hardware_breakpoints}
- L{get_process_code_breakpoints}
- L{get_process_page_breakpoints}
- L{get_process_hardware_breakpoints}
- L{get_thread_hardware_breakpoints}
@rtype: list of tuple( pid, tid, bp )
@return: List of all breakpoints.
"""
bplist = list()
# Get the code breakpoints.
for (pid, bp) in self.get_all_code_breakpoints():
bplist.append( (pid, None, bp) )
# Get the page breakpoints.
for (pid, bp) in self.get_all_page_breakpoints():
bplist.append( (pid, None, bp) )
# Get the hardware breakpoints.
for (tid, bp) in self.get_all_hardware_breakpoints():
pid = self.system.get_thread(tid).get_pid()
bplist.append( (pid, tid, bp) )
# Return the list of breakpoints.
return bplist
def get_all_code_breakpoints(self):
"""
@rtype: list of tuple( int, L{CodeBreakpoint} )
@return: All code breakpoints as a list of tuples (pid, bp).
"""
return [ (pid, bp) for ((pid, address), bp) in compat.iteritems(self.__codeBP) ]
def get_all_page_breakpoints(self):
"""
@rtype: list of tuple( int, L{PageBreakpoint} )
@return: All page breakpoints as a list of tuples (pid, bp).
"""
## return list( set( [ (pid, bp) for ((pid, address), bp) in compat.iteritems(self.__pageBP) ] ) )
result = set()
for ((pid, address), bp) in compat.iteritems(self.__pageBP):
result.add( (pid, bp) )
return list(result)
def get_all_hardware_breakpoints(self):
"""
@rtype: list of tuple( int, L{HardwareBreakpoint} )
@return: All hardware breakpoints as a list of tuples (tid, bp).
"""
result = list()
for (tid, bplist) in compat.iteritems(self.__hardwareBP):
for bp in bplist:
result.append( (tid, bp) )
return result
def get_process_breakpoints(self, dwProcessId):
"""
Returns all breakpoint objects for the given process as a list of tuples.
Each tuple contains:
- Process global ID to which the breakpoint applies.
- Thread global ID to which the breakpoint applies, or C{None}.
- The L{Breakpoint} object itself.
@note: If you're only interested in a specific breakpoint type, or in
breakpoints for a specific process or thread, it's probably faster
to call one of the following methods:
- L{get_all_code_breakpoints}
- L{get_all_page_breakpoints}
- L{get_all_hardware_breakpoints}
- L{get_process_code_breakpoints}
- L{get_process_page_breakpoints}
- L{get_process_hardware_breakpoints}
- L{get_thread_hardware_breakpoints}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of tuple( pid, tid, bp )
@return: List of all breakpoints for the given process.
"""
bplist = list()
# Get the code breakpoints.
for bp in self.get_process_code_breakpoints(dwProcessId):
bplist.append( (dwProcessId, None, bp) )
# Get the page breakpoints.
for bp in self.get_process_page_breakpoints(dwProcessId):
bplist.append( (dwProcessId, None, bp) )
# Get the hardware breakpoints.
for (tid, bp) in self.get_process_hardware_breakpoints(dwProcessId):
pid = self.system.get_thread(tid).get_pid()
bplist.append( (dwProcessId, tid, bp) )
# Return the list of breakpoints.
return bplist
def get_process_code_breakpoints(self, dwProcessId):
"""
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of L{CodeBreakpoint}
@return: All code breakpoints for the given process.
"""
return [ bp for ((pid, address), bp) in compat.iteritems(self.__codeBP) \
if pid == dwProcessId ]
def get_process_page_breakpoints(self, dwProcessId):
"""
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of L{PageBreakpoint}
@return: All page breakpoints for the given process.
"""
return [ bp for ((pid, address), bp) in compat.iteritems(self.__pageBP) \
if pid == dwProcessId ]
def get_thread_hardware_breakpoints(self, dwThreadId):
"""
@see: L{get_process_hardware_breakpoints}
@type dwThreadId: int
@param dwThreadId: Thread global ID.
@rtype: list of L{HardwareBreakpoint}
@return: All hardware breakpoints for the given thread.
"""
result = list()
for (tid, bplist) in compat.iteritems(self.__hardwareBP):
if tid == dwThreadId:
for bp in bplist:
result.append(bp)
return result
def get_process_hardware_breakpoints(self, dwProcessId):
"""
@see: L{get_thread_hardware_breakpoints}
@type dwProcessId: int
@param dwProcessId: Process global ID.
@rtype: list of tuple( int, L{HardwareBreakpoint} )
@return: All hardware breakpoints for each thread in the given process
as a list of tuples (tid, bp).
"""
result = list()
aProcess = self.system.get_process(dwProcessId)
for dwThreadId in aProcess.iter_thread_ids():
if dwThreadId in self.__hardwareBP:
bplist = self.__hardwareBP[dwThreadId]
for bp in bplist:
result.append( (dwThreadId, bp) )
return result
## def get_all_hooks(self):
## """
## @see: L{get_process_hooks}
##
## @rtype: list of tuple( int, int, L{Hook} )
## @return: All defined hooks as a list of tuples (pid, address, hook).
## """
## return [ (pid, address, hook) \
## for ((pid, address), hook) in self.__hook_objects ]
##
## def get_process_hooks(self, dwProcessId):
## """
## @see: L{get_all_hooks}
##
## @type dwProcessId: int
## @param dwProcessId: Process global ID.
##
## @rtype: list of tuple( int, int, L{Hook} )
## @return: All hooks for the given process as a list of tuples
## (pid, address, hook).
## """
## return [ (pid, address, hook) \
## for ((pid, address), hook) in self.__hook_objects \
## if pid == dwProcessId ]
#------------------------------------------------------------------------------
# Batch operations on all breakpoints.
    def enable_all_breakpoints(self):
        """
        Enables all disabled breakpoints in all processes.
        @see:
            enable_code_breakpoint,
            enable_page_breakpoint,
            enable_hardware_breakpoint
        """
        # enable code breakpoints
        for (pid, bp) in self.get_all_code_breakpoints():
            if bp.is_disabled():
                self.enable_code_breakpoint(pid, bp.get_address())
        # enable page breakpoints
        for (pid, bp) in self.get_all_page_breakpoints():
            if bp.is_disabled():
                self.enable_page_breakpoint(pid, bp.get_address())
        # enable hardware breakpoints
        for (tid, bp) in self.get_all_hardware_breakpoints():
            if bp.is_disabled():
                self.enable_hardware_breakpoint(tid, bp.get_address())
    def enable_one_shot_all_breakpoints(self):
        """
        Enables for one shot all disabled breakpoints in all processes.
        @see:
            enable_one_shot_code_breakpoint,
            enable_one_shot_page_breakpoint,
            enable_one_shot_hardware_breakpoint
        """
        # enable code breakpoints for one shot
        for (pid, bp) in self.get_all_code_breakpoints():
            if bp.is_disabled():
                self.enable_one_shot_code_breakpoint(pid, bp.get_address())
        # enable page breakpoints for one shot
        for (pid, bp) in self.get_all_page_breakpoints():
            if bp.is_disabled():
                self.enable_one_shot_page_breakpoint(pid, bp.get_address())
        # enable hardware breakpoints for one shot
        for (tid, bp) in self.get_all_hardware_breakpoints():
            if bp.is_disabled():
                self.enable_one_shot_hardware_breakpoint(tid, bp.get_address())
def disable_all_breakpoints(self):
"""
Disables all breakpoints in all processes.
@see:
disable_code_breakpoint,
disable_page_breakpoint,
disable_hardware_breakpoint
"""
# disable code breakpoints
for (pid, bp) in self.get_all_code_breakpoints():
self.disable_code_breakpoint(pid, bp.get_address())
# disable page breakpoints
for (pid, bp) in self.get_all_page_breakpoints():
self.disable_page_breakpoint(pid, bp.get_address())
# disable hardware breakpoints
for (tid, bp) in self.get_all_hardware_breakpoints():
self.disable_hardware_breakpoint(tid, bp.get_address())
    def erase_all_breakpoints(self):
        """
        Erases all breakpoints in all processes.
        @see:
            erase_code_breakpoint,
            erase_page_breakpoint,
            erase_hardware_breakpoint
        """
        # This should be faster but let's not trust the GC so much :P
        # self.disable_all_breakpoints()
        # self.__codeBP       = dict()
        # self.__pageBP       = dict()
        # self.__hardwareBP   = dict()
        # self.__runningBP    = dict()
        # self.__hook_objects = dict()
##        # erase hooks
##        for (pid, address, hook) in self.get_all_hooks():
##            self.dont_hook_function(pid, address)
        # Erase each breakpoint through its own erase_* method so the
        # per-type cleanup (restoring code bytes, page protections and
        # debug registers) is never skipped.
        # erase code breakpoints
        for (pid, bp) in self.get_all_code_breakpoints():
            self.erase_code_breakpoint(pid, bp.get_address())
        # erase page breakpoints
        for (pid, bp) in self.get_all_page_breakpoints():
            self.erase_page_breakpoint(pid, bp.get_address())
        # erase hardware breakpoints
        for (tid, bp) in self.get_all_hardware_breakpoints():
            self.erase_hardware_breakpoint(tid, bp.get_address())
#------------------------------------------------------------------------------
# Batch operations on breakpoints per process.
def enable_process_breakpoints(self, dwProcessId):
"""
Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# enable code breakpoints
for bp in self.get_process_code_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_code_breakpoint(dwProcessId, bp.get_address())
# enable page breakpoints
for bp in self.get_process_page_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_page_breakpoint(dwProcessId, bp.get_address())
# enable hardware breakpoints
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
if bp.is_disabled():
self.enable_hardware_breakpoint(dwThreadId, bp.get_address())
def enable_one_shot_process_breakpoints(self, dwProcessId):
"""
Enables for one shot all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# enable code breakpoints for one shot
for bp in self.get_process_code_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_one_shot_code_breakpoint(dwProcessId, bp.get_address())
# enable page breakpoints for one shot
for bp in self.get_process_page_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_one_shot_page_breakpoint(dwProcessId, bp.get_address())
# enable hardware breakpoints for one shot
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
if bp.is_disabled():
self.enable_one_shot_hardware_breakpoint(dwThreadId, bp.get_address())
def disable_process_breakpoints(self, dwProcessId):
"""
Disables all breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# disable code breakpoints
for bp in self.get_process_code_breakpoints(dwProcessId):
self.disable_code_breakpoint(dwProcessId, bp.get_address())
# disable page breakpoints
for bp in self.get_process_page_breakpoints(dwProcessId):
self.disable_page_breakpoint(dwProcessId, bp.get_address())
# disable hardware breakpoints
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
self.disable_hardware_breakpoint(dwThreadId, bp.get_address())
def erase_process_breakpoints(self, dwProcessId):
"""
Erases all breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# disable breakpoints first
# if an error occurs, no breakpoint is erased
self.disable_process_breakpoints(dwProcessId)
## # erase hooks
## for address, hook in self.get_process_hooks(dwProcessId):
## self.dont_hook_function(dwProcessId, address)
# erase code breakpoints
for bp in self.get_process_code_breakpoints(dwProcessId):
self.erase_code_breakpoint(dwProcessId, bp.get_address())
# erase page breakpoints
for bp in self.get_process_page_breakpoints(dwProcessId):
self.erase_page_breakpoint(dwProcessId, bp.get_address())
# erase hardware breakpoints
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
self.erase_hardware_breakpoint(dwThreadId, bp.get_address())
#------------------------------------------------------------------------------
# Internal handlers of debug events.
    def _notify_guard_page(self, event):
        """
        Notify breakpoints of a guard page exception event.
        Dispatches the exception to the matching page breakpoint, if any,
        or passes it on to the debugee when it wasn't caused by us.
        @type  event: L{ExceptionEvent}
        @param event: Guard page exception event.
        @rtype:  bool
        @return: C{True} to call the user-defined handle, C{False} otherwise.
        """
        address = event.get_fault_address()
        pid = event.get_pid()
        bCallHandler = True
        # Align address to page boundary.
        mask = ~(MemoryAddresses.pageSize - 1)
        address = address & mask
        # Do we have an active page breakpoint there?
        key = (pid, address)
        if key in self.__pageBP:
            bp = self.__pageBP[key]
            if bp.is_enabled() or bp.is_one_shot():
                # Breakpoint is ours.
                event.continueStatus = win32.DBG_CONTINUE
## event.continueStatus = win32.DBG_EXCEPTION_HANDLED
                # Hit the breakpoint.
                bp.hit(event)
                # Remember breakpoints in RUNNING state.
                if bp.is_running():
                    tid = event.get_tid()
                    self.__add_running_bp(tid, bp)
                # Evaluate the breakpoint condition.
                bCondition = bp.eval_condition(event)
                # If the breakpoint is automatic, run the action.
                # If not, notify the user.
                if bCondition and bp.is_automatic():
                    bp.run_action(event)
                    bCallHandler = False
                else:
                    bCallHandler = bCondition
        # If we don't have a breakpoint here pass the exception to the debugee.
        # This is a normally occurring exception so we shouldn't swallow it.
        else:
            event.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
        return bCallHandler
    def _notify_breakpoint(self, event):
        """
        Notify breakpoints of a breakpoint exception event.
        Handles our own code breakpoints, the system-defined breakpoint,
        and (in hostile mode) passes unknown int3s back to the debugee.
        @type  event: L{ExceptionEvent}
        @param event: Breakpoint exception event.
        @rtype:  bool
        @return: C{True} to call the user-defined handle, C{False} otherwise.
        """
        address = event.get_exception_address()
        pid = event.get_pid()
        bCallHandler = True
        # Do we have an active code breakpoint there?
        key = (pid, address)
        if key in self.__codeBP:
            bp = self.__codeBP[key]
            if not bp.is_disabled():
                # Change the program counter (PC) to the exception address.
                # This accounts for the change in PC caused by
                # executing the breakpoint instruction, no matter
                # the size of it.
                aThread = event.get_thread()
                aThread.set_pc(address)
                # Swallow the exception.
                event.continueStatus = win32.DBG_CONTINUE
                # Hit the breakpoint.
                bp.hit(event)
                # Remember breakpoints in RUNNING state.
                if bp.is_running():
                    tid = event.get_tid()
                    self.__add_running_bp(tid, bp)
                # Evaluate the breakpoint condition.
                bCondition = bp.eval_condition(event)
                # If the breakpoint is automatic, run the action.
                # If not, notify the user.
                if bCondition and bp.is_automatic():
                    bCallHandler = bp.run_action(event)
                else:
                    bCallHandler = bCondition
        # Handle the system breakpoint.
        # TODO: examine the stack trace to figure out if it's really a
        # system breakpoint or an antidebug trick. The caller should be
        # inside ntdll if it's legit.
        elif event.get_process().is_system_defined_breakpoint(address):
            event.continueStatus = win32.DBG_CONTINUE
        # In hostile mode, if we don't have a breakpoint here pass the
        # exception to the debugee. In normal mode assume all breakpoint
        # exceptions are to be handled by the debugger.
        else:
            if self.in_hostile_mode():
                event.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
            else:
                event.continueStatus = win32.DBG_CONTINUE
        return bCallHandler
    def _notify_single_step(self, event):
        """
        Notify breakpoints of a single step exception event.
        Handles, in order: tracing mode, breakpoints left in RUNNING state,
        and hardware breakpoint hits (via the Dr6 debug status register).
        In hostile mode it also tries to defeat a few known antidebug
        tricks built around the trap flag.
        @type  event: L{ExceptionEvent}
        @param event: Single step exception event.
        @rtype:  bool
        @return: C{True} to call the user-defined handle, C{False} otherwise.
        """
        pid = event.get_pid()
        tid = event.get_tid()
        aThread = event.get_thread()
        aProcess = event.get_process()
        bCallHandler = True
        bIsOurs = False
        # In hostile mode set the default to pass the exception to the debugee.
        # If we later determine the exception is ours, hide it instead.
        old_continueStatus = event.continueStatus
        try:
            if self.in_hostile_mode():
                event.continueStatus = win32.DBG_EXCEPTION_NOT_HANDLED
            # Single step support is implemented on x86/x64 architectures only.
            if self.system.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
                return bCallHandler
            # In hostile mode, read the last executed bytes to try to detect
            # some antidebug tricks. Skip this check in normal mode because
            # it'd slow things down.
            #
            # FIXME: weird opcode encodings may bypass this check!
            #
            # bFakeSingleStep: Ice Breakpoint undocumented instruction.
            # bHideTrapFlag: Don't let pushf instructions get the real value of
            # the trap flag.
            # bNextIsPopFlags: Don't let popf instructions clear the trap flag.
            #
            bFakeSingleStep = False
            bLastIsPushFlags = False
            bNextIsPopFlags = False
            if self.in_hostile_mode():
                pc = aThread.get_pc()
                c = aProcess.read_char(pc - 1)
                if c == 0xF1: # int1
                    bFakeSingleStep = True
                elif c == 0x9C: # pushf
                    bLastIsPushFlags = True
                c = aProcess.peek_char(pc)
                if c == 0x66: # the only valid prefix for popf
                    c = aProcess.peek_char(pc + 1)
                if c == 0x9D: # popf
                    if bLastIsPushFlags:
                        bLastIsPushFlags = False # they cancel each other out
                    else:
                        bNextIsPopFlags = True
            # When the thread is in tracing mode,
            # don't pass the exception to the debugee
            # and set the trap flag again.
            if self.is_tracing(tid):
                bIsOurs = True
                if not bFakeSingleStep:
                    event.continueStatus = win32.DBG_CONTINUE
                aThread.set_tf()
                # Don't let the debugee read or write the trap flag.
                # This code works in 32 and 64 bits thanks to the endianness.
                if bLastIsPushFlags or bNextIsPopFlags:
                    sp = aThread.get_sp()
                    flags = aProcess.read_dword(sp)
                    if bLastIsPushFlags:
                        flags &= ~Thread.Flags.Trap
                    else: # if bNextIsPopFlags:
                        flags |= Thread.Flags.Trap
                    aProcess.write_dword(sp, flags)
            # Handle breakpoints in RUNNING state.
            running = self.__get_running_bp_set(tid)
            if running:
                bIsOurs = True
                if not bFakeSingleStep:
                    event.continueStatus = win32.DBG_CONTINUE
                bCallHandler = False
                while running:
                    try:
                        running.pop().hit(event)
                    except Exception:
                        e = sys.exc_info()[1]
                        warnings.warn(str(e), BreakpointWarning)
            # Handle hardware breakpoints.
            if tid in self.__hardwareBP:
                ctx = aThread.get_context(win32.CONTEXT_DEBUG_REGISTERS)
                Dr6 = ctx['Dr6']
                ctx['Dr6'] = Dr6 & DebugRegister.clearHitMask
                aThread.set_context(ctx)
                bFoundBreakpoint = False
                bCondition = False
                # Iterate over a copy: callbacks may remove breakpoints.
                hwbpList = [ bp for bp in self.__hardwareBP[tid] ]
                for bp in hwbpList:
                    if not bp in self.__hardwareBP[tid]:
                        continue # it was removed by a user-defined callback
                    slot = bp.get_slot()
                    if (slot is not None) and \
                       (Dr6 & DebugRegister.hitMask[slot]):
                        if not bFoundBreakpoint: #set before actions are called
                            if not bFakeSingleStep:
                                event.continueStatus = win32.DBG_CONTINUE
                            bFoundBreakpoint = True
                            bIsOurs = True
                        bp.hit(event)
                        if bp.is_running():
                            self.__add_running_bp(tid, bp)
                        bThisCondition = bp.eval_condition(event)
                        if bThisCondition and bp.is_automatic():
                            bp.run_action(event)
                            bThisCondition = False
                        bCondition = bCondition or bThisCondition
                if bFoundBreakpoint:
                    bCallHandler = bCondition
            # Always call the user-defined handler
            # when the thread is in tracing mode.
            if self.is_tracing(tid):
                bCallHandler = True
            # If we're not in hostile mode, by default we assume all single
            # step exceptions are caused by the debugger.
            if not bIsOurs and not self.in_hostile_mode():
                aThread.clear_tf()
        # If the user hit Control-C while we were inside the try block,
        # set the default continueStatus back.
        except:
            event.continueStatus = old_continueStatus
            raise
        return bCallHandler
def _notify_load_dll(self, event):
"""
Notify the loading of a DLL.
@type event: L{LoadDLLEvent}
@param event: Load DLL event.
@rtype: bool
@return: C{True} to call the user-defined handler, C{False} otherwise.
"""
self.__set_deferred_breakpoints(event)
return True
def _notify_unload_dll(self, event):
"""
Notify the unloading of a DLL.
@type event: L{UnloadDLLEvent}
@param event: Unload DLL event.
@rtype: bool
@return: C{True} to call the user-defined handler, C{False} otherwise.
"""
self.__cleanup_module(event)
return True
def _notify_exit_thread(self, event):
"""
Notify the termination of a thread.
@type event: L{ExitThreadEvent}
@param event: Exit thread event.
@rtype: bool
@return: C{True} to call the user-defined handler, C{False} otherwise.
"""
self.__cleanup_thread(event)
return True
def _notify_exit_process(self, event):
"""
Notify the termination of a process.
@type event: L{ExitProcessEvent}
@param event: Exit process event.
@rtype: bool
@return: C{True} to call the user-defined handler, C{False} otherwise.
"""
self.__cleanup_process(event)
self.__cleanup_thread(event)
return True
#------------------------------------------------------------------------------
# This is the high level breakpoint interface. Here we don't have to care
# about defining or enabling breakpoints, and many errors are ignored
# (like for example setting the same breakpoint twice, here the second
# breakpoint replaces the first, much like in WinDBG). It should be easier
# and more intuitive, if less detailed. It also allows the use of deferred
# breakpoints.
#------------------------------------------------------------------------------
# Code breakpoints
def __set_break(self, pid, address, action, oneshot):
"""
Used by L{break_at} and L{stalk_at}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
@type action: function
@param action: (Optional) Action callback function.
See L{define_code_breakpoint} for more details.
@type oneshot: bool
@param oneshot: C{True} for one-shot breakpoints, C{False} otherwise.
@rtype: L{Breakpoint}
@return: Returns the new L{Breakpoint} object, or C{None} if the label
couldn't be resolved and the breakpoint was deferred. Deferred
breakpoints are set when the DLL they point to is loaded.
"""
if type(address) not in (int, long):
label = address
try:
address = self.system.get_process(pid).resolve_label(address)
if not address:
raise Exception()
except Exception:
try:
deferred = self.__deferredBP[pid]
except KeyError:
deferred = dict()
self.__deferredBP[pid] = deferred
if label in deferred:
msg = "Redefined deferred code breakpoint at %s in process ID %d"
msg = msg % (label, pid)
warnings.warn(msg, BreakpointWarning)
deferred[label] = (action, oneshot)
return None
if self.has_code_breakpoint(pid, address):
bp = self.get_code_breakpoint(pid, address)
if bp.get_action() != action: # can't use "is not", fails for bound methods
bp.set_action(action)
msg = "Redefined code breakpoint at %s in process ID %d"
msg = msg % (label, pid)
warnings.warn(msg, BreakpointWarning)
else:
self.define_code_breakpoint(pid, address, True, action)
bp = self.get_code_breakpoint(pid, address)
if oneshot:
if not bp.is_one_shot():
self.enable_one_shot_code_breakpoint(pid, address)
else:
if not bp.is_enabled():
self.enable_code_breakpoint(pid, address)
return bp
    def __clear_break(self, pid, address):
        """
        Used by L{dont_break_at} and L{dont_stalk_at}.
        Removes both deferred and already-set code breakpoints; warns only
        when neither a deferred entry nor a resolvable label was found.
        @type  pid: int
        @param pid: Process global ID.
        @type  address: int or str
        @param address:
            Memory address of code instruction to break at. It can be an
            integer value for the actual address or a string with a label
            to be resolved.
        """
        if type(address) not in (int, long):
            unknown = True
            label = address
            # First try to remove a pending deferred breakpoint.
            try:
                deferred = self.__deferredBP[pid]
                del deferred[label]
                unknown = False
            except KeyError:
##                traceback.print_last() # XXX DEBUG
                pass
            # Then try to resolve the label to an actual address.
            aProcess = self.system.get_process(pid)
            try:
                address = aProcess.resolve_label(label)
                if not address:
                    raise Exception()
            except Exception:
##                traceback.print_last() # XXX DEBUG
                if unknown:
                    msg = ("Can't clear unknown code breakpoint"
                           " at %s in process ID %d")
                    msg = msg % (label, pid)
                    warnings.warn(msg, BreakpointWarning)
                return
        if self.has_code_breakpoint(pid, address):
            self.erase_code_breakpoint(pid, address)
    def __set_deferred_breakpoints(self, event):
        """
        Used internally. Sets all deferred breakpoints for a DLL when it's
        loaded.

        @type event: L{LoadDLLEvent}
        @param event: Load DLL event.
        """
        pid = event.get_pid()
        try:
            deferred = self.__deferredBP[pid]
        except KeyError:
            # No deferred breakpoints pending for this process.
            return
        aProcess = event.get_process()
        # NOTE(review): entries are deleted from "deferred" inside the loop,
        # which is only safe because items() returns a list copy on
        # Python 2 (this file targets Python 2: it uses "long" above).
        # On Python 3 this would raise RuntimeError — confirm before porting.
        for (label, (action, oneshot)) in deferred.items():
            try:
                address = aProcess.resolve_label(label)
            except Exception:
                # Still unresolvable (symbol not in this DLL); keep deferred.
                continue
            del deferred[label]
            try:
                self.__set_break(pid, address, action, oneshot)
            except Exception:
                msg = "Can't set deferred breakpoint %s at process ID %d"
                msg = msg % (label, pid)
                warnings.warn(msg, BreakpointWarning)
def get_all_deferred_code_breakpoints(self):
"""
Returns a list of deferred code breakpoints.
@rtype: tuple of (int, str, callable, bool)
@return: Tuple containing the following elements:
- Process ID where to set the breakpoint.
- Label pointing to the address where to set the breakpoint.
- Action callback for the breakpoint.
- C{True} of the breakpoint is one-shot, C{False} otherwise.
"""
result = []
for pid, deferred in compat.iteritems(self.__deferredBP):
for (label, (action, oneshot)) in compat.iteritems(deferred):
result.add( (pid, label, action, oneshot) )
return result
def get_process_deferred_code_breakpoints(self, dwProcessId):
"""
Returns a list of deferred code breakpoints.
@type dwProcessId: int
@param dwProcessId: Process ID.
@rtype: tuple of (int, str, callable, bool)
@return: Tuple containing the following elements:
- Label pointing to the address where to set the breakpoint.
- Action callback for the breakpoint.
- C{True} of the breakpoint is one-shot, C{False} otherwise.
"""
return [ (label, action, oneshot)
for (label, (action, oneshot))
in compat.iteritems(self.__deferredBP.get(dwProcessId, {})) ]
def stalk_at(self, pid, address, action = None):
"""
Sets a one shot code breakpoint at the given process and address.
If instead of an address you pass a label, the breakpoint may be
deferred until the DLL it points to is loaded.
@see: L{break_at}, L{dont_stalk_at}
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
@type action: function
@param action: (Optional) Action callback function.
See L{define_code_breakpoint} for more details.
@rtype: bool
@return: C{True} if the breakpoint was set immediately, or C{False} if
it was deferred.
"""
bp = self.__set_break(pid, address, action, oneshot = True)
return bp is not None
def break_at(self, pid, address, action = None):
"""
Sets a code breakpoint at the given process and address.
If instead of an address you pass a label, the breakpoint may be
deferred until the DLL it points to is loaded.
@see: L{stalk_at}, L{dont_break_at}
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
@type action: function
@param action: (Optional) Action callback function.
See L{define_code_breakpoint} for more details.
@rtype: bool
@return: C{True} if the breakpoint was set immediately, or C{False} if
it was deferred.
"""
bp = self.__set_break(pid, address, action, oneshot = False)
return bp is not None
def dont_break_at(self, pid, address):
"""
Clears a code breakpoint set by L{break_at}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
"""
self.__clear_break(pid, address)
def dont_stalk_at(self, pid, address):
"""
Clears a code breakpoint set by L{stalk_at}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
"""
self.__clear_break(pid, address)
#------------------------------------------------------------------------------
# Function hooks
def hook_function(self, pid, address,
preCB = None, postCB = None,
paramCount = None, signature = None):
"""
Sets a function hook at the given address.
If instead of an address you pass a label, the hook may be
deferred until the DLL it points to is loaded.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
@type preCB: function
@param preCB: (Optional) Callback triggered on function entry.
The signature for the callback should be something like this::
def pre_LoadLibraryEx(event, ra, lpFilename, hFile, dwFlags):
# return address
ra = params[0]
# function arguments start from here...
szFilename = event.get_process().peek_string(lpFilename)
# (...)
Note that all pointer types are treated like void pointers, so your
callback won't get the string or structure pointed to by it, but
the remote memory address instead. This is so to prevent the ctypes
library from being "too helpful" and trying to dereference the
pointer. To get the actual data being pointed to, use one of the
L{Process.read} methods.
@type postCB: function
@param postCB: (Optional) Callback triggered on function exit.
The signature for the callback should be something like this::
def post_LoadLibraryEx(event, return_value):
# (...)
@type paramCount: int
@param paramCount:
(Optional) Number of parameters for the C{preCB} callback,
not counting the return address. Parameters are read from
the stack and assumed to be DWORDs in 32 bits and QWORDs in 64.
This is a faster way to pull stack parameters in 32 bits, but in 64
bits (or with some odd APIs in 32 bits) it won't be useful, since
not all arguments to the hooked function will be of the same size.
For a more reliable and cross-platform way of hooking use the
C{signature} argument instead.
@type signature: tuple
@param signature:
(Optional) Tuple of C{ctypes} data types that constitute the
hooked function signature. When the function is called, this will
be used to parse the arguments from the stack. Overrides the
C{paramCount} argument.
@rtype: bool
@return: C{True} if the hook was set immediately, or C{False} if
it was deferred.
"""
try:
aProcess = self.system.get_process(pid)
except KeyError:
aProcess = Process(pid)
arch = aProcess.get_arch()
hookObj = Hook(preCB, postCB, paramCount, signature, arch)
bp = self.break_at(pid, address, hookObj)
return bp is not None
def stalk_function(self, pid, address,
preCB = None, postCB = None,
paramCount = None, signature = None):
"""
Sets a one-shot function hook at the given address.
If instead of an address you pass a label, the hook may be
deferred until the DLL it points to is loaded.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
@type preCB: function
@param preCB: (Optional) Callback triggered on function entry.
The signature for the callback should be something like this::
def pre_LoadLibraryEx(event, ra, lpFilename, hFile, dwFlags):
# return address
ra = params[0]
# function arguments start from here...
szFilename = event.get_process().peek_string(lpFilename)
# (...)
Note that all pointer types are treated like void pointers, so your
callback won't get the string or structure pointed to by it, but
the remote memory address instead. This is so to prevent the ctypes
library from being "too helpful" and trying to dereference the
pointer. To get the actual data being pointed to, use one of the
L{Process.read} methods.
@type postCB: function
@param postCB: (Optional) Callback triggered on function exit.
The signature for the callback should be something like this::
def post_LoadLibraryEx(event, return_value):
# (...)
@type paramCount: int
@param paramCount:
(Optional) Number of parameters for the C{preCB} callback,
not counting the return address. Parameters are read from
the stack and assumed to be DWORDs in 32 bits and QWORDs in 64.
This is a faster way to pull stack parameters in 32 bits, but in 64
bits (or with some odd APIs in 32 bits) it won't be useful, since
not all arguments to the hooked function will be of the same size.
For a more reliable and cross-platform way of hooking use the
C{signature} argument instead.
@type signature: tuple
@param signature:
(Optional) Tuple of C{ctypes} data types that constitute the
hooked function signature. When the function is called, this will
be used to parse the arguments from the stack. Overrides the
C{paramCount} argument.
@rtype: bool
@return: C{True} if the breakpoint was set immediately, or C{False} if
it was deferred.
"""
try:
aProcess = self.system.get_process(pid)
except KeyError:
aProcess = Process(pid)
arch = aProcess.get_arch()
hookObj = Hook(preCB, postCB, paramCount, signature, arch)
bp = self.stalk_at(pid, address, hookObj)
return bp is not None
def dont_hook_function(self, pid, address):
"""
Removes a function hook set by L{hook_function}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
"""
self.dont_break_at(pid, address)
# alias
unhook_function = dont_hook_function
def dont_stalk_function(self, pid, address):
"""
Removes a function hook set by L{stalk_function}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
"""
self.dont_stalk_at(pid, address)
#------------------------------------------------------------------------------
# Variable watches
def __set_variable_watch(self, tid, address, size, action):
"""
Used by L{watch_variable} and L{stalk_variable}.
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to watch.
@type size: int
@param size: Size of variable to watch. The only supported sizes are:
byte (1), word (2), dword (4) and qword (8).
@type action: function
@param action: (Optional) Action callback function.
See L{define_hardware_breakpoint} for more details.
@rtype: L{HardwareBreakpoint}
@return: Hardware breakpoint at the requested address.
"""
# TODO
# We should merge the breakpoints instead of overwriting them.
# We'll have the same problem as watch_buffer and we'll need to change
# the API again.
if size == 1:
sizeFlag = self.BP_WATCH_BYTE
elif size == 2:
sizeFlag = self.BP_WATCH_WORD
elif size == 4:
sizeFlag = self.BP_WATCH_DWORD
elif size == 8:
sizeFlag = self.BP_WATCH_QWORD
else:
raise ValueError("Bad size for variable watch: %r" % size)
if self.has_hardware_breakpoint(tid, address):
warnings.warn(
"Hardware breakpoint in thread %d at address %s was overwritten!" \
% (tid, HexDump.address(address,
self.system.get_thread(tid).get_bits())),
BreakpointWarning)
bp = self.get_hardware_breakpoint(tid, address)
if bp.get_trigger() != self.BP_BREAK_ON_ACCESS or \
bp.get_watch() != sizeFlag:
self.erase_hardware_breakpoint(tid, address)
self.define_hardware_breakpoint(tid, address,
self.BP_BREAK_ON_ACCESS, sizeFlag, True, action)
bp = self.get_hardware_breakpoint(tid, address)
else:
self.define_hardware_breakpoint(tid, address,
self.BP_BREAK_ON_ACCESS, sizeFlag, True, action)
bp = self.get_hardware_breakpoint(tid, address)
return bp
def __clear_variable_watch(self, tid, address):
"""
Used by L{dont_watch_variable} and L{dont_stalk_variable}.
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to stop watching.
"""
if self.has_hardware_breakpoint(tid, address):
self.erase_hardware_breakpoint(tid, address)
def watch_variable(self, tid, address, size, action = None):
"""
Sets a hardware breakpoint at the given thread, address and size.
@see: L{dont_watch_variable}
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to watch.
@type size: int
@param size: Size of variable to watch. The only supported sizes are:
byte (1), word (2), dword (4) and qword (8).
@type action: function
@param action: (Optional) Action callback function.
See L{define_hardware_breakpoint} for more details.
"""
bp = self.__set_variable_watch(tid, address, size, action)
if not bp.is_enabled():
self.enable_hardware_breakpoint(tid, address)
def stalk_variable(self, tid, address, size, action = None):
"""
Sets a one-shot hardware breakpoint at the given thread,
address and size.
@see: L{dont_watch_variable}
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to watch.
@type size: int
@param size: Size of variable to watch. The only supported sizes are:
byte (1), word (2), dword (4) and qword (8).
@type action: function
@param action: (Optional) Action callback function.
See L{define_hardware_breakpoint} for more details.
"""
bp = self.__set_variable_watch(tid, address, size, action)
if not bp.is_one_shot():
self.enable_one_shot_hardware_breakpoint(tid, address)
def dont_watch_variable(self, tid, address):
"""
Clears a hardware breakpoint set by L{watch_variable}.
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to stop watching.
"""
self.__clear_variable_watch(tid, address)
def dont_stalk_variable(self, tid, address):
"""
Clears a hardware breakpoint set by L{stalk_variable}.
@type tid: int
@param tid: Thread global ID.
@type address: int
@param address: Memory address of variable to stop watching.
"""
self.__clear_variable_watch(tid, address)
#------------------------------------------------------------------------------
# Buffer watches
def __set_buffer_watch(self, pid, address, size, action, bOneShot):
"""
Used by L{watch_buffer} and L{stalk_buffer}.
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@type bOneShot: bool
@param bOneShot:
C{True} to set a one-shot breakpoint,
C{False} to set a normal breakpoint.
"""
# Check the size isn't zero or negative.
if size < 1:
raise ValueError("Bad size for buffer watch: %r" % size)
# Create the buffer watch identifier.
bw = BufferWatch(pid, address, address + size, action, bOneShot)
# Get the base address and size in pages required for this buffer.
base = MemoryAddresses.align_address_to_page_start(address)
limit = MemoryAddresses.align_address_to_page_end(address + size)
pages = MemoryAddresses.get_buffer_size_in_pages(address, size)
try:
# For each page:
# + if a page breakpoint exists reuse it
# + if it doesn't exist define it
bset = set() # all breakpoints used
nset = set() # newly defined breakpoints
cset = set() # condition objects
page_addr = base
pageSize = MemoryAddresses.pageSize
while page_addr < limit:
# If a breakpoints exists, reuse it.
if self.has_page_breakpoint(pid, page_addr):
bp = self.get_page_breakpoint(pid, page_addr)
if bp not in bset:
condition = bp.get_condition()
if not condition in cset:
if not isinstance(condition,_BufferWatchCondition):
# this shouldn't happen unless you tinkered
# with it or defined your own page breakpoints
# manually.
msg = "Can't watch buffer at page %s"
msg = msg % HexDump.address(page_addr)
raise RuntimeError(msg)
cset.add(condition)
bset.add(bp)
# If it doesn't, define it.
else:
condition = _BufferWatchCondition()
bp = self.define_page_breakpoint(pid, page_addr, 1,
condition = condition)
bset.add(bp)
nset.add(bp)
cset.add(condition)
# Next page.
page_addr = page_addr + pageSize
# For each breakpoint, enable it if needed.
aProcess = self.system.get_process(pid)
for bp in bset:
if bp.is_disabled() or bp.is_one_shot():
bp.enable(aProcess, None)
# On error...
except:
# Erase the newly defined breakpoints.
for bp in nset:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except:
pass
# Pass the exception to the caller
raise
# For each condition object, add the new buffer.
for condition in cset:
condition.add(bw)
def __clear_buffer_watch_old_method(self, pid, address, size):
"""
Used by L{dont_watch_buffer} and L{dont_stalk_buffer}.
@warn: Deprecated since WinAppDbg 1.5.
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to stop watching.
@type size: int
@param size: Size in bytes of buffer to stop watching.
"""
warnings.warn("Deprecated since WinAppDbg 1.5", DeprecationWarning)
# Check the size isn't zero or negative.
if size < 1:
raise ValueError("Bad size for buffer watch: %r" % size)
# Get the base address and size in pages required for this buffer.
base = MemoryAddresses.align_address_to_page_start(address)
limit = MemoryAddresses.align_address_to_page_end(address + size)
pages = MemoryAddresses.get_buffer_size_in_pages(address, size)
# For each page, get the breakpoint and it's condition object.
# For each condition, remove the buffer.
# For each breakpoint, if no buffers are on watch, erase it.
cset = set() # condition objects
page_addr = base
pageSize = MemoryAddresses.pageSize
while page_addr < limit:
if self.has_page_breakpoint(pid, page_addr):
bp = self.get_page_breakpoint(pid, page_addr)
condition = bp.get_condition()
if condition not in cset:
if not isinstance(condition, _BufferWatchCondition):
# this shouldn't happen unless you tinkered with it
# or defined your own page breakpoints manually.
continue
cset.add(condition)
condition.remove_last_match(address, size)
if condition.count() == 0:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except WindowsError:
pass
page_addr = page_addr + pageSize
def __clear_buffer_watch(self, bw):
"""
Used by L{dont_watch_buffer} and L{dont_stalk_buffer}.
@type bw: L{BufferWatch}
@param bw: Buffer watch identifier.
"""
# Get the PID and the start and end addresses of the buffer.
pid = bw.pid
start = bw.start
end = bw.end
# Get the base address and size in pages required for the buffer.
base = MemoryAddresses.align_address_to_page_start(start)
limit = MemoryAddresses.align_address_to_page_end(end)
pages = MemoryAddresses.get_buffer_size_in_pages(start, end - start)
# For each page, get the breakpoint and it's condition object.
# For each condition, remove the buffer.
# For each breakpoint, if no buffers are on watch, erase it.
cset = set() # condition objects
page_addr = base
pageSize = MemoryAddresses.pageSize
while page_addr < limit:
if self.has_page_breakpoint(pid, page_addr):
bp = self.get_page_breakpoint(pid, page_addr)
condition = bp.get_condition()
if condition not in cset:
if not isinstance(condition, _BufferWatchCondition):
# this shouldn't happen unless you tinkered with it
# or defined your own page breakpoints manually.
continue
cset.add(condition)
condition.remove(bw)
if condition.count() == 0:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except WindowsError:
msg = "Cannot remove page breakpoint at address %s"
msg = msg % HexDump.address( bp.get_address() )
warnings.warn(msg, BreakpointWarning)
page_addr = page_addr + pageSize
def watch_buffer(self, pid, address, size, action = None):
"""
Sets a page breakpoint and notifies when the given buffer is accessed.
@see: L{dont_watch_variable}
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@rtype: L{BufferWatch}
@return: Buffer watch identifier.
"""
self.__set_buffer_watch(pid, address, size, action, False)
def stalk_buffer(self, pid, address, size, action = None):
"""
Sets a one-shot page breakpoint and notifies
when the given buffer is accessed.
@see: L{dont_watch_variable}
@type pid: int
@param pid: Process global ID.
@type address: int
@param address: Memory address of buffer to watch.
@type size: int
@param size: Size in bytes of buffer to watch.
@type action: function
@param action: (Optional) Action callback function.
See L{define_page_breakpoint} for more details.
@rtype: L{BufferWatch}
@return: Buffer watch identifier.
"""
self.__set_buffer_watch(pid, address, size, action, True)
    def dont_watch_buffer(self, bw, *argv, **argd):
        """
        Clears a page breakpoint set by L{watch_buffer}.

        @type bw: L{BufferWatch}
        @param bw:
            Buffer watch identifier returned by L{watch_buffer}.
            For backwards compatibility, the old C{(pid, address, size)}
            calling convention is also accepted (see below).
        """
        # The sane way to do it.
        if not (argv or argd):
            self.__clear_buffer_watch(bw)
        # Backwards compatibility with WinAppDbg 1.4.
        # Reassemble (pid, address, size) from whatever mix of positional
        # and keyword arguments the caller used. Here "bw" is actually the
        # first positional argument (normally the pid).
        else:
            argv = list(argv)
            argv.insert(0, bw)
            # Keyword arguments are spliced into their positional slots.
            if 'pid' in argd:
                argv.insert(0, argd.pop('pid'))
            if 'address' in argd:
                argv.insert(1, argd.pop('address'))
            if 'size' in argd:
                argv.insert(2, argd.pop('size'))
            # Any leftover keyword means the caller got the API wrong.
            if argd:
                raise TypeError("Wrong arguments for dont_watch_buffer()")
            try:
                pid, address, size = argv
            except ValueError:
                raise TypeError("Wrong arguments for dont_watch_buffer()")
            self.__clear_buffer_watch_old_method(pid, address, size)
def dont_stalk_buffer(self, bw, *argv, **argd):
"""
Clears a page breakpoint set by L{stalk_buffer}.
@type bw: L{BufferWatch}
@param bw:
Buffer watch identifier returned by L{stalk_buffer}.
"""
self.dont_watch_buffer(bw, *argv, **argd)
#------------------------------------------------------------------------------
# Tracing
# XXX TODO
# Add "action" parameter to tracing mode
def __start_tracing(self, thread):
"""
@type thread: L{Thread}
@param thread: Thread to start tracing.
"""
tid = thread.get_tid()
if not tid in self.__tracing:
thread.set_tf()
self.__tracing.add(tid)
def __stop_tracing(self, thread):
"""
@type thread: L{Thread}
@param thread: Thread to stop tracing.
"""
tid = thread.get_tid()
if tid in self.__tracing:
self.__tracing.remove(tid)
if thread.is_alive():
thread.clear_tf()
def is_tracing(self, tid):
"""
@type tid: int
@param tid: Thread global ID.
@rtype: bool
@return: C{True} if the thread is being traced, C{False} otherwise.
"""
return tid in self.__tracing
def get_traced_tids(self):
"""
Retrieves the list of global IDs of all threads being traced.
@rtype: list( int... )
@return: List of thread global IDs.
"""
tids = list(self.__tracing)
tids.sort()
return tids
def start_tracing(self, tid):
"""
Start tracing mode in the given thread.
@type tid: int
@param tid: Global ID of thread to start tracing.
"""
if not self.is_tracing(tid):
thread = self.system.get_thread(tid)
self.__start_tracing(thread)
def stop_tracing(self, tid):
"""
Stop tracing mode in the given thread.
@type tid: int
@param tid: Global ID of thread to stop tracing.
"""
if self.is_tracing(tid):
thread = self.system.get_thread(tid)
self.__stop_tracing(thread)
def start_tracing_process(self, pid):
"""
Start tracing mode for all threads in the given process.
@type pid: int
@param pid: Global ID of process to start tracing.
"""
for thread in self.system.get_process(pid).iter_threads():
self.__start_tracing(thread)
def stop_tracing_process(self, pid):
"""
Stop tracing mode for all threads in the given process.
@type pid: int
@param pid: Global ID of process to stop tracing.
"""
for thread in self.system.get_process(pid).iter_threads():
self.__stop_tracing(thread)
def start_tracing_all(self):
"""
Start tracing mode for all threads in all debugees.
"""
for pid in self.get_debugee_pids():
self.start_tracing_process(pid)
def stop_tracing_all(self):
"""
Stop tracing mode for all threads in all debugees.
"""
for pid in self.get_debugee_pids():
self.stop_tracing_process(pid)
#------------------------------------------------------------------------------
# Break on LastError values (only available since Windows Server 2003)
def break_on_error(self, pid, errorCode):
"""
Sets or clears the system breakpoint for a given Win32 error code.
Use L{Process.is_system_defined_breakpoint} to tell if a breakpoint
exception was caused by a system breakpoint or by the application
itself (for example because of a failed assertion in the code).
@note: This functionality is only available since Windows Server 2003.
In 2003 it only breaks on error values set externally to the
kernel32.dll library, but this was fixed in Windows Vista.
@warn: This method will fail if the debug symbols for ntdll (kernel32
in Windows 2003) are not present. For more information see:
L{System.fix_symbol_store_path}.
@see: U{http://www.nynaeve.net/?p=147}
@type pid: int
@param pid: Process ID.
@type errorCode: int
@param errorCode: Win32 error code to stop on. Set to C{0} or
C{ERROR_SUCCESS} to clear the breakpoint instead.
@raise NotImplementedError:
The functionality is not supported in this system.
@raise WindowsError:
An error occurred while processing this request.
"""
aProcess = self.system.get_process(pid)
address = aProcess.get_break_on_error_ptr()
if not address:
raise NotImplementedError(
"The functionality is not supported in this system.")
aProcess.write_dword(address, errorCode)
def dont_break_on_error(self, pid):
"""
Alias to L{break_on_error}C{(pid, ERROR_SUCCESS)}.
@type pid: int
@param pid: Process ID.
@raise NotImplementedError:
The functionality is not supported in this system.
@raise WindowsError:
An error occurred while processing this request.
"""
self.break_on_error(pid, 0)
#------------------------------------------------------------------------------
# Simplified symbol resolving, useful for hooking functions
def resolve_exported_function(self, pid, modName, procName):
"""
Resolves the exported DLL function for the given process.
@type pid: int
@param pid: Process global ID.
@type modName: str
@param modName: Name of the module that exports the function.
@type procName: str
@param procName: Name of the exported function to resolve.
@rtype: int, None
@return: On success, the address of the exported function.
On failure, returns C{None}.
"""
aProcess = self.system.get_process(pid)
aModule = aProcess.get_module_by_name(modName)
if not aModule:
aProcess.scan_modules()
aModule = aProcess.get_module_by_name(modName)
if aModule:
address = aModule.resolve(procName)
return address
return None
def resolve_label(self, pid, label):
"""
Resolves a label for the given process.
@type pid: int
@param pid: Process global ID.
@type label: str
@param label: Label to resolve.
@rtype: int
@return: Memory address pointed to by the label.
@raise ValueError: The label is malformed or impossible to resolve.
@raise RuntimeError: Cannot resolve the module or function.
"""
return self.get_process(pid).resolve_label(label)
| epl-1.0 |
trmznt/rhombus | rhombus/views/userclass.py | 1 | 5554 |
from rhombus.lib.utils import get_dbhandler
from rhombus.lib.roles import SYSADM
from rhombus.lib.tags import (div, table, thead, tbody, th, tr, td, literal, selection_bar, br, ul, li, a, i,
form, POST, GET, fieldset, input_text, input_hidden, input_select, input_password,
submit_bar, h3, p, input_textarea)
from rhombus.views import *
import io, yaml
@roles(SYSADM)
def index(request):
    """List all userclasses with per-row checkboxes and a link to each
    userclass' user list. Admin (SYSADM) only."""

    dbh = get_dbhandler()
    userclasses = dbh.get_userclass()

    # Build the listing table; the checkbox column feeds the selection bar
    # action form below (field name "userclass-ids").
    userclass_table = table(class_='table table-condensed table-striped')[
        thead()[
            th('', style="width: 2em;"), th('UserClass / Domain'), th('Users')
        ],
        tbody()[
            tuple([ tr()[
                        td(literal('<input type="checkbox" name="userclass-ids" value="%d" />' % uc.id)),
                        td('%s' % uc.domain),
                        td(a('%d' % uc.users.count(), href=request.route_url('rhombus.userclass-view', id=uc.id)) )
                    ] for uc in userclasses ])
        ]
    ]

    # id=0 signals "create new userclass" to the edit view.
    add_button = ( 'New userclass',
                    request.route_url('rhombus.userclass-edit', id=0)
    )

    bar = selection_bar('userclass-ids', action=request.route_url('rhombus.userclass-action'),
                    add = add_button)
    html, code = bar.render(userclass_table)

    return render_to_response('rhombus:templates/generics/page.mako',
        {   'html': html,
            'code': code },
        request = request )
@roles(SYSADM)
def view(request):
    """Show a single userclass: a read-only detail form plus the list of
    users that belong to it. Admin (SYSADM) only."""

    dbh = get_dbhandler()
    userclass = dbh.get_userclass( int(request.matchdict['id']) )

    html = div( div(h3('Userclass: %s' % userclass.domain)))

    # Reuse the edit form in read-only (static) mode for the detail view.
    eform = edit_form(userclass, dbh, request, static=True)
    html.add( div(eform) )

    user_table = table(class_='table table-condensed table-striped')[
        thead()[
            th('Login'), th('Name'), th('Primary group')
        ],
        tbody()[
            tuple([ tr()[
                        td(a(u.login, href=request.route_url('rhombus.user-view', id=u.id))),
                        td(u.fullname),
                        td(a(u.primarygroup.name,
                            href=request.route_url('rhombus.group-view', id=u.primarygroup_id))),
                    ] for u in userclass.users ])
        ]
    ]
    html.add(user_table)

    return render_to_response('rhombus:templates/generics/page.mako',
        { 'content': str(html) },
        request = request )
@roles(SYSADM)
def edit(request):
    """Create (id == 0) or update an existing userclass.

    GET renders the edit form; POST validates and persists the data,
    then redirects to the detail view. Admin (SYSADM) only.
    """

    userclass_id = int(request.matchdict['id'])
    if userclass_id < 0:
        return error_page(request, 'Please provide userclass ID')

    dbh = get_dbhandler()

    if request.method == 'GET':
        # id == 0 signals creation: build a transient, unsaved instance
        # so the empty form can be rendered.
        if userclass_id == 0:
            userclass = dbh.UserClass()
            userclass.id = 0
        else:
            userclass = dbh.get_userclass(userclass_id)

        editform = edit_form(userclass, dbh, request)
        return render_to_response( "rhombus:templates/generics/page.mako",
            { 'html': editform,
            }, request = request
        )

    elif request.POST:
        userclass_d = parse_form( request.POST )
        # The hidden id field must match the URL, or the form was tampered
        # with / posted to the wrong route.
        if userclass_d['id'] != userclass_id:
            return error_page(request, "Inconsistent data!")

        try:
            if userclass_id == 0:
                userclass = dbh.UserClass()
                userclass.update( userclass_d )
                dbh.session().add( userclass )
                dbh.session().flush()
                request.session.flash(
                    ( 'success',
                        'Userclass [%s] has been created.' % userclass.domain )
                )
            else:
                userclass = dbh.get_userclass(userclass_id)
                userclass.update( userclass_d )
                dbh.session().flush()
        except RuntimeError as err:
            return error_page(request, str(err))
        # NOTE: a redundant "except: raise" clause was removed here; it was
        # a no-op, since unhandled exceptions propagate anyway.

        return HTTPFound(location = request.route_url('rhombus.userclass-view',
                                                      id=userclass.id))

    raise NotImplementedError()
def getusers(request):
    """Stub: intended to return the users of a userclass. Not implemented;
    currently a silent no-op returning None."""
    pass
def action(request):
    """Stub: target of the selection bar's action form (see index()).
    Deliberately raises until bulk actions are implemented."""
    raise NotImplementedError()
def edit_form(userclass, dbh, request, static=False):
    """Build the userclass edit/detail form.

    When ``static`` is true, the fields render read-only and the submit
    bar is replaced by an Edit link; the same form thus serves both the
    edit and the detail (view) pages.
    """
    eform = form( name='rhombus/userclass', method=POST,
                action=request.route_url('rhombus.userclass-edit', id=userclass.id))
    eform.add(
        fieldset(
            input_hidden(name='rhombus-userclass_id', value=userclass.id),
            input_text('rhombus-userclass_domain', 'Domain', value=userclass.domain,
                        static=static),
            input_text('rhombus-userclass_desc', 'Description', value=userclass.desc,
                        static=static),
            # credscheme is stored as a data structure; it is edited as
            # YAML text and parsed back by parse_form().
            input_textarea('rhombus-userclass_credscheme', 'Cred Scheme', value=yaml.dump(userclass.credscheme),
                        static=static),
            submit_bar() if not static else a('Edit', class_='btn btn-primary col-md-offset-3',
                        href=request.route_url('rhombus.userclass-edit', id=userclass.id)),
        )
    )

    return eform
def parse_form( f ):
    """Extract a userclass dict from the posted edit form.

    Returns a dict with keys: id (int), domain (str), desc (str) and
    credscheme (parsed YAML data structure).
    """
    d = dict()
    d['id'] = int(f['rhombus-userclass_id'])
    d['domain'] = f['rhombus-userclass_domain']
    d['desc'] = f['rhombus-userclass_desc']
    # SECURITY FIX: yaml.load() without an explicit Loader can construct
    # arbitrary Python objects from user-supplied form data; safe_load()
    # restricts the input to plain YAML types. safe_load() also accepts a
    # string directly, so the io.StringIO wrapper was unnecessary.
    d['credscheme'] = yaml.safe_load(f['rhombus-userclass_credscheme'])
    return d
# in-memory data
# Process-local store of sync tokens; not shared between workers and
# cleared on restart.
_SYNCTOKENS_ = []   # [ (token, create_time), ...]
def get_token():
    """Stub: intended to mint a sync token and record it in _SYNCTOKENS_.
    Not implemented; currently returns None."""
    pass
def validate_token( token ):
    """Stub: intended to check *token* against _SYNCTOKENS_.
    Not implemented; currently returns None."""
    pass
| lgpl-3.0 |
avian2/jsonrpc2-zeromq-python | jsonrpc2_zeromq/test/helpers.py | 2 | 3575 | # Part of the jsonrpc2-zeromq-python project.
# (c) 2012 Dan Brown, All Rights Reserved.
# Please see the LICENSE file in the root of this project for license
# information.
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # NOQA
from past.utils import old_div
from time import sleep
import threading
import jsonrpc2_zeromq
import jsonrpc2_zeromq.common
class LongTimeServerMixin(object):
    """Mixin adding a handler method that deliberately takes a long time,
    used to exercise client-side timeouts in the tests."""

    long_time = 500  # milliseconds

    def handle_take_a_long_time_method(self):
        # "division" is imported from __future__ at the top of this file,
        # so / is true division here -- equivalent to the old
        # old_div(self.long_time, 1000.0) since the divisor is a float.
        sleep(self.long_time / 1000.0)
class RPCTestServer(jsonrpc2_zeromq.RPCServer, LongTimeServerMixin):
    """RPC server exposing simple echo / keyword-args / null test methods."""

    def handle_echo_method(self, msg):
        # Echo the payload straight back to the caller.
        return msg

    def handle_dict_args_method(self, an_int=None, a_bool=None, a_float=None,
                                a_str=None):
        # Return the keyword arguments so the client can verify round-tripping.
        return {'an_int': an_int, 'a_bool': a_bool,
                'a_float': a_float, 'a_str': a_str}

    def handle_return_null_method(self):
        # Explicit JSON null result.
        return None
class RPCNotificationTestServer(jsonrpc2_zeromq.RPCNotificationServer):
    """Notification-capable RPC server that simply echoes messages."""

    def handle_echo_method(self, msg):
        return msg
class NotificationOnlyPullTestServer(
        jsonrpc2_zeromq.NotificationOnlyPullServer):
    """PULL-socket server that accepts 'event' notifications and discards them."""

    def handle_event_method(self, event_type, event_value):
        # Do things!
        pass
class NotificationReceiverClientTestServer(
        jsonrpc2_zeromq.RPCNotificationServer, LongTimeServerMixin):
    """Server that, on subscription, pushes notifications back to the
    subscribing client from a background thread."""

    notification_reply_sleep_time = 0.5  # seconds between notifications
    reply_thread = None

    def _send_back_notifications(self, client_id, method, num_notifications):
        # Runs in self.reply_thread: send `num_notifications` JSON-RPC
        # notifications (params=[i]) back over the ROUTER socket.
        for i in range(num_notifications):
            sleep(self.notification_reply_sleep_time)
            notification = jsonrpc2_zeromq.common.Request(method, params=[i],
                                                          notify=True)
            self.socket.send_multipart(
                [client_id,
                 jsonrpc2_zeromq.common.json_rpc_dumps(notification)])

    def _start_sending_notifications_thread(self, client_id, method,
                                            num_notifications):
        self.reply_thread = threading.Thread(
            target=lambda: self._send_back_notifications(client_id, method,
                                                         num_notifications))
        self.reply_thread.start()

    def _handle_method_and_response(self, client_id, req):
        # Slight hack to keep the client_id for _send_back_notifications
        self.client_id = client_id
        return super(NotificationReceiverClientTestServer,
                     self)._handle_method_and_response(client_id, req)

    def handle_subscribe_method(self, num_notifications):
        self._start_sending_notifications_thread(self.client_id, "event",
                                                 num_notifications)

    def handle_subscribe_bad_event_method(self, num_notifications):
        self._start_sending_notifications_thread(self.client_id, "bad_event",
                                                 num_notifications)

    def stop(self):
        # Bug fix: Thread.is_alive is a method; the previous bare
        # `self.reply_thread.is_alive` was a bound-method reference, which is
        # always truthy, so the check never did anything.  Call it.
        if self.reply_thread and self.reply_thread.is_alive():
            self.reply_thread.join()
        return super(NotificationReceiverClientTestServer, self).stop()
class NotificationReceiverTestClient(
        jsonrpc2_zeromq.NotificationReceiverClient):
    """Client that counts how many 'event' notifications it receives."""

    # Running tally of received notifications.
    num_notifications_received = 0

    def handle_event_notification(self, num):
        # `num` is the server-side sequence number; only the count matters here.
        self.num_notifications_received += 1
| bsd-3-clause |
lonnen/socorro | webapp-django/crashstats/tokens/middleware.py | 1 | 2529 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from functools import partial
from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
from django import http
from crashstats.tokens import models
def json_forbidden_response(msg):
    """Return an HTTP 403 response whose body is the JSON object {"error": msg}."""
    payload = json.dumps({'error': msg}) + '\n'
    return http.HttpResponseForbidden(
        payload,
        content_type='application/json; charset=UTF-8'
    )
def has_perm(permissions, codename, obj=None):
    """Permission check bound (via functools.partial) to a token's permissions.

    :param permissions: queryset of the token's Permission objects
        (first parameter renamed from ``all``, which shadowed the builtin)
    :param codename: Django-style permission string, e.g. "app_label.codename"
    :param obj: accepted for signature compatibility with User.has_perm; ignored
    :returns: count of matching permissions (truthy when permitted)
    """
    # Strip the "app_label." prefix; the token stores bare codenames.
    codename = codename.split('.', 1)[1]
    return permissions.filter(codename=codename).count()
class APIAuthenticationMiddleware(object):
    """Authenticate API requests that carry an ``Auth-Token`` header.

    A valid, unexpired token belonging to an active user logs that user in
    for the request; invalid tokens get a JSON 403.  Requests without the
    header pass through untouched.
    """

    def __init__(self, get_response=None):
        self.get_response = get_response

    def __call__(self, request):
        response = self.process_request(request)
        if not response:
            response = self.get_response(request)
        return response

    def process_request(self, request):
        """Return a 403 JSON response on bad tokens, else None to continue."""
        # AuthenticationMiddleware is required so that request.user exists.
        if not hasattr(request, 'user'):
            raise ImproperlyConfigured(
                "The API Authenication middleware requires the"
                " authentication middleware to be installed. Edit your"
                " MIDDLEWARE setting to insert"
                " 'django.contrib.auth.middleware.AuthenticationMiddleware'"
                " before the RemoteUserMiddleware class.")
        key = request.META.get('HTTP_AUTH_TOKEN')
        if not key:
            # No token supplied: fall through to normal authentication.
            return
        try:
            token = models.Token.objects.select_related('user').get(key=key)
            if token.is_expired:
                return json_forbidden_response(
                    'API Token found but expired'
                )
        except models.Token.DoesNotExist:
            return json_forbidden_response('API Token not matched')
        user = token.user
        if not user.is_active:
            return json_forbidden_response('User of API token not active')
        # it actually doesn't matter so much which backend
        # we use as long as it's something
        user.backend = 'django.contrib.auth.backends.ModelBackend'
        # Restrict has_perm to the permission set granted to this token.
        user.has_perm = partial(has_perm, token.permissions.all())
        # User is valid. Set request.user and persist user in the session
        # by logging the user in.
        request.user = user
        auth.login(request, user)
| mpl-2.0 |
ralphwort/chef-repo | build/cliff/pbr-0.8.2-py2.7.egg/pbr/tests/base.py | 4 | 5632 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
"""Common utilities used in testing"""
import os
import shutil
import subprocess
import sys
import fixtures
import testresources
import testtools
from pbr import packaging
class DiveDir(fixtures.Fixture):
    """Dive into given directory and return back on cleanup.

    :ivar path: The target directory.
    """

    def __init__(self, path):
        self.path = path

    def setUp(self):
        super(DiveDir, self).setUp()
        # Register the cleanup *before* chdir so the original cwd is captured.
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.path)
class BaseTestCase(testtools.TestCase, testresources.ResourcedTestCase):
    """Base test case for pbr tests.

    Applies an optional timeout, optional stdout/stderr capture, a fake
    logger, and an isolated environment; copies the 'testpackage' fixture
    into a temp dir and makes it the working directory for the test.
    """

    def setUp(self):
        super(BaseTestCase, self).setUp()
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 30)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # If timeout value is invalid, fail hard.
            print("OS_TEST_TIMEOUT set to invalid value"
                  " defaulting to no timeout")
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        if os.environ.get('OS_STDOUT_CAPTURE') in packaging.TRUE_VALUES:
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if os.environ.get('OS_STDERR_CAPTURE') in packaging.TRUE_VALUES:
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        self.log_fixture = self.useFixture(
            fixtures.FakeLogger('pbr'))
        # Older git does not have config --local, so create a temporary home
        # directory to permit using git config --global without stepping on
        # developer configuration.
        self.useFixture(fixtures.TempHomeDir())
        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.FakeLogger())
        self.useFixture(fixtures.EnvironmentVariable('PBR_VERSION', '0.0'))
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        self.package_dir = os.path.join(self.temp_dir, 'testpackage')
        shutil.copytree(os.path.join(os.path.dirname(__file__), 'testpackage'),
                        self.package_dir)
        # Capture cwd for restoration, then work inside the copied package.
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.package_dir)
        self.addCleanup(self._discard_testpackage)

    def _discard_testpackage(self):
        # Remove pbr.testpackage from sys.modules so that it can be freshly
        # re-imported by the next test
        for k in list(sys.modules):
            if (k == 'pbr_testpackage' or
                    k.startswith('pbr_testpackage.')):
                del sys.modules[k]

    def run_setup(self, *args):
        """Invoke ``python setup.py <args>`` inside the copied test package."""
        return self._run_cmd(sys.executable, ('setup.py',) + args)

    def _run_cmd(self, cmd, args=[]):
        """Run a command in the root of the test working copy.

        Runs a command, with the given argument list, in the root of the test
        working copy--returns the stdout and stderr streams and the exit code
        from the subprocess.
        """
        # NOTE(review): the mutable default `args=[]` is never mutated here,
        # but changing the signature would be an interface change; left as-is.
        return _run_cmd([cmd] + list(args), cwd=self.package_dir)
def _run_cmd(args, cwd):
"""Run the command args in cwd.
:param args: The command to run e.g. ['git', 'status']
:param cwd: The directory to run the comamnd in.
:return: ((stdout, stderr), returncode)
"""
p = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
streams = tuple(s.decode('latin1').strip() for s in p.communicate())
for content in streams:
print(content)
return (streams) + (p.returncode,)
| apache-2.0 |
robhudson/django | django/contrib/flatpages/models.py | 318 | 1556 | from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.core.urlresolvers import get_script_prefix
from django.db import models
from django.utils.encoding import iri_to_uri, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class FlatPage(models.Model):
    """A simple "flat" CMS page: a URL mapped to a static title and content."""

    url = models.CharField(_('URL'), max_length=100, db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)
    enable_comments = models.BooleanField(_('enable comments'), default=False)
    # Optional per-page template; empty means 'flatpages/default.html'.
    template_name = models.CharField(_('template name'), max_length=70, blank=True,
        help_text=_(
            "Example: 'flatpages/contact_page.html'. If this isn't provided, "
            "the system will use 'flatpages/default.html'."
        ),
    )
    registration_required = models.BooleanField(_('registration required'),
        help_text=_("If this is checked, only logged-in users will be able to view the page."),
        default=False)
    sites = models.ManyToManyField(Site, verbose_name=_('sites'))

    class Meta:
        db_table = 'django_flatpage'
        verbose_name = _('flat page')
        verbose_name_plural = _('flat pages')
        ordering = ('url',)

    def __str__(self):
        return "%s -- %s" % (self.url, self.title)

    def get_absolute_url(self):
        # Handle script prefix manually because we bypass reverse()
        return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
| bsd-3-clause |
Centre-Alt-Rendiment-Esportiu/att | old_project/Python/win_libs/numpy/distutils/command/install.py | 240 | 3127 | from __future__ import division, absolute_import, print_function
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
have_setuptools = True
else:
import distutils.command.install as old_install_mod
have_setuptools = False
from distutils.file_util import write_file
old_install = old_install_mod.install
class install(old_install):
    """numpy's install command: adds install_clib and fixes up --record output."""

    # Always run install_clib - the command is cheap, so no need to bypass it;
    # but it's not run by setuptools -- so it's run again in install_data
    sub_commands = old_install.sub_commands + [
        ('install_clib', lambda x: True)
    ]

    def finalize_options(self):
        old_install.finalize_options(self)
        self.install_lib = self.install_libbase

    def setuptools_run(self):
        """ The setuptools version of the .run() method.

        We must pull in the entire code so we can override the level used in the
        _getframe() call since we wrap this call by one more level.
        """
        from distutils.command.install import install as distutils_install

        # Explicit request for old-style install?  Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return distutils_install.run(self)

        # Attempt to detect whether we were called from setup() or by another
        # command.  If we were called by setup(), our caller will be the
        # 'run_command' method in 'distutils.dist', and *its* caller will be
        # the 'run_commands' method.  If we were called any other way, our
        # immediate caller *might* be 'run_command', but it won't have been
        # called by 'run_commands'.  This is slightly kludgy, but seems to
        # work.
        #
        caller = sys._getframe(3)
        caller_module = caller.f_globals.get('__name__', '')
        caller_name = caller.f_code.co_name

        if caller_module != 'distutils.dist' or caller_name != 'run_commands':
            # We weren't called from the command line or setup(), so we
            # should run in backward-compatibility mode to support bdist_*
            # commands.
            distutils_install.run(self)
        else:
            self.do_egg_install()

    def run(self):
        """Run the install, then post-process the --record file if present."""
        if not have_setuptools:
            r = old_install.run(self)
        else:
            r = self.setuptools_run()
        if self.record:
            # bdist_rpm fails when INSTALLED_FILES contains
            # paths with spaces.  Such paths must be enclosed
            # with double-quotes.
            lines = []
            need_rewrite = False
            # Context manager guarantees the record file is closed even if a
            # line raises; also renames the ambiguous loop variable `l`.
            with open(self.record, 'r') as f:
                for line in f:
                    line = line.rstrip()
                    if ' ' in line:
                        need_rewrite = True
                        line = '"%s"' % (line)
                    lines.append(line)
            if need_rewrite:
                self.execute(write_file,
                             (self.record, lines),
                             "re-writing list of installed files to '%s'" %
                             self.record)
        return r
| gpl-3.0 |
ewbankkit/cloud-custodian | tests/test_utils.py | 3 | 15120 | # Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import sys
import tempfile
import time
from botocore.exceptions import ClientError
from dateutil.parser import parse as parse_date
import six
from c7n import ipaddress, utils
from c7n.config import Config
from .common import BaseTest
class Backoff(BaseTest):
    """Tests for c7n.utils retry/backoff helpers."""

    def test_retry_passthrough(self):
        # A retry wrapper with no retryable codes must call through untouched.
        def func():
            return 42

        retry = utils.get_retry((), 5)
        self.assertEqual(retry(func), 42)

    def test_retry_errors(self):
        self.patch(time, "sleep", lambda x: x)
        self.count = 0

        def func():
            self.count += 1
            raise ClientError({"Error": {"Code": 42}}, "something")

        retry = utils.get_retry((42,), 5)
        try:
            retry(func)
        except ClientError:
            # All five attempts should be consumed before re-raising.
            self.assertEqual(self.count, 5)
        else:
            self.fail("should have raised")

    def test_delays(self):
        # Exponential doubling from 1 up to the 256 cap.
        self.assertEqual(
            list(utils.backoff_delays(1, 256)),
            [1, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0],
        )

    def test_delays_jitter(self):
        # With jitter each delay is strictly between 0 and 2**idx.
        for idx, i in enumerate(utils.backoff_delays(1, 256, jitter=True)):
            maxv = 2 ** idx
            self.assertTrue(i > 0)
            self.assertTrue(i < maxv)
class WorkerDecorator(BaseTest):
    """Tests for the c7n.utils.worker decorator (logs exceptions, re-raises)."""

    def test_method_worker(self):
        class foo(object):

            @utils.worker
            def bar(self, err=False):
                """abc"""
                if err:
                    raise ValueError("foo")
                return 42

        i = foo()
        log_output = self.capture_logging("c7n.worker")
        self.assertEqual(i.bar(), 42)
        self.assertRaises(ValueError, i.bar, True)
        # The decorator logs the traceback of the raised error.
        self.assertTrue(
            log_output.getvalue().startswith(
                "Error invoking tests.test_utils.bar\nTraceback"
            )
        )

    def test_function_worker(self):

        @utils.worker
        def rabbit(err=False):
            """what's up doc"""
            if err:
                raise ValueError("more carrots")
            return 42

        # functools.wraps semantics: docstring must survive decoration.
        self.assertEqual(rabbit.__doc__, "what's up doc")
        log_output = self.capture_logging("c7n.worker")
        self.assertEqual(rabbit(), 42)
        # No log output on the success path.
        self.assertEqual(log_output.getvalue(), "")
        self.assertRaises(ValueError, rabbit, True)
        self.assertTrue(
            log_output.getvalue().startswith(
                "Error invoking tests.test_utils.rabbit\nTraceback"
            )
        )
        self.assertTrue("more carrots" in log_output.getvalue())
class UrlConfTest(BaseTest):
    """Tests for c7n.utils.parse_url_config."""

    def test_parse_url(self):
        # NOTE(review): the "®ion" below looks like "&region" mangled by an
        # HTML-entity decode during extraction; input and expectation are
        # self-consistent so the test still holds, but verify against the
        # upstream source.
        self.assertEqual(
            dict(utils.parse_url_config('aws://target?format=json®ion=us-west-2')),
            dict(url='aws://target?format=json®ion=us-west-2',
                 netloc='target',
                 path='',
                 scheme='aws',
                 region='us-west-2',
                 format='json'))
        # Empty and scheme-only inputs normalize to an "aws://" style url.
        self.assertEqual(
            dict(utils.parse_url_config('')),
            {'netloc': '', 'path': '', 'scheme': '', 'url': ''})
        self.assertEqual(
            dict(utils.parse_url_config('aws')),
            {'path': '', 'scheme': 'aws', 'netloc': '', 'url': 'aws://'})
        self.assertEqual(
            dict(utils.parse_url_config('aws://')),
            {'path': '', 'scheme': 'aws', 'netloc': '', 'url': 'aws://'})
class UtilTest(BaseTest):
    """Tests for assorted helpers in c7n.utils (sessions, dates, ARNs,
    camel-casing, schema and file loading)."""

    def test_local_session_region(self):
        policies = [
            self.load_policy(
                {'name': 'ec2', 'resource': 'ec2'},
                config=Config.empty(region="us-east-1")),
            self.load_policy(
                {'name': 'ec2', 'resource': 'ec2'},
                config=Config.empty(region='us-west-2'))]
        previous = None
        previous_region = None
        for p in policies:
            self.assertEqual(p.options.region, p.session_factory.region)
            session = utils.local_session(p.session_factory)
            self.assertNotEqual(session.region_name, previous_region)
            self.assertNotEqual(session, previous)
            previous = session
            previous_region = p.options.region
        # The session cache returns the same session for the same factory.
        self.assertEqual(utils.local_session(p.session_factory), previous)

    def test_format_date(self):
        d = parse_date("2018-02-02 12:00")
        self.assertEqual("{}".format(utils.FormatDate(d)), "2018-02-02 12:00:00")
        self.assertEqual("{:%Y-%m-%d}".format(utils.FormatDate(d)), "2018-02-02")
        # "+5h"/"+5d"/"+5M" format prefixes shift the date before formatting.
        self.assertEqual("{:+5h%H}".format(utils.FormatDate(d)), "17")
        self.assertEqual("{:+5d%d}".format(utils.FormatDate(d)), "07")
        self.assertEqual("{:+5M%M}".format(utils.FormatDate(d)), "05")

    def test_group_by(self):
        # On py2 dict ordering isn't stable, so sort keys there.
        sorter = lambda x: x  # NOQA E731
        sorter = sys.version_info.major == 2 and sorted or sorter
        items = [{}, {"Type": "a"}, {"Type": "a"}, {"Type": "b"}]
        self.assertEqual(
            sorter(list(utils.group_by(items, "Type").keys())), [None, "a", "b"]
        )
        # Dotted keys traverse nested dicts.
        items = [
            {},
            {"Type": {"Part": "a"}},
            {"Type": {"Part": "a"}},
            {"Type": {"Part": "b"}},
        ]
        self.assertEqual(
            sorter(list(utils.group_by(items, "Type.Part").keys())), [None, "a", "b"]
        )

    def write_temp_file(self, contents, suffix=".tmp"):
        """ Write a temporary file and return the filename.

        The file will be cleaned up after the test.
        """
        file = tempfile.NamedTemporaryFile(suffix=suffix)
        file.write(contents)
        file.flush()
        self.addCleanup(file.close)
        return file.name

    def test_ipv4_network(self):
        # Containment between networks of different prefix lengths.
        n1 = utils.IPv4Network(u"10.0.0.0/16")
        n2 = utils.IPv4Network(u"10.0.1.0/24")
        self.assertTrue(n2 in n1)
        self.assertFalse(n1 in n2)
        n3 = utils.IPv4Network(u"10.0.0.0/8")
        self.assertTrue(n2 in n3)
        self.assertTrue(n1 in n3)
        n4 = utils.IPv4Network(u"192.168.1.0/24")
        self.assertFalse(n4 in n3)
        # Single-address containment.
        a1 = ipaddress.ip_address(u"10.0.1.16")
        self.assertTrue(a1 in n1)
        self.assertTrue(a1 in n3)
        self.assertFalse(a1 in n4)

    def test_chunks(self):
        self.assertEqual(
            list(utils.chunks(range(100), size=50)),
            [list(range(50)), list(range(50, 100, 1))],
        )
        self.assertEqual(list(utils.chunks(range(1), size=50)), [[0]])
        # The last chunk may be shorter than `size`.
        self.assertEqual(
            list(utils.chunks(range(60), size=50)),
            [list(range(50)), list(range(50, 60, 1))],
        )

    def test_type_schema(self):
        self.assertEqual(
            utils.type_schema("tester"),
            {
                "type": "object",
                "additionalProperties": False,
                "required": ["type"],
                "properties": {"type": {"enum": ["tester"]}},
            },
        )
        res = utils.type_schema("tester", inherits=["tested"])
        self.assertIn({"$ref": "tested"}, res["allOf"])

    def test_generate_arn(self):
        self.assertEqual(
            utils.generate_arn("s3", "my_bucket"), "arn:aws:s3:::my_bucket"
        )
        # GovCloud regions switch the ARN partition to aws-us-gov.
        self.assertEqual(
            utils.generate_arn("s3", "my_bucket", region="us-gov-west-1"),
            "arn:aws-us-gov:s3:::my_bucket"
        )
        self.assertEqual(
            utils.generate_arn(
                "cloudformation",
                "MyProductionStack/abc9dbf0-43c2-11e3-a6e8-50fa526be49c",
                region="us-east-1",
                account_id="123456789012",
                resource_type="stack",
            ),
            "arn:aws:cloudformation:us-east-1:123456789012:"
            "stack/MyProductionStack/abc9dbf0-43c2-11e3-a6e8-50fa526be49c",
        )
        # Custom resource-type separator (":" instead of "/").
        self.assertEqual(
            utils.generate_arn(
                "rds",
                "mysql-option-group1",
                region="us-east-1",
                account_id="123456789012",
                resource_type="og",
                separator=":",
            ),
            "arn:aws:rds:us-east-1:123456789012:og:mysql-option-group1",
        )

    def test_camel_nested(self):
        nest = {
            "description": "default VPC security group",
            "groupId": "sg-6c7fa917",
            "groupName": "default",
            "ipPermissions": [
                {
                    "ipProtocol": "-1",
                    "ipRanges": ["108.56.181.242/32"],
                    "ipv4Ranges": [{"cidrIp": "108.56.181.242/32"}],
                    "ipv6Ranges": [],
                    "prefixListIds": [],
                    "userIdGroupPairs": [
                        {"groupId": "sg-6c7fa917", "userId": "644160558196"}
                    ],
                }
            ],
            "ipPermissionsEgress": [
                {
                    "ipProtocol": "-1",
                    "ipRanges": ["0.0.0.0/0"],
                    "ipv4Ranges": [{"cidrIp": "0.0.0.0/0"}],
                    "ipv6Ranges": [],
                    "prefixListIds": [],
                    "userIdGroupPairs": [],
                }
            ],
            "ownerId": "644160558196",
            "tags": [
                {"key": "Name", "value": ""},
                {"key": "c7n-test-tag", "value": "c7n-test-val"},
            ],
            "vpcId": "vpc-d2d616b5",
        }
        # camelResource upper-camels keys recursively, values untouched.
        self.assertEqual(
            utils.camelResource(nest)["IpPermissions"],
            [
                {
                    u"IpProtocol": u"-1",
                    u"IpRanges": [u"108.56.181.242/32"],
                    u"Ipv4Ranges": [{u"CidrIp": u"108.56.181.242/32"}],
                    u"Ipv6Ranges": [],
                    u"PrefixListIds": [],
                    u"UserIdGroupPairs": [
                        {u"GroupId": u"sg-6c7fa917", u"UserId": u"644160558196"}
                    ],
                }
            ],
        )

    def test_camel_case(self):
        d = {
            "zebraMoon": [{"instanceId": 123}, "moon"],
            "color": {"yellow": 1, "green": 2},
        }
        self.assertEqual(
            utils.camelResource(d),
            {
                "ZebraMoon": [{"InstanceId": 123}, "moon"],
                "Color": {"Yellow": 1, "Green": 2},
            },
        )

    def test_snapshot_identifier(self):
        identifier = utils.snapshot_identifier("bkup", "abcdef")
        # e.g. bkup-2016-07-27-abcdef
        self.assertEqual(len(identifier), 28)

    def test_load_error(self):
        # yaml_load must fail loudly when pyyaml is unavailable.
        original_yaml = utils.yaml
        utils.yaml = None
        self.assertRaises(RuntimeError, utils.yaml_load, "testing")
        utils.yaml = original_yaml

    def test_format_event(self):
        event = {"message": "This is a test", "timestamp": 1234567891011}
        event_json = (
            '{\n "timestamp": 1234567891011, \n' ' "message": "This is a test"\n}'
        )
        # Compare parsed JSON so key ordering/whitespace don't matter.
        self.assertEqual(json.loads(utils.format_event(event)), json.loads(event_json))

    def test_date_time_decoder(self):
        dtdec = utils.DateTimeEncoder()
        # Non-datetime values fall through to the default TypeError.
        self.assertRaises(TypeError, dtdec.default, "test")

    def test_set_annotation(self):
        self.assertRaises(
            ValueError, utils.set_annotation, "not a dictionary", "key", "value"
        )

    def test_parse_s3(self):
        self.assertRaises(ValueError, utils.parse_s3, "bogus")
        self.assertEqual(utils.parse_s3("s3://things"), ("s3://things", "things", ""))

    def test_reformat_schema(self):
        # Not a real schema, just doing a smoke test of the function
        # properties = 'target'
        class FakeResource(object):
            schema = {
                "additionalProperties": False,
                "properties": {
                    "type": "foo",
                    "default": {"type": "object"},
                    "key": {"type": "string"},
                    "op": {"enum": ["regex", "ni", "gt", "not-in"]},
                    "value": {
                        "oneOf": [
                            {"type": "array"},
                            {"type": "string"},
                            {"type": "boolean"},
                            {"type": "number"},
                        ]
                    },
                },
                "required": ["key"],
            }

        ret = utils.reformat_schema(FakeResource)
        self.assertIsInstance(ret, dict)
        # Test error conditions
        # Instead of testing for specific keywords, just make sure that strings
        # are returned instead of a dictionary.
        FakeResource.schema = {}
        ret = utils.reformat_schema(FakeResource)
        self.assertIsInstance(ret, six.text_type)
        delattr(FakeResource, "schema")
        ret = utils.reformat_schema(FakeResource)
        self.assertIsInstance(ret, six.text_type)

    def test_load_file(self):
        # Basic load
        yml_file = os.path.join(os.path.dirname(__file__), "data", "vars-test.yml")
        data = utils.load_file(yml_file)
        self.assertTrue(len(data["policies"]) == 1)
        # Load with vars
        resource = "ec2"
        data = utils.load_file(yml_file, vars={"resource": resource})
        self.assertTrue(data["policies"][0]["resource"] == resource)
        # Fail to substitute
        self.assertRaises(
            utils.VarsSubstitutionError, utils.load_file, yml_file, vars={"foo": "bar"}
        )
        # JSON load
        json_file = os.path.join(os.path.dirname(__file__), "data", "ec2-instance.json")
        data = utils.load_file(json_file)
        self.assertTrue(data["InstanceId"] == "i-1aebf7c0")

    def test_format_string_values(self):
        obj = {
            "Key1": "Value1",
            "Key2": 42,
            "Key3": "{xx}",
            u"Key4": [True, {u"K": u"{yy}"}, "{xx}"],
        }
        # Placeholders are substituted recursively through lists and dicts.
        fmt = utils.format_string_values(obj, **{"xx": "aa", "yy": "bb"})
        self.assertEqual(fmt["Key3"], "aa")
        self.assertEqual(fmt["Key4"][2], "aa")
        self.assertEqual(fmt["Key4"][1]["K"], "bb")
        # Unknown placeholders are left intact rather than raising.
        self.assertEqual(
            utils.format_string_values(
                {'k': '{1}'}),
            {'k': '{1}'})
        self.assertEqual(
            utils.format_string_values(
                {'k': '{limit}',
                 'b': '{account_id}'}, account_id=21),
            {'k': '{limit}',
             'b': '21'})
| apache-2.0 |
jnovinger/django | tests/validation/models.py | 238 | 4718 | from __future__ import unicode_literals
from datetime import datetime
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
def validate_answer_to_universe(value):
    """Field validator that only accepts 42."""
    if value == 42:
        return
    raise ValidationError(
        'This is not the answer to life, universe and everything!', code='not42')
class ModelToValidate(models.Model):
    """Exercises model-level validation (clean()) plus assorted field validators."""
    name = models.CharField(max_length=100)
    created = models.DateTimeField(default=datetime.now)
    number = models.IntegerField(db_column='number_val')
    # Self-referential FK restricted (via limit_choices_to) to parents
    # whose number is 10.
    parent = models.ForeignKey(
        'self',
        models.SET_NULL,
        blank=True, null=True,
        limit_choices_to={'number': 10},
    )
    email = models.EmailField(blank=True)
    # FK targeting a non-pk unique column on UniqueFieldsModel.
    ufm = models.ForeignKey(
        'UniqueFieldsModel',
        models.SET_NULL,
        to_field='unique_charfield',
        blank=True, null=True,
    )
    url = models.URLField(blank=True)
    f_with_custom_validator = models.IntegerField(blank=True, null=True, validators=[validate_answer_to_universe])
    slug = models.SlugField(blank=True)

    def clean(self):
        """Model-wide validation: reject number == 11."""
        super(ModelToValidate, self).clean()
        if self.number == 11:
            raise ValidationError('Invalid number supplied!')
class UniqueFieldsModel(models.Model):
    """Model mixing unique and non-unique fields for unique validation tests."""
    unique_charfield = models.CharField(max_length=100, unique=True)
    unique_integerfield = models.IntegerField(unique=True)
    non_unique_field = models.IntegerField()
class CustomPKModel(models.Model):
    """Model with a custom (non-auto) primary key field."""
    my_pk_field = models.CharField(max_length=100, primary_key=True)
class UniqueTogetherModel(models.Model):
    """Model with two overlapping unique_together constraints."""
    cfield = models.CharField(max_length=100)
    ifield = models.IntegerField()
    efield = models.EmailField()

    class Meta:
        unique_together = (('ifield', 'cfield',), ['ifield', 'efield'])
class UniqueForDateModel(models.Model):
    """Model exercising unique_for_date/year/month field options."""
    start_date = models.DateField()
    end_date = models.DateTimeField()
    count = models.IntegerField(unique_for_date="start_date", unique_for_year="end_date")
    order = models.IntegerField(unique_for_month="end_date")
    name = models.CharField(max_length=100)
class CustomMessagesModel(models.Model):
    """Model with custom error_messages overriding validator defaults."""
    other = models.IntegerField(blank=True, null=True)
    number = models.IntegerField(db_column='number_val',
        error_messages={'null': 'NULL', 'not42': 'AAARGH', 'not_equal': '%s != me'},
        validators=[validate_answer_to_universe]
    )
class Author(models.Model):
    """Minimal FK target for Article."""
    name = models.CharField(max_length=100)
class Article(models.Model):
    """Model whose clean() fills in a default pub_date."""
    title = models.CharField(max_length=100)
    author = models.ForeignKey(Author, models.CASCADE)
    pub_date = models.DateTimeField(blank=True)

    def clean(self):
        # Default the publication date to "now" when left blank.
        if self.pub_date is None:
            self.pub_date = datetime.now()
@python_2_unicode_compatible
class Post(models.Model):
    """Model exercising unique_for_date/year/month with blank-able fields."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField()

    def __str__(self):
        # Bug fix: this model has no `name` field, so the previous
        # `return self.name` raised AttributeError whenever a Post was
        # stringified.  Use the title, matching FlexibleDatePost's fields.
        return self.title
class FlexibleDatePost(models.Model):
    """Like Post, but the unique_for_* anchor date may itself be null."""
    title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
    slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
    subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
    posted = models.DateField(blank=True, null=True)
class UniqueErrorsModel(models.Model):
    """Model with custom messages for uniqueness violations."""
    name = models.CharField(max_length=100, unique=True, error_messages={'unique': 'Custom unique name message.'})
    no = models.IntegerField(unique=True, error_messages={'unique': 'Custom unique number message.'})
class GenericIPAddressTestModel(models.Model):
    """Model covering GenericIPAddressField protocol variants."""
    generic_ip = models.GenericIPAddressField(blank=True, null=True, unique=True)
    v4_ip = models.GenericIPAddressField(blank=True, null=True, protocol="ipv4")
    v6_ip = models.GenericIPAddressField(blank=True, null=True, protocol="ipv6")
    ip_verbose_name = models.GenericIPAddressField("IP Address Verbose",
                                                   blank=True, null=True)
class GenericIPAddrUnpackUniqueTest(models.Model):
    """Model for testing unique IP fields with IPv4-mapped unpacking."""
    generic_v4unpack_ip = models.GenericIPAddressField(null=True, blank=True, unique=True, unpack_ipv4=True)
# A model can't have multiple AutoFields
# Refs #12467.
# Defining such a model raises at class-creation time, so the check has to
# run at import; the captured AssertionError is verified immediately below.
assertion_error = None
try:
    class MultipleAutoFields(models.Model):
        auto1 = models.AutoField(primary_key=True)
        auto2 = models.AutoField(primary_key=True)
except AssertionError as exc:
    assertion_error = exc
assert str(assertion_error) == "A model can't have more than one AutoField."
| bsd-3-clause |
TangHao1987/intellij-community | plugins/hg4idea/testData/bin/mercurial/dirstate.py | 90 | 28061 | # dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import errno
from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding
import os, stat, errno, gc
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = 0x7fffffff
class repocache(filecache):
    """filecache for files in .hg/"""
    def join(self, obj, fname):
        # Resolve fname relative to the repository's .hg directory.
        return obj._opener.join(fname)
class rootcache(filecache):
    """filecache for files in the repository root"""
    def join(self, obj, fname):
        # Resolve fname relative to the working directory root.
        return obj._join(fname)
class dirstate(object):
def __init__(self, opener, ui, root, validate):
'''Create a new dirstate object.
opener is an open()-like callable that can be used to open the
dirstate file; root is the root of the directory tracked by
the dirstate.
'''
self._opener = opener
self._validate = validate
self._root = root
self._rootdir = os.path.join(root, '')
self._dirty = False
self._dirtypl = False
self._lastnormaltime = 0
self._ui = ui
self._filecache = {}
@propertycache
def _map(self):
'''Return the dirstate contents as a map from filename to
(state, mode, size, time).'''
self._read()
return self._map
@propertycache
def _copymap(self):
self._read()
return self._copymap
@propertycache
def _foldmap(self):
f = {}
for name, s in self._map.iteritems():
if s[0] != 'r':
f[util.normcase(name)] = name
for name in self._dirs:
f[util.normcase(name)] = name
f['.'] = '.' # prevents useless util.fspath() invocation
return f
@repocache('branch')
def _branch(self):
try:
return self._opener.read("branch").strip() or "default"
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return "default"
@propertycache
def _pl(self):
try:
fp = self._opener("dirstate")
st = fp.read(40)
fp.close()
l = len(st)
if l == 40:
return st[:20], st[20:40]
elif l > 0 and l < 40:
raise util.Abort(_('working directory state appears damaged!'))
except IOError, err:
if err.errno != errno.ENOENT:
raise
return [nullid, nullid]
@propertycache
def _dirs(self):
return scmutil.dirs(self._map, 'r')
def dirs(self):
return self._dirs
@rootcache('.hgignore')
def _ignore(self):
files = [self._join('.hgignore')]
for name, path in self._ui.configitems("ui"):
if name == 'ignore' or name.startswith('ignore.'):
files.append(util.expandpath(path))
return ignore.ignore(self._root, files, self._ui.warn)
@propertycache
def _slash(self):
return self._ui.configbool('ui', 'slash') and os.sep != '/'
@propertycache
def _checklink(self):
return util.checklink(self._root)
@propertycache
def _checkexec(self):
return util.checkexec(self._root)
@propertycache
def _checkcase(self):
return not util.checkcase(self._join('.hg'))
    def _join(self, f):
        '''Join the relative path f to the repository root.'''
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
    def flagfunc(self, buildfallback):
        '''Return a callable mapping a file name to its flags: 'l' for a
        symlink, 'x' for an executable, '' otherwise.
        When the filesystem supports both symlinks and exec bits, a single
        lstat answers everything; otherwise the (lazily built) fallback
        from buildfallback() supplies whichever capability is missing.
        '''
        if self._checklink and self._checkexec:
            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return 'l'
                    if util.statisexec(st):
                        return 'x'
                except OSError:
                    # missing file has no flags
                    pass
                return ''
            return f
        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback
    def getcwd(self):
        '''Return the current directory relative to the repository root,
        '' when at the root, or an absolute path when outside the repo.'''
        cwd = os.getcwd()
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
    def pathto(self, f, cwd=None):
        '''Return repo-path f rendered relative to cwd (defaults to
        getcwd()), converted to '/' separators when ui.slash is set.'''
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path
    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked
        '''
        # Untracked files fall back to the one-element tuple ("?",).
        return self._map.get(key, ("?",))[0]
    def __contains__(self, key):
        # Membership means the file is in the dirstate in any state,
        # including 'r' (marked for removal).
        return key in self._map
    def __iter__(self):
        # Yield tracked file names in sorted order.
        for x in sorted(self._map):
            yield x
    def iteritems(self):
        '''Iterate over (filename, (state, mode, size, time)) pairs.'''
        return self._map.iteritems()
    def parents(self):
        '''Return both working directory parents, validated.'''
        return [self._validate(p) for p in self._pl]
    def p1(self):
        '''Return the first working directory parent, validated.'''
        return self._validate(self._pl[0])
    def p2(self):
        '''Return the second working directory parent, validated.'''
        return self._validate(self._pl[1])
    def branch(self):
        '''Return the current branch name in local encoding.'''
        return encoding.tolocal(self._branch)
    def setparents(self, p1, p2=nullid):
        """Set dirstate parents to p1 and p2.
        When moving from two parents to one, 'm' merged entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.
        See localrepo.setparents()
        """
        self._dirty = self._dirtypl = True
        oldp2 = self._pl[1]
        self._pl = p1, p2
        copies = {}
        if oldp2 != nullid and p2 == nullid:
            # Discard 'm' markers when moving away from a merge state
            for f, s in self._map.iteritems():
                if s[0] == 'm':
                    # remember the copy record before normallookup drops it
                    if f in self._copymap:
                        copies[f] = self._copymap[f]
                    self.normallookup(f)
        return copies
    def setbranch(self, branch):
        '''Persist a new branch name (given in local encoding) to the
        'branch' file, atomically.'''
        self._branch = encoding.fromlocal(branch)
        f = self._opener('branch', 'w', atomictemp=True)
        try:
            f.write(self._branch + '\n')
            f.close()
            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache['_branch']
            if ce:
                ce.refresh()
        except: # re-raises
            f.discard()
            raise
    def _read(self):
        '''Parse the on-disk dirstate file into _map and _copymap, and
        into _pl unless the parents were already changed in memory
        (_dirtypl). A missing or empty file leaves the maps empty.'''
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener.read("dirstate")
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return
        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            p = parsers.parse_dirstate(self._map, self._copymap, st)
        finally:
            if gcenabled:
                gc.enable()
        if not self._dirtypl:
            self._pl = p
    def invalidate(self):
        '''Drop every piece of cached dirstate data so it is reloaded
        from disk on next access.'''
        for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
                  "_ignore"):
            # only materialized property caches live in __dict__
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        # a self-copy carries no information; ignore it
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]
    def copied(self, file):
        '''Return the copy source of file, or None if file is not copied.'''
        # NOTE: the 'file' parameter shadows the Python 2 builtin of the
        # same name (kept for interface compatibility).
        return self._copymap.get(file, None)
    def copies(self):
        '''Return the full dest -> source copy map.'''
        return self._copymap
    def _droppath(self, f):
        # Keep the _dirs multiset in sync when a tracked path leaves the
        # dirstate -- but only if _dirs has already been materialized.
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            self._dirs.delpath(f)
    def _addpath(self, f, state, mode, size, mtime):
        '''Insert or update the entry for f and maintain the _dirs
        bookkeeping. Raises util.Abort if a newly added name clashes
        with an existing file or directory.'''
        oldstate = self[f]
        if state == 'a' or oldstate == 'r':
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in scmutil.finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            self._dirs.addpath(f)
        self._dirty = True
        self._map[f] = (state, mode, size, mtime)
    def normal(self, f):
        '''Mark a file normal and clean.'''
        # record the file's current on-disk mode/size/mtime, masked to the
        # range storable in the dirstate format
        s = os.lstat(self._join(f))
        mtime = int(s.st_mtime)
        self._addpath(f, 'n', s.st_mode,
                      s.st_size & _rangemask, mtime & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime
    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    self.copy(source, f)
                return
            # precedence: 'm', or ('n' with size -2 = other parent);
            # either way the merge state is already what we want
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        # size -1 / mtime -1 force a content comparison in status()
        self._addpath(f, 'n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise util.Abort(_("setting %r to other parent "
                               "only allowed in merges") % f)
        # size -2 is the "from other parent" sentinel
        self._addpath(f, 'n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]
    def add(self, f):
        '''Mark a file added.'''
        self._addpath(f, 'a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]
    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state in the size field:
            # -1 = merged, -2 = from the other parent (see normallookup)
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = ('r', 0, size, 0)
        if size == 0 and f in self._copymap:
            del self._copymap[f]
    def merge(self, f):
        '''Mark a file merged.'''
        # outside of a merge there is nothing to merge with
        if self._pl[1] == nullid:
            return self.normallookup(f)
        s = os.lstat(self._join(f))
        self._addpath(f, 'm', s.st_mode,
                      s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
        if f in self._copymap:
            del self._copymap[f]
    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]
    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        '''Return the canonical-case form of path, consulting (and
        updating) the _foldmap cache. Falls back to disk (util.fspath)
        for names not yet in the dirstate, recursing on leading
        directory components.'''
        normed = util.normcase(path)
        folded = self._foldmap.get(normed, None)
        if folded is None:
            if isknown:
                # came from walking the disk, so it already has disk case
                folded = path
            else:
                if exists is None:
                    exists = os.path.lexists(os.path.join(self._root, path))
                if not exists:
                    # Maybe a path component exists
                    if not ignoremissing and '/' in path:
                        d, f = path.rsplit('/', 1)
                        d = self._normalize(d, isknown, ignoremissing, None)
                        folded = d + "/" + f
                    else:
                        # No path components, preserve original case
                        folded = path
                else:
                    # recursively normalize leading directory components
                    # against dirstate
                    if '/' in normed:
                        d, f = normed.rsplit('/', 1)
                        d = self._normalize(d, isknown, ignoremissing, True)
                        r = self._root + "/" + d
                        folded = d + "/" + util.fspath(f, r)
                    else:
                        folded = util.fspath(normed, self._root)
                    self._foldmap[normed] = folded
        return folded
    def normalize(self, path, isknown=False, ignoremissing=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem
        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.
        If ignoremissing is True, missing path are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.
        The normalized case is determined based on the following precedence:
        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''
        # on case-sensitive filesystems the path is already canonical
        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
    def clear(self):
        '''Empty the dirstate: no tracked files, no copies, no parents.'''
        self._map = {}
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._dirty = True
    def rebuild(self, parent, allfiles, changedfiles=None):
        '''Rebuild the dirstate against a single parent revision,
        keeping the old entries for files not in changedfiles.'''
        changedfiles = changedfiles or allfiles
        oldmap = self._map
        self.clear()
        for f in allfiles:
            if f not in changedfiles:
                self._map[f] = oldmap[f]
            else:
                # size -1 forces a later content check; mode follows the
                # recorded 'x' flag (Python 2 octal literals)
                if 'x' in allfiles.flags(f):
                    self._map[f] = ('n', 0777, -1, 0)
                else:
                    self._map[f] = ('n', 0666, -1, 0)
        self._pl = (parent, nullid)
        self._dirty = True
    def write(self):
        '''Serialize the dirstate back to disk, if anything changed.'''
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)
        def finish(s):
            st.write(s)
            st.close()
            self._lastnormaltime = 0
            self._dirty = self._dirtypl = False
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st).st_mtime
        finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
    def _dirignore(self, f):
        '''Return True if directory f, or any of its parent directories,
        is ignored.'''
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in scmutil.finddirs(f):
            if self._ignore(p):
                return True
        return False
    def walk(self, match, subrepos, unknown, ignored):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.
        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        match selects the files of interest; subrepos is a list of
        subrepo paths to exclude (sorted in place here); unknown and
        ignored toggle whether untracked / ignored files are reported.
        '''
        def fwarn(f, msg):
            # warn about an unreadable path and report it as unmatched
            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
            return False
        def badtype(mode):
            # human-readable message for non-file, non-dir entries
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind
        ignore = self._ignore
        dirignore = self._dirignore
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif not unknown:
            # if unknown and ignored are False, skip step 2
            ignore = util.always
            dirignore = util.always
        # bind frequently-used attributes to locals for the hot loops below
        matchfn = match.matchfn
        matchalways = match.always()
        badfn = match.bad
        dmap = self._map
        normpath = util.normpath
        listdir = osutil.listdir
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        work = []
        wadd = work.append
        exact = skipstep3 = False
        if matchfn == match.exact: # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.files() and not match.anypats(): # match.match, no patterns
            skipstep3 = True
        if not exact and self._checkcase:
            normalize = self._normalize
            skipstep3 = False
        else:
            normalize = None
        # drop explicit files that live inside a listed subrepo
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1
        if not files or '.' in files:
            files = ['']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None
        # step 1: find all explicit files
        for ff in files:
            if normalize:
                nf = normalize(normpath(ff), False, True)
            else:
                nf = normpath(ff)
            if nf in results:
                continue
            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    skipstep3 = False
                    if nf in dmap:
                        #file deleted on disk but still in dirstate
                        results[nf] = None
                    match.dir(nf)
                    if not dirignore(nf):
                        wadd(nf)
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError, inst:
                if nf in dmap: # does it exactly match a file?
                    results[nf] = None
                else: # does it match a directory?
                    prefix = nf + "/"
                    for fn in dmap:
                        if fn.startswith(prefix):
                            match.dir(nf)
                            skipstep3 = False
                            break
                    else:
                        badfn(ff, inst.strerror)
        # step 2: visit subdirectories
        while work:
            nd = work.pop()
            skip = None
            if nd == '.':
                nd = ''
            else:
                skip = '.hg'
            try:
                entries = listdir(join(nd), stat=True, skip=skip)
            except OSError, inst:
                if inst.errno in (errno.EACCES, errno.ENOENT):
                    fwarn(nd, inst.strerror)
                    continue
                raise
            for f, kind, st in entries:
                if normalize:
                    nf = normalize(nd and (nd + "/" + f) or f, True, True)
                else:
                    nf = nd and (nd + "/" + f) or f
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            match.dir(nf)
                            wadd(nf)
                        if nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchalways or matchfn(nf):
                                results[nf] = st
                        elif (matchalways or matchfn(nf)) and not ignore(nf):
                            results[nf] = st
                    elif nf in dmap and (matchalways or matchfn(nf)):
                        results[nf] = None
        # subrepo placeholders and '.hg' were only sentinels; drop them
        for s in subrepos:
            del results[s]
        del results['.hg']
        # step 3: report unseen items in the dmap hash
        if not skipstep3 and not exact:
            if not results and matchalways:
                visit = dmap.keys()
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()
            if unknown:
                # unknown == True means we walked the full directory tree above.
                # So if a file is not seen it was either a) not matching matchfn
                # b) ignored, c) missing, or d) under a symlink directory.
                audit_path = scmutil.pathauditor(self._root)
                for nf in iter(visit):
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    if audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat everything we missed.
                nf = iter(visit).next
                for st in util.statfiles([join(i) for i in visit]):
                    results[nf()] = st
        return results
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a tuple of lists (unsure, modified, added,
        removed, deleted, unknown, ignored, clean), where:
          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          added:
            files that have been explicitly added with hg add
          removed:
            files that have been explicitly removed with hg remove
          deleted:
            files that have been deleted through other means ("missing")
          unknown:
            files not in the dirstate that are not ignored
          ignored:
            files not in the dirstate that are ignored
            (by _dirignore())
          clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []
        # bind methods/attributes to locals for speed in the loop below
        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._copymap
        lastnormaltime = self._lastnormaltime
        lnkkind = stat.S_IFLNK
        for fn, st in self.walk(match, subrepos, listunknown,
                                listignored).iteritems():
            if fn not in dmap:
                # not tracked: classify as ignored or unknown
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                elif listunknown:
                    uadd(fn)
                continue
            state, mode, size, time = dmap[fn]
            # st is None when walk() found no stat info: file is missing
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # The "mode & lnkkind != lnkkind or self._checklink"
                # lines are an expansion of "islink => checklink"
                # where islink means "is this a link?" and checklink
                # means "can we check links?".
                mtime = int(st.st_mtime)
                if (size >= 0 and
                    ((size != st.st_size and size != st.st_size & _rangemask)
                     or ((mode ^ st.st_mode) & 0100 and checkexec))
                    and (mode & lnkkind != lnkkind or checklink)
                    or size == -2 # other parent
                    or fn in copymap):
                    madd(fn)
                elif ((time != mtime and time != mtime & _rangemask)
                      and (mode & lnkkind != lnkkind or checklink)):
                    ladd(fn)
                elif mtime == lastnormaltime:
                    # fn may have been changed in the same timeslot without
                    # changing its size. This can happen if we quickly do
                    # multiple commits in a single transaction.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)
        return (lookup, modified, added, removed, deleted, unknown, ignored,
                clean)
| apache-2.0 |
srikantbmandal/ansible | lib/ansible/modules/net_tools/ldap/ldap_entry.py | 38 | 10266 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Peter Sagerson <psagers@ignorare.net>
# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ldap_entry
short_description: Add or remove LDAP entries.
description:
- Add or remove LDAP entries. This module only asserts the existence or
non-existence of an LDAP entry, not its attributes. To assert the
attribute values of an entry, see M(ldap_attr).
notes:
- The default authentication settings will attempt to use a SASL EXTERNAL
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
a simple bind to access your server, pass the credentials in I(bind_dn)
and I(bind_pw).
version_added: '2.3'
author:
- Jiri Tyr (@jtyr)
requirements:
- python-ldap
options:
bind_dn:
required: false
default: null
description:
- A DN to bind with. If this is omitted, we'll try a SASL bind with
the EXTERNAL mechanism. If this is blank, we'll use an anonymous
bind.
bind_pw:
required: false
default: null
description:
- The password to use with I(bind_dn).
dn:
required: true
description:
- The DN of the entry to add or remove.
attributes:
required: false
default: null
description:
- If I(state=present), attributes necessary to create an entry. Existing
entries are never modified. To assert specific attribute values on an
existing entry, use M(ldap_attr) module instead.
objectClass:
required: false
default: null
description:
- If I(state=present), value or list of values to use when creating
the entry. It can either be a string or an actual list of
strings.
params:
required: false
default: null
description:
- List of options which allows to overwrite any of the task or the
I(attributes) options. To remove an option, set the value of the option
to C(null).
server_uri:
required: false
default: ldapi:///
description:
- A URI to the LDAP server. The default value lets the underlying
LDAP client library look for a UNIX domain socket in its default
location.
start_tls:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If true, we'll use the START_TLS LDAP extension.
state:
required: false
choices: [present, absent]
default: present
description:
- The target state of the entry.
validate_certs:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- If C(no), SSL certificates will not be validated. This should only be
used on sites using self-signed certificates.
version_added: "2.4"
"""
EXAMPLES = """
- name: Make sure we have a parent entry for users
ldap_entry:
dn: ou=users,dc=example,dc=com
objectClass: organizationalUnit
- name: Make sure we have an admin user
ldap_entry:
dn: cn=admin,dc=example,dc=com
objectClass:
- simpleSecurityObject
- organizationalRole
attributes:
description: An LDAP administrator
userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
- name: Get rid of an old entry
ldap_entry:
dn: ou=stuff,dc=example,dc=com
state: absent
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
bind_pw: password
#
# The same as in the previous example but with the authentication details
# stored in the ldap_auth variable:
#
# ldap_auth:
# server_uri: ldap://localhost/
# bind_dn: cn=admin,dc=example,dc=com
# bind_pw: password
- name: Get rid of an old entry
ldap_entry:
dn: ou=stuff,dc=example,dc=com
state: absent
params: "{{ ldap_auth }}"
"""
RETURN = """
# Default return values
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
import ldap
import ldap.modlist
import ldap.sasl
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
class LdapEntry(object):
    """Helper that asserts the existence or absence of a single LDAP
    entry, using connection parameters taken from the Ansible module."""

    def __init__(self, module):
        # Shortcuts
        self.module = module
        self.bind_dn = self.module.params['bind_dn']
        self.bind_pw = self.module.params['bind_pw']
        self.dn = self.module.params['dn']
        self.server_uri = self.module.params['server_uri']
        self.start_tls = self.module.params['start_tls']
        self.state = self.module.params['state']
        self.verify_cert = self.module.params['validate_certs']
        # Add the objectClass into the list of attributes
        self.module.params['attributes']['objectClass'] = (
            self.module.params['objectClass'])
        # Load attributes
        if self.state == 'present':
            self.attrs = self._load_attrs()
        # Establish connection
        self.connection = self._connect_to_ldap()

    def _load_attrs(self):
        """ Turn attribute's value to array. """
        attrs = {}
        for name, value in self.module.params['attributes'].items():
            if name not in attrs:
                attrs[name] = []
            if isinstance(value, list):
                attrs[name] = value
            else:
                attrs[name].append(str(value))
        return attrs

    def add(self):
        """ If self.dn does not exist, returns a callable that will add it. """
        def _add():
            self.connection.add_s(self.dn, modlist)
        if not self._is_entry_present():
            # _add closes over modlist built here
            modlist = ldap.modlist.addModlist(self.attrs)
            action = _add
        else:
            action = None
        return action

    def delete(self):
        """ If self.dn exists, returns a callable that will delete it. """
        def _delete():
            self.connection.delete_s(self.dn)
        if self._is_entry_present():
            action = _delete
        else:
            action = None
        return action

    def _is_entry_present(self):
        """ Return True when a base-scope search for self.dn succeeds. """
        try:
            self.connection.search_s(self.dn, ldap.SCOPE_BASE)
        except ldap.NO_SUCH_OBJECT:
            is_present = False
        else:
            is_present = True
        return is_present

    def _connect_to_ldap(self):
        """ Open, optionally TLS-protect, and bind an LDAP connection.

        Uses a simple bind when bind_dn is given, otherwise a SASL
        EXTERNAL bind. Calls fail_json (which exits) on any LDAP error.
        """
        if not self.verify_cert:
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
        connection = ldap.initialize(self.server_uri)
        if self.start_tls:
            try:
                connection.start_tls_s()
            except ldap.LDAPError:
                e = get_exception()
                self.module.fail_json(msg="Cannot start TLS.", details=str(e))
        try:
            if self.bind_dn is not None:
                connection.simple_bind_s(self.bind_dn, self.bind_pw)
            else:
                connection.sasl_interactive_bind_s('', ldap.sasl.external())
        except ldap.LDAPError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot bind to the server.", details=str(e))
        return connection
def main():
    """Module entry point: validate parameters, then create or remove
    the requested LDAP entry (honoring check mode)."""
    module = AnsibleModule(
        argument_spec={
            'attributes': dict(default={}, type='dict'),
            'bind_dn': dict(),
            'bind_pw': dict(default='', no_log=True),
            'dn': dict(required=True),
            'objectClass': dict(type='raw'),
            'params': dict(type='dict'),
            'server_uri': dict(default='ldapi:///'),
            'start_tls': dict(default=False, type='bool'),
            'state': dict(default='present', choices=['present', 'absent']),
            'validate_certs': dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )
    if not HAS_LDAP:
        module.fail_json(
            msg="Missing required 'ldap' module (pip install python-ldap).")
    state = module.params['state']
    # Check if objectClass is present when needed
    if state == 'present' and module.params['objectClass'] is None:
        module.fail_json(msg="At least one objectClass must be provided.")
    # Check if objectClass is of the correct type (string or list;
    # basestring is kept because this module targets Python 2)
    if (
            module.params['objectClass'] is not None and not (
                isinstance(module.params['objectClass'], basestring) or
                isinstance(module.params['objectClass'], list))):
        module.fail_json(msg="objectClass must be either a string or a list.")
    # Update module parameters with user's parameters if defined
    if 'params' in module.params and isinstance(module.params['params'], dict):
        for key, val in module.params['params'].items():
            if key in module.argument_spec:
                module.params[key] = val
            else:
                module.params['attributes'][key] = val
        # Remove the params
        module.params.pop('params', None)
    # Instantiate the LdapEntry object. Named 'ldap_entry' so it does not
    # shadow the imported 'ldap' module (the original bound it to 'ldap').
    ldap_entry = LdapEntry(module)
    # Get the action function (None means nothing to do)
    if state == 'present':
        action = ldap_entry.add()
    elif state == 'absent':
        action = ldap_entry.delete()
    # Perform the action
    if action is not None and not module.check_mode:
        try:
            action()
        except Exception:
            e = get_exception()
            module.fail_json(msg="Entry action failed.", details=str(e))
    module.exit_json(changed=(action is not None))
if __name__ == '__main__':
main()
| gpl-3.0 |
samuelmaudo/yepes | yepes/utils/unidecode/x063.py | 252 | 4656 | data = (
'Bo ', # 0x00
'Chi ', # 0x01
'Gua ', # 0x02
'Zhi ', # 0x03
'Kuo ', # 0x04
'Duo ', # 0x05
'Duo ', # 0x06
'Zhi ', # 0x07
'Qie ', # 0x08
'An ', # 0x09
'Nong ', # 0x0a
'Zhen ', # 0x0b
'Ge ', # 0x0c
'Jiao ', # 0x0d
'Ku ', # 0x0e
'Dong ', # 0x0f
'Ru ', # 0x10
'Tiao ', # 0x11
'Lie ', # 0x12
'Zha ', # 0x13
'Lu ', # 0x14
'Die ', # 0x15
'Wa ', # 0x16
'Jue ', # 0x17
'Mushiru ', # 0x18
'Ju ', # 0x19
'Zhi ', # 0x1a
'Luan ', # 0x1b
'Ya ', # 0x1c
'Zhua ', # 0x1d
'Ta ', # 0x1e
'Xie ', # 0x1f
'Nao ', # 0x20
'Dang ', # 0x21
'Jiao ', # 0x22
'Zheng ', # 0x23
'Ji ', # 0x24
'Hui ', # 0x25
'Xun ', # 0x26
'Ku ', # 0x27
'Ai ', # 0x28
'Tuo ', # 0x29
'Nuo ', # 0x2a
'Cuo ', # 0x2b
'Bo ', # 0x2c
'Geng ', # 0x2d
'Ti ', # 0x2e
'Zhen ', # 0x2f
'Cheng ', # 0x30
'Suo ', # 0x31
'Suo ', # 0x32
'Keng ', # 0x33
'Mei ', # 0x34
'Long ', # 0x35
'Ju ', # 0x36
'Peng ', # 0x37
'Jian ', # 0x38
'Yi ', # 0x39
'Ting ', # 0x3a
'Shan ', # 0x3b
'Nuo ', # 0x3c
'Wan ', # 0x3d
'Xie ', # 0x3e
'Cha ', # 0x3f
'Feng ', # 0x40
'Jiao ', # 0x41
'Wu ', # 0x42
'Jun ', # 0x43
'Jiu ', # 0x44
'Tong ', # 0x45
'Kun ', # 0x46
'Huo ', # 0x47
'Tu ', # 0x48
'Zhuo ', # 0x49
'Pou ', # 0x4a
'Le ', # 0x4b
'Ba ', # 0x4c
'Han ', # 0x4d
'Shao ', # 0x4e
'Nie ', # 0x4f
'Juan ', # 0x50
'Ze ', # 0x51
'Song ', # 0x52
'Ye ', # 0x53
'Jue ', # 0x54
'Bu ', # 0x55
'Huan ', # 0x56
'Bu ', # 0x57
'Zun ', # 0x58
'Yi ', # 0x59
'Zhai ', # 0x5a
'Lu ', # 0x5b
'Sou ', # 0x5c
'Tuo ', # 0x5d
'Lao ', # 0x5e
'Sun ', # 0x5f
'Bang ', # 0x60
'Jian ', # 0x61
'Huan ', # 0x62
'Dao ', # 0x63
'[?] ', # 0x64
'Wan ', # 0x65
'Qin ', # 0x66
'Peng ', # 0x67
'She ', # 0x68
'Lie ', # 0x69
'Min ', # 0x6a
'Men ', # 0x6b
'Fu ', # 0x6c
'Bai ', # 0x6d
'Ju ', # 0x6e
'Dao ', # 0x6f
'Wo ', # 0x70
'Ai ', # 0x71
'Juan ', # 0x72
'Yue ', # 0x73
'Zong ', # 0x74
'Chen ', # 0x75
'Chui ', # 0x76
'Jie ', # 0x77
'Tu ', # 0x78
'Ben ', # 0x79
'Na ', # 0x7a
'Nian ', # 0x7b
'Nuo ', # 0x7c
'Zu ', # 0x7d
'Wo ', # 0x7e
'Xi ', # 0x7f
'Xian ', # 0x80
'Cheng ', # 0x81
'Dian ', # 0x82
'Sao ', # 0x83
'Lun ', # 0x84
'Qing ', # 0x85
'Gang ', # 0x86
'Duo ', # 0x87
'Shou ', # 0x88
'Diao ', # 0x89
'Pou ', # 0x8a
'Di ', # 0x8b
'Zhang ', # 0x8c
'Gun ', # 0x8d
'Ji ', # 0x8e
'Tao ', # 0x8f
'Qia ', # 0x90
'Qi ', # 0x91
'Pai ', # 0x92
'Shu ', # 0x93
'Qian ', # 0x94
'Ling ', # 0x95
'Yi ', # 0x96
'Ya ', # 0x97
'Jue ', # 0x98
'Zheng ', # 0x99
'Liang ', # 0x9a
'Gua ', # 0x9b
'Yi ', # 0x9c
'Huo ', # 0x9d
'Shan ', # 0x9e
'Zheng ', # 0x9f
'Lue ', # 0xa0
'Cai ', # 0xa1
'Tan ', # 0xa2
'Che ', # 0xa3
'Bing ', # 0xa4
'Jie ', # 0xa5
'Ti ', # 0xa6
'Kong ', # 0xa7
'Tui ', # 0xa8
'Yan ', # 0xa9
'Cuo ', # 0xaa
'Zou ', # 0xab
'Ju ', # 0xac
'Tian ', # 0xad
'Qian ', # 0xae
'Ken ', # 0xaf
'Bai ', # 0xb0
'Shou ', # 0xb1
'Jie ', # 0xb2
'Lu ', # 0xb3
'Guo ', # 0xb4
'Haba ', # 0xb5
'[?] ', # 0xb6
'Zhi ', # 0xb7
'Dan ', # 0xb8
'Mang ', # 0xb9
'Xian ', # 0xba
'Sao ', # 0xbb
'Guan ', # 0xbc
'Peng ', # 0xbd
'Yuan ', # 0xbe
'Nuo ', # 0xbf
'Jian ', # 0xc0
'Zhen ', # 0xc1
'Jiu ', # 0xc2
'Jian ', # 0xc3
'Yu ', # 0xc4
'Yan ', # 0xc5
'Kui ', # 0xc6
'Nan ', # 0xc7
'Hong ', # 0xc8
'Rou ', # 0xc9
'Pi ', # 0xca
'Wei ', # 0xcb
'Sai ', # 0xcc
'Zou ', # 0xcd
'Xuan ', # 0xce
'Miao ', # 0xcf
'Ti ', # 0xd0
'Nie ', # 0xd1
'Cha ', # 0xd2
'Shi ', # 0xd3
'Zong ', # 0xd4
'Zhen ', # 0xd5
'Yi ', # 0xd6
'Shun ', # 0xd7
'Heng ', # 0xd8
'Bian ', # 0xd9
'Yang ', # 0xda
'Huan ', # 0xdb
'Yan ', # 0xdc
'Zuan ', # 0xdd
'An ', # 0xde
'Xu ', # 0xdf
'Ya ', # 0xe0
'Wo ', # 0xe1
'Ke ', # 0xe2
'Chuai ', # 0xe3
'Ji ', # 0xe4
'Ti ', # 0xe5
'La ', # 0xe6
'La ', # 0xe7
'Cheng ', # 0xe8
'Kai ', # 0xe9
'Jiu ', # 0xea
'Jiu ', # 0xeb
'Tu ', # 0xec
'Jie ', # 0xed
'Hui ', # 0xee
'Geng ', # 0xef
'Chong ', # 0xf0
'Shuo ', # 0xf1
'She ', # 0xf2
'Xie ', # 0xf3
'Yuan ', # 0xf4
'Qian ', # 0xf5
'Ye ', # 0xf6
'Cha ', # 0xf7
'Zha ', # 0xf8
'Bei ', # 0xf9
'Yao ', # 0xfa
'[?] ', # 0xfb
'[?] ', # 0xfc
'Lan ', # 0xfd
'Wen ', # 0xfe
'Qin ', # 0xff
)
| bsd-3-clause |
Donkyhotay/MoonPy | client/main.py | 1 | 3099 | """Copyright 2009:
Isaac Carroll, Kevin Clement, Jon Handy, David Carroll, Daniel Carroll
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
import pygame
import gettext
import platform
import sys
import os
import time
import introscreen
from networkclient import *
from gameclientstate import *
from mainmenu import *
#****************************************************************************
# The Main class of the client.
#****************************************************************************
class Main:
    """Client entry point: set up per-user data directory and logging,
    create the main window, then launch the intro screen and main menu.

    debug enables console DEBUG logging; skipintro skips the intro
    screen (replaced by a short pause).
    """

    def __init__(self, debug, skipintro):
        pygame.init()
        # Per-user data directory: ~/.moonpy on POSIX (HOME set),
        # %USERPROFILE%\moonpy on Windows.
        tetherdir = os.getenv("HOME")
        if tetherdir is None:
            tetherdir = os.getenv("USERPROFILE")
            tetherdir = os.path.join(tetherdir, "moonpy")
        else:
            tetherdir = os.path.join(tetherdir, ".moonpy")
        if not os.path.exists(tetherdir):
            os.mkdir(tetherdir)
        # start each run with a fresh log file
        logfile = os.path.join(tetherdir, "MoonPy.log")
        if os.path.exists(logfile):
            os.remove(logfile)
        if debug:
            # debug mode logs everything to the console
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(filename=logfile, level=logging.INFO)
        logging.info('Platform: ' + platform.platform())
        logging.info('Python version ' + sys.version)
        logging.info('Pygame version: ' + pygame.version.ver)
        self.client = GameClientState()
        self.client.debug = debug
        logging.info("MoonPy version %s"
                     % (self.client.settings.string_version))
        if skipintro:
            # keep the short pause in place of the intro screen
            time.sleep(1)
        self.create_main_window()
        self.client.moonaudio.intro()
        if not skipintro:
            self.intro = introscreen.IntroScreen(self.client.screen)
        # constructed for its side effects (runs the menu loop)
        MainMenu(self.client)

    def create_main_window(self):
        """Create the pygame display surface and store it on the client."""
        screen_width = self.client.settings.screen_width
        screen_height = self.client.settings.screen_height
        screen_mode = 0
        # NOTE(review): the configured screen_width/screen_height are read
        # but not used -- the window is hard-coded to 500x500. Presumably a
        # development leftover; confirm before switching to the settings
        # values, since that would change the visible window size.
        screen = pygame.display.set_mode((500, 500), screen_mode)
        pygame.display.set_caption("Welcome to MoonPy")
        self.client.screen = screen
| gpl-3.0 |
alinbalutoiu/tempest | tempest/services/compute/json/flavors_client.py | 7 | 7264 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.api_schema.response.compute.v2_1 import flavors as schema
from tempest.api_schema.response.compute.v2_1 import flavors_access \
as schema_access
from tempest.api_schema.response.compute.v2_1 import flavors_extra_specs \
as schema_extra_specs
from tempest.common import service_client
class FlavorsClient(service_client.ServiceClient):
    """REST client for the Nova (compute) flavors API.

    Every call validates the HTTP response against the corresponding
    JSON schema before returning the parsed body.
    """

    def list_flavors(self, detail=False, **params):
        """List flavors, optionally with full details and query filters."""
        url = 'flavors'
        _schema = schema.list_flavors
        if detail:
            url += '/detail'
            _schema = schema.list_flavors_details
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(_schema, resp, body)
        return service_client.ResponseBodyList(resp, body['flavors'])

    def show_flavor(self, flavor_id):
        """Get the details of a single flavor by id."""
        resp, body = self.get("flavors/%s" % flavor_id)
        body = json.loads(body)
        self.validate_response(schema.create_get_flavor_details, resp, body)
        return service_client.ResponseBody(resp, body['flavor'])

    def create_flavor(self, **kwargs):
        """Creates a new flavor or instance type.

        Most parameters except the following are passed to the API without
        any changes.
        :param ephemeral: The name is changed to OS-FLV-EXT-DATA:ephemeral
        :param is_public: The name is changed to os-flavor-access:is_public
        """
        if kwargs.get('ephemeral'):
            kwargs['OS-FLV-EXT-DATA:ephemeral'] = kwargs.pop('ephemeral')
        if kwargs.get('is_public'):
            kwargs['os-flavor-access:is_public'] = kwargs.pop('is_public')
        post_body = json.dumps({'flavor': kwargs})
        resp, body = self.post('flavors', post_body)
        body = json.loads(body)
        self.validate_response(schema.create_get_flavor_details, resp, body)
        return service_client.ResponseBody(resp, body['flavor'])

    def delete_flavor(self, flavor_id):
        """Deletes the given flavor."""
        resp, body = self.delete("flavors/{0}".format(flavor_id))
        self.validate_response(schema.delete_flavor, resp, body)
        return service_client.ResponseBody(resp, body)

    def is_resource_deleted(self, id):
        """Return True once the flavor id no longer appears in the flavor list."""
        # Did not use show_flavor(id) for verification as it gives
        # 200 ok even for deleted id. LP #981263
        # we can remove the loop here and use get by ID when bug gets sortedout
        flavors = self.list_flavors(detail=True)
        for flavor in flavors:
            if flavor['id'] == id:
                return False
        return True

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'flavor'

    def set_flavor_extra_spec(self, flavor_id, **kwargs):
        """Sets extra Specs to the mentioned flavor."""
        post_body = json.dumps({'extra_specs': kwargs})
        resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
                               post_body)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                               resp, body)
        return service_client.ResponseBody(resp, body['extra_specs'])

    def list_flavor_extra_specs(self, flavor_id):
        """Gets extra Specs details of the mentioned flavor."""
        resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_extra_specs.set_get_flavor_extra_specs,
                               resp, body)
        return service_client.ResponseBody(resp, body['extra_specs'])

    def show_flavor_extra_spec(self, flavor_id, key):
        """Gets extra Specs key-value of the mentioned flavor and key."""
        resp, body = self.get('flavors/%s/os-extra_specs/%s' % (flavor_id,
                                                                key))
        body = json.loads(body)
        self.validate_response(
            schema_extra_specs.set_get_flavor_extra_specs_key,
            resp, body)
        return service_client.ResponseBody(resp, body)

    def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
        """Update specified extra Specs of the mentioned flavor and key."""
        resp, body = self.put('flavors/%s/os-extra_specs/%s' %
                              (flavor_id, key), json.dumps(kwargs))
        body = json.loads(body)
        self.validate_response(
            schema_extra_specs.set_get_flavor_extra_specs_key,
            resp, body)
        return service_client.ResponseBody(resp, body)

    def unset_flavor_extra_spec(self, flavor_id, key):
        """Unsets extra Specs from the mentioned flavor."""
        resp, body = self.delete('flavors/%s/os-extra_specs/%s' %
                                 (flavor_id, key))
        self.validate_response(schema.unset_flavor_extra_specs, resp, body)
        return service_client.ResponseBody(resp, body)

    def list_flavor_access(self, flavor_id):
        """Gets flavor access information given the flavor id."""
        resp, body = self.get('flavors/%s/os-flavor-access' % flavor_id)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return service_client.ResponseBodyList(resp, body['flavor_access'])

    def add_flavor_access(self, flavor_id, tenant_id):
        """Add flavor access for the specified tenant."""
        post_body = {
            'addTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return service_client.ResponseBodyList(resp, body['flavor_access'])

    def remove_flavor_access(self, flavor_id, tenant_id):
        """Remove flavor access from the specified tenant."""
        post_body = {
            'removeTenantAccess': {
                'tenant': tenant_id
            }
        }
        post_body = json.dumps(post_body)
        resp, body = self.post('flavors/%s/action' % flavor_id, post_body)
        body = json.loads(body)
        self.validate_response(schema_access.add_remove_list_flavor_access,
                               resp, body)
        return service_client.ResponseBody(resp, body['flavor_access'])
| apache-2.0 |
marcoprado17/flask-bone | src/flask_bombril/form_validators/email_format/email_format.py | 1 | 1348 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# ======================================================================================================================
# The MIT License (MIT)
# ======================================================================================================================
# Copyright (c) 2016 [Marco Aurélio Prado - marco.pdsv@gmail.com]
# ======================================================================================================================
import re
from wtforms.validators import Regexp, HostnameValidation, ValidationError, StopValidation
from flask_bombril.r import R
from flask_bombril.form_validators.utils import raise_with_stop
class EmailFormat(Regexp):
    """WTForms validator checking that a field value looks like an e-mail
    address (local-part@hostname) and that the hostname part is valid.

    :param stop: stored on the validator; presumably consumed by
                 raise_with_stop() to decide between StopValidation and a
                 plain ValidationError — TODO confirm against
                 flask_bombril.form_validators.utils.
    """

    def __init__(self, stop=True):
        self.message = R.string.validators.invalid_email_format
        self.stop = stop
        # Hostname validation requires a top-level domain
        # (so e.g. "user@localhost" would be rejected).
        self.validate_hostname = HostnameValidation(
            require_tld=True,
        )
        # Loose pattern: anything, then '@', then a hostname that does not
        # start with '.' or '@' and contains no further '@'. Group 1 captures
        # the hostname for the extra hostname check below.
        super(EmailFormat, self).__init__(r"^.+@([^.@][^@]+)$", re.IGNORECASE, self.message)

    def __call__(self, form, field, message=None):
        # NOTE(review): relies on wtforms' Regexp.__call__ returning the match
        # object on success and raising ValidationError on failure — confirm
        # against the pinned wtforms version. The `message` parameter is
        # accepted for API compatibility but unused.
        try:
            match = super(EmailFormat, self).__call__(form, field)
            if not self.validate_hostname(match.group(1)):
                raise_with_stop(self)
        except (ValidationError, StopValidation):
            raise_with_stop(self)
yuhangwang/meson | tools/ac_converter.py | 1 | 9050 | #!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script reads config.h.meson, looks for header
checks and writes the corresponding meson declaration.
Copy config.h.in to config.h.meson, replace #undef
with #mesondefine and run this. We can't do this automatically
because some configure scripts have #undef statements
that are unrelated to configure checks.
"""
import sys
# Emit the meson preamble, then the list of header checks: every
# "#mesondefine HAVE_FOO_H" token is mapped back to a header file name
# (e.g. HAVE_SYS_MMAN_H -> sys/mman.h).
print('''cc = meson.get_compiler('c')
cdata = configuration_data()''')

print('check_headers = [')
# Use a context manager so the input file is closed deterministically.
with open(sys.argv[1]) as config_file:
    for line in config_file:
        line = line.strip()
        if line.startswith('#mesondefine') and \
           line.endswith('_H'):
            token = line.split()[1]
            # Strip the HAVE_ prefix and the _H suffix, lowercase the parts
            # and rejoin them as a path: HAVE_SYS_MMAN_H -> sys/mman.h
            tarr = token.split('_')[1:-1]
            tarr = [x.lower() for x in tarr]
            hname = '/'.join(tarr) + '.h'
            print(" ['%s', '%s']," % (token, hname))
print(']\n')

# Emit the meson loop that performs the header checks at configure time.
print('''foreach h : check_headers
  if cc.has_header(h.get(1))
    cdata.set(h.get(0), 1)
  endif
endforeach
''')
# Add stuff here as it is encountered.
# Lookup table mapping autoconf HAVE_* tokens to (function_name, header)
# pairs; used below to emit meson cc.has_function() checks with the right
# '#include' prefix.
function_data = \
    {'HAVE_FEENABLEEXCEPT' : ('feenableexcept', 'fenv.h'),
     'HAVE_FECLEAREXCEPT' : ('feclearexcept', 'fenv.h'),
     'HAVE_FEDISABLEEXCEPT' : ('fedisableexcept', 'fenv.h'),
     'HAVE_MMAP' : ('mmap', 'sys/mman.h'),
     'HAVE_GETPAGESIZE' : ('getpagesize', 'unistd.h'),
     'HAVE_GETISAX' : ('getisax', 'sys/auxv.h'),
     'HAVE_GETTIMEOFDAY' : ('gettimeofday', 'sys/time.h'),
     'HAVE_MPROTECT' : ('mprotect', 'sys/mman.h'),
     'HAVE_POSIX_MEMALIGN' : ('posix_memalign', 'stdlib.h'),
     'HAVE_SIGACTION' : ('sigaction', 'signal.h'),
     'HAVE_ALARM' : ('alarm', 'unistd.h'),
     'HAVE_CLOCK_GETTIME' : ('clock_gettime', 'time.h'),
     'HAVE_CTIME_R' : ('ctime_r', 'time.h'),
     'HAVE_DRAND48' : ('drand48', 'stdlib.h'),
     'HAVE_FLOCKFILE' : ('flockfile', 'stdio.h'),
     'HAVE_FORK' : ('fork', 'unistd.h'),
     'HAVE_FUNLOCKFILE' : ('funlockfile', 'stdio.h'),
     'HAVE_GETLINE' : ('getline', 'stdio.h'),
     'HAVE_LINK' : ('link', 'unistd.h'),
     'HAVE_RAISE' : ('raise', 'signal.h'),
     'HAVE_STRNDUP' : ('strndup', 'string.h'),
     'HAVE_SCHED_GETAFFINITY' : ('sched_getaffinity', 'sched.h'),
     'HAVE_WAITPID' : ('waitpid', 'sys/wait.h'),
     'HAVE_XRENDERCREATECONICALGRADIENT' : ('XRenderCreateConicalGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATELINEARGRADIENT' : ('XRenderCreateLinearGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATERADIALGRADIENT' : ('XRenderCreateRadialGradient', 'xcb/render.h'),
     'HAVE_XRENDERCREATESOLIDFILL' : ('XRenderCreateSolidFill', 'xcb/render.h'),
     'HAVE_DCGETTEXT': ('dcgettext', 'libintl.h'),
     'HAVE_ENDMNTENT': ('endmntent', 'mntent.h'),
     'HAVE_ENDSERVENT' : ('endservent', 'netdb.h'),
     'HAVE_EVENTFD': ('eventfd', 'sys/eventfd.h'),
     'HAVE_FALLOCATE': ('fallocate', 'fcntl.h'),
     'HAVE_FCHMOD': ('fchmod', 'sys/stat.h'),
     'HAVE_FCHOWN': ('fchown', 'unistd.h'),
     'HAVE_FDWALK': ('fdwalk', 'stdlib.h'),
     'HAVE_FSYNC': ('fsync', 'unistd.h'),
     'HAVE_GETC_UNLOCKED': ('getc_unlocked', 'stdio.h'),
     'HAVE_GETFSSTAT': ('getfsstat', 'sys/mount.h'),
     'HAVE_GETMNTENT_R': ('getmntent_r', 'mntent.h'),
     'HAVE_GETPROTOBYNAME_R': ('getprotobyname_r', 'netdb.h'),
     'HAVE_GETRESUID' : ('getresuid', 'unistd.h'),
     'HAVE_GETVFSSTAT' : ('getvfsstat', 'sys/statvfs.h'),
     'HAVE_GMTIME_R' : ('gmtime_r', 'time.h'),
     'HAVE_HASMNTOPT': ('hasmntopt', 'mntent.h'),
     'HAVE_IF_INDEXTONAME': ('if_indextoname', 'net/if.h'),
     'HAVE_IF_NAMETOINDEX': ('if_nametoindex', 'net/if.h'),
     'HAVE_INOTIFY_INIT1': ('inotify_init1', 'sys/inotify.h'),
     'HAVE_ISSETUGID': ('issetugid', 'unistd.h'),
     'HAVE_KEVENT': ('kevent', 'sys/event.h'),
     'HAVE_KQUEUE': ('kqueue', 'sys/event.h'),
     'HAVE_LCHMOD': ('lchmod', 'sys/stat.h'),
     'HAVE_LCHOWN': ('lchown', 'unistd.h'),
     'HAVE_LSTAT': ('lstat', 'sys/stat.h'),
     'HAVE_MEMCPY': ('memcpy', 'string.h'),
     'HAVE_MEMALIGN': ('memalign', 'stdlib.h'),
     'HAVE_MEMMEM': ('memmem', 'string.h'),
     'HAVE_NEWLOCALE': ('newlocale', 'locale.h'),
     'HAVE_PIPE2': ('pipe2', 'fcntl.h'),
     'HAVE_POLL': ('poll', 'poll.h'),
     'HAVE_PRLIMIT': ('prlimit', 'sys/resource.h'),
     'HAVE_PTHREAD_ATTR_SETSTACKSIZE': ('pthread_attr_setstacksize', 'pthread.h'),
     'HAVE_PTHREAD_CONDATTR_SETCLOCK': ('pthread_condattr_setclock', 'pthread.h'),
     'HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE_NP': ('pthread_cond_timedwait_relative_np', 'pthread.h'),
     'HAVE_READLINK': ('readlink', 'unistd.h'),
     'HAVE_RES_INIT': ('res_init', 'resolv.h'),
     'HAVE_SENDMMSG': ('sendmmsg', 'sys/socket.h'),
     'HAVE_SETENV': ('setenv', 'stdlib.h'),
     'HAVE_SETMNTENT': ('setmntent', 'mntent.h'),
     'HAVE_SNPRINTF': ('snprintf', 'stdio.h'),
     'HAVE_SPLICE': ('splice', 'fcntl.h'),
     'HAVE_STATFS': ('statfs', 'mount.h'),
     'HAVE_STATVFS': ('statvfs', 'sys/statvfs.h'),
     'HAVE_STPCOPY': ('stpcopy', 'string.h'),
     'HAVE_STRCASECMP': ('strcasecmp', 'strings.h'),
     'HAVE_STRLCPY': ('strlcpy', 'string.h'),
     'HAVE_STRNCASECMP': ('strncasecmp', 'strings.h'),
     'HAVE_STRSIGNAL': ('strsignal', 'signal.h'),
     'HAVE_STRTOD_L': ('strtod_l', 'stdlib.h'),
     'HAVE_STRTOLL_L': ('strtoll_l', 'stdlib.h'),
     'HAVE_STRTOULL_L': ('strtoull_l', 'stdlib.h'),
     'HAVE_SYMLINK': ('symlink', 'unistd.h'),
     'HAVE_SYSCTLBYNAME': ('sysctlbyname', 'sys/sysctl.h'),
     'HAVE_TIMEGM': ('timegm', 'time.h'),
     'HAVE_UNSETENV': ('unsetenv', 'stdlib.h'),
     'HAVE_USELOCALE': ('uselocale', 'xlocale.h'),
     'HAVE_UTIMES': ('utimes', 'sys/time.h'),
     'HAVE_VALLOC': ('valloc', 'stdlib.h'),
     'HAVE_VASPRINTF': ('vasprintf', 'stdio.h'),
     'HAVE_VSNPRINTF': ('vsnprintf', 'stdio.h'),
     'HAVE_BCOPY': ('bcopy', 'strings.h'),
     'HAVE_STRERROR': ('strerror', 'string.h'),
     'HAVE_MEMMOVE': ('memmove', 'string.h'),
     'HAVE_STRTOIMAX': ('strtoimax', 'inttypes.h'),
     'HAVE_STRTOLL': ('strtoll', 'stdlib.h'),
     'HAVE_STRTOQ': ('strtoq', 'stdlib.h'),
     'HAVE_ACCEPT4': ('accept4', 'sys/socket.h'),
     'HAVE_CHMOD': ('chmod', 'sys/stat.h'),
     'HAVE_CHOWN': ('chown', 'unistd.h'),
     'HAVE_FSTAT': ('fstat', 'sys/stat.h'),
     'HAVE_GETADDRINFO': ('getaddrinfo', 'netdb.h'),
     'HAVE_GETGRGID_R': ('getgrgid_r', 'grp.h'),
     'HAVE_GETGRNAM_R': ('getgrnam_r', 'grp.h'),
     'HAVE_GETGROUPS': ('getgroups', 'grp.h'),
     'HAVE_GETOPT_LONG': ('getopt_long', 'getopt.h'),
     'HAVE_GETPWNAM_R': ('getpwnam', 'pwd.h'),
     'HAVE_GETPWUID_R': ('getpwuid_r', 'pwd.h'),
     'HAVE_GETUID': ('getuid', 'unistd.h'),
     'HAVE_LRINTF': ('lrintf', 'math.h'),
     'HAVE_MKFIFO': ('mkfifo', 'sys/stat.h'),
     'HAVE_MLOCK': ('mlock', 'sys/mman.h'),
     'HAVE_NANOSLEEP': ('nanosleep', 'time.h'),
     'HAVE_PIPE': ('pipe', 'unistd.h'),
     'HAVE_PPOLL': ('ppoll', 'poll.h'),
     'HAVE_REGEXEC': ('regexec', 'regex.h'),
     'HAVE_SETEGID': ('setegid', 'unistd.h'),
     'HAVE_SETEUID': ('seteuid', 'unistd.h'),
     'HAVE_SETPGID': ('setpgid', 'unistd.h'),
     'HAVE_SETREGID': ('setregid', 'unistd.h'),
     'HAVE_SETRESGID': ('setresgid', 'unistd.h'),
     'HAVE_SETRESUID': ('setresuid', 'unistd.h'),
     'HAVE_SHM_OPEN': ('shm_open', 'fcntl.h'),
     'HAVE_SLEEP': ('sleep', 'unistd.h'),
     'HAVE_STRERROR_R': ('strerror_r', 'string.h'),
     'HAVE_STRTOF': ('strtof', 'stdlib.h'),
     'HAVE_SYSCONF': ('sysconf', 'unistd.h'),
     'HAVE_USLEEP': ('usleep', 'unistd.h'),
     'HAVE_VFORK': ('vfork', 'unistd.h'),
     }
# Emit the function checks: known HAVE_* tokens are emitted with their
# function name and prototype header; unknown non-header HAVE_* tokens are
# left as commented reminders for manual handling.
print('check_functions = [')
with open(sys.argv[1]) as config_file:
    for line in config_file:
        try:
            token = line.split()[1]
        except IndexError:
            # Blank line or a line without a second word; nothing to check.
            # (Previously a broad `except Exception: pass` that could also
            # hide genuine errors.)
            continue
        if token in function_data:
            fdata = function_data[token]
            print(" ['%s', '%s', '#include<%s>']," % (token, fdata[0], fdata[1]))
        elif token.startswith('HAVE_') and not token.endswith('_H'):
            print('# check token', token)
print(']\n')

# Emit the meson loop that performs the function checks at configure time.
print('''foreach f : check_functions
  if cc.has_function(f.get(1), prefix : f.get(2))
    cdata.set(f.get(0), 1)
  endif
endforeach
''')
# Convert sizeof checks: each SIZEOF_FOO token becomes a cc.sizeof() call on
# the reconstructed C type name (SIZEOF_VOID_P -> 'void*',
# SIZEOF_SIZE_T -> 'size_t').
with open(sys.argv[1]) as config_file:
    for line in config_file:
        arr = line.strip().split()
        if len(arr) != 2:
            continue
        elem = arr[1]
        if elem.startswith('SIZEOF_'):
            typename = elem.split('_', 1)[1].replace('_P', '*').replace('_', ' ').lower().replace('size t', 'size_t')
            print("cdata.set('%s', cc.sizeof('%s'))" % (elem, typename))

# Finally, emit the configure_file() call that writes config.h.
print('''
configure_file(input : 'config.h.in',
  output : 'config.h',
  configuration : cdata)''')
| apache-2.0 |
kengz/Unity-Lab | slm_lab/agent/net/net_util.py | 1 | 9495 | from functools import partial
from slm_lab import ROOT_DIR
from slm_lab.lib import logger, util
import os
import pydash as ps
import torch
import torch.nn as nn
# Map lowercase torch.nn attribute names to their canonical (CamelCase)
# names, so activation specs can be looked up case-insensitively.
NN_LOWCASE_LOOKUP = {nn_name.lower(): nn_name for nn_name in nn.__dict__}
# Note: rebinds the imported `logger` module name to this module's logger.
logger = logger.get_logger(__name__)
class NoOpLRScheduler:
    '''Do-nothing stand-in that mimics the LRScheduler interface.

    Used when no lr_scheduler is configured, so callers can invoke step()
    and get_lr() unconditionally.
    '''

    def __init__(self, optim):
        self.optim = optim

    def step(self, epoch=None):
        '''No-op: the learning rate never changes.'''
        return None

    def get_lr(self):
        '''Return the optimizer's configured (constant) learning rate.'''
        return self.optim.defaults['lr']
def build_sequential(dims, activation):
    '''Build an MLP as nn.Sequential: one nn.Linear per consecutive pair of
    dims, each followed by the named activation layer.'''
    assert len(dims) >= 2, 'dims need to at least contain input, output'
    layers = []
    for in_d, out_d in zip(dims[:-1], dims[1:]):
        layers.extend([nn.Linear(in_d, out_d), get_activation_fn(activation)])
    return nn.Sequential(*layers)
def get_activation_fn(activation):
    '''Instantiate the torch.nn activation layer named by `activation`
    (case-insensitive), falling back to ReLU for unknown names.'''
    canonical = NN_LOWCASE_LOOKUP.get(activation) or NN_LOWCASE_LOOKUP['relu']
    return getattr(nn, canonical)()
def get_loss_fn(cls, loss_spec):
    '''Build the torch.nn loss module named in loss_spec['name'], passing the
    remaining spec entries as constructor kwargs. `cls` is unused but kept
    for signature parity with the other get_* helpers.'''
    kwargs = ps.omit(loss_spec, 'name')
    return getattr(nn, loss_spec['name'])(**kwargs)
def get_optim(cls, optim_spec):
    '''Build the torch.optim optimizer named in optim_spec['name'] over the
    net's parameters, passing the remaining spec entries as kwargs.'''
    kwargs = ps.omit(optim_spec, 'name')
    return getattr(torch.optim, optim_spec['name'])(cls.parameters(), **kwargs)
def get_lr_scheduler(cls, lr_scheduler_spec):
    '''Build the torch.optim.lr_scheduler named in the spec, or a
    NoOpLRScheduler when the spec is empty.'''
    if ps.is_empty(lr_scheduler_spec):
        return NoOpLRScheduler(cls.optim)
    SchedulerClass = getattr(torch.optim.lr_scheduler, lr_scheduler_spec['name'])
    kwargs = ps.omit(lr_scheduler_spec, 'name')
    return SchedulerClass(cls.optim, **kwargs)
def get_policy_out_dim(body):
    '''Construct the policy network out_dim for a body.

    Discrete actions return action_dim unchanged (a list for multi_discrete,
    an int otherwise). Continuous actions return 2 (mean/std pair) for a
    single action dim, or a list of 2s for several; multi_continuous is not
    supported.
    '''
    action_dim = body.action_dim
    if body.is_discrete:
        # Same value either way; only the sanity check differs by action_type.
        checker = ps.is_list if body.action_type == 'multi_discrete' else ps.is_integer
        assert checker(action_dim), action_dim
        return action_dim
    if body.action_type == 'multi_continuous':
        assert ps.is_list(action_dim), action_dim
        raise NotImplementedError('multi_continuous not supported yet')
    assert ps.is_integer(action_dim), action_dim
    # singleton stay as int; otherwise one (mean, std) pair per action dim
    return 2 if action_dim == 1 else action_dim * [2]
def get_out_dim(body, add_critic=False):
    '''Construct the network out_dim for a body, optionally appending a
    single critic output unit to the policy out_dim.'''
    policy_out_dim = get_policy_out_dim(body)
    if not add_critic:
        return policy_out_dim
    if ps.is_list(policy_out_dim):
        return policy_out_dim + [1]
    return [policy_out_dim, 1]
def init_layers(net, init_fn):
    '''Apply the named nn.init scheme to all of net's layers.

    init_fn is the name of a function in nn.init ('xavier_uniform_',
    'kaiming_*', ...) or None to skip initialization. The gain/nonlinearity
    arguments are derived from net.hid_layers_activation where applicable.
    '''
    if init_fn is None:
        return
    if init_fn == 'xavier_uniform_':
        # Xavier gain depends on the activation; unknown activations get 1.
        try:
            gain = nn.init.calculate_gain(net.hid_layers_activation)
        except ValueError:
            gain = 1
        fn = partial(nn.init.xavier_uniform_, gain=gain)
    elif 'kaiming' in init_fn:
        assert net.hid_layers_activation in ['relu', 'leaky_relu'], f'Kaiming initialization not supported for {net.hid_layers_activation}'
        fn = partial(nn.init.__dict__[init_fn], nonlinearity=net.hid_layers_activation)
    else:
        fn = nn.init.__dict__[init_fn]
    net.apply(partial(init_parameters, init_fn=fn))
def init_parameters(module, init_fn):
    '''Initialize a single module's parameters (intended for net.apply).

    Weights are initialized with init_fn (a function from nn.init); biases
    are set to 0.01, except GRU biases which are zeroed. Only BatchNorm,
    GRU, Linear and Conv (but not *Net) modules are touched.
    '''
    bias_init = 0.01
    classname = module.__class__.__name__
    if 'GRU' in classname:
        # GRUs expose several weight/bias tensors; initialize each by name.
        for name, param in module.named_parameters():
            if 'weight' in name:
                init_fn(param)
            elif 'bias' in name:
                nn.init.constant_(param, 0.0)
    elif ('BatchNorm' in classname or 'Linear' in classname
          or ('Conv' in classname and 'Net' not in classname)):
        init_fn(module.weight)
        nn.init.constant_(module.bias, bias_init)
# params methods
def save(net, model_path):
    '''Serialize net's state_dict to model_path (resolved via util.smart_path).'''
    state = net.state_dict()
    torch.save(state, util.smart_path(model_path))
    logger.info(f'Saved model to {model_path}')
def save_algorithm(algorithm, ckpt=None):
    '''Save every net of an algorithm (model weights and optimizer state).

    Files are written under the session prepath; when ckpt is given, a
    checkpoint tag is appended to the file prefix.
    '''
    agent = algorithm.agent
    net_names = algorithm.net_names
    prepath = util.get_prepath(agent.spec, agent.info_space, unit='session')
    if ckpt is not None:
        prepath = f'{prepath}_ckpt-{ckpt}'
    logger.info(f'Saving algorithm {util.get_class_name(algorithm)} nets {net_names}')
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        save(net, f'{prepath}_{net_name}_model.pth')
        save(net.optim, f'{prepath}_{net_name}_optim.pth')
def load(net, model_path):
    '''Load model weights from model_path into net.

    Tensors are mapped onto the CPU when CUDA is unavailable, so checkpoints
    trained on GPU still load on CPU-only machines.
    '''
    map_location = None if torch.cuda.is_available() else 'cpu'
    state = torch.load(util.smart_path(model_path), map_location=map_location)
    net.load_state_dict(state)
    logger.info(f'Loaded model from {model_path}')
def load_algorithm(algorithm):
    '''Load every net of an algorithm (model weights and optimizer state).'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    if util.in_eval_lab_modes():
        # In eval mode, load the specific model chosen for evaluation.
        prepath = agent.info_space.eval_model_prepath
    else:
        prepath = util.get_prepath(agent.spec, agent.info_space, unit='session')
    logger.info(f'Loading algorithm {util.get_class_name(algorithm)} nets {net_names}')
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        load(net, f'{prepath}_{net_name}_model.pth')
        load(net.optim, f'{prepath}_{net_name}_optim.pth')
def copy(src_net, tar_net):
    '''Hard-copy: overwrite tar_net's parameters with src_net's.'''
    state = src_net.state_dict()
    tar_net.load_state_dict(state)
def polyak_update(src_net, tar_net, beta=0.5):
    '''Polyak (soft) update of the target network's parameters:

        tar_param <- beta * tar_param + (1 - beta) * src_param

    Only tar_net is modified. The previous implementation wrote the blended
    weights into src_net's parameter tensors as a side effect (via
    src_dict_params[name].data.copy_) before loading them into tar_net,
    silently corrupting the source network.
    '''
    src_params = dict(src_net.named_parameters())
    for name, tar_param in tar_net.named_parameters():
        if name in src_params:
            tar_param.data.copy_(
                beta * tar_param.data + (1 - beta) * src_params[name].data)
def to_assert_trained():
    '''Condition for running assert_trained: only in unit tests
    (PY_ENV=test) or in dev lab mode. Note the short-circuit: the lab mode
    is only queried when the env var check fails.'''
    return os.environ.get('PY_ENV') == 'test' or util.get_lab_mode() == 'dev'
def gen_assert_trained(pre_model):
    '''
    Generate assert_trained function used to check weight updates
    @example
    assert_trained = gen_assert_trained(model)
    # ...
    loss.backward()
    optim.step()
    assert_trained(model, loss)
    '''
    # Snapshot the weights now; the returned closure compares against them
    # after the optimizer step.
    pre_weights = [param.clone() for param in pre_model.parameters()]

    def assert_trained(post_model, loss):
        # Verify that the optimizer step actually changed the weights and
        # that gradient norms are neither zero nor exploding.
        post_weights = [param.clone() for param in post_model.parameters()]
        if loss == 0:
            # Zero loss should yield zero gradients everywhere.
            # TODO if without momentum, weights should not change too
            for p_name, param in post_model.named_parameters():
                assert param.grad.norm() == 0
        else:
            assert not all(torch.equal(w1, w2) for w1, w2 in zip(pre_weights, post_weights)), f'Model parameter is not updated in training_step(), check if your tensor is detached from graph. loss: {loss}'
            min_norm = 0
            max_norm = 1e5
            for p_name, param in post_model.named_parameters():
                try:
                    assert min_norm < param.grad.norm() < max_norm, f'Gradient norm fails the extreme value check {min_norm} < {p_name}:{param.grad.norm()} < {max_norm}, which is bad. Loss: {loss}. Check your network and loss computation. Consider using the "clip_grad_val" net parameter.'
                except Exception as e:
                    # Extreme gradient norms are reported but not fatal.
                    logger.warn(e)
        logger.debug('Passed network weight update assertation in dev lab_mode.')
    return assert_trained
def get_grad_norms(algorithm):
    '''Collect the recorded grad norms from every net of an algorithm
    (debugging aid); nets without recorded norms are skipped.'''
    norms = []
    for name in algorithm.net_names:
        net_norms = getattr(algorithm, name).grad_norms
        if net_norms is not None:
            norms.extend(net_norms)
    return norms
| mit |
ypid/subuser | logic/subuserlib/subuser.py | 1 | 2473 | #!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
High level operations on subusers.
"""
#external imports
import sys
#internal imports
import subuserlib.classes.user,subuserlib.resolve,subuserlib.classes.subuser,subuserlib.verify,subuserlib.update
def add(user, subuserName, imageSourceIdentifier):
  """Create a new subuser from the given image source identifier.

  Exits the process if the name is taken or the image source does not
  exist; on image build failure, rolls the registry back to HEAD.
  """
  if subuserName in user.getRegistry().getSubusers():
    sys.exit("A subuser named "+subuserName+" already exists.")
  user.getRegistry().logChange("Adding subuser "+subuserName+" "+imageSourceIdentifier)
  try:
    try:
      imageSource = subuserlib.resolve.resolveImageSource(user,imageSourceIdentifier)
    except KeyError:
      # (unused `as keyError` binding removed)
      sys.exit("Could not add subuser. The image source "+imageSourceIdentifier+" does not exist.")
    user.getRegistry().getSubusers()[subuserName] = subuserlib.classes.subuser.Subuser(user,subuserName,imageSource,None,False,False)
    subuserlib.verify.verify(user)
    user.getRegistry().commit()
  except subuserlib.classes.dockerDaemon.ImageBuildException as e:
    print("Adding subuser failed.")
    print(str(e))
    # Roll back the registry so the failed add leaves no partial state.
    subuserlib.update.checkoutNoCommit(user,"HEAD")
def remove(user, subuserNames):
  """Remove the named subusers from the user's registry.

  Unknown names are reported and skipped. Verification and commit only run
  if at least one subuser was actually removed.
  """
  didSomething = False
  for subuserName in subuserNames:
    if subuserName in user.getRegistry().getSubusers():
      user.getRegistry().logChange("Removing subuser "+str(subuserName))
      try:
        subuserHome = user.getRegistry().getSubusers()[subuserName].getHomeDirOnHost()
        if subuserHome:
          # Typo fix: "issule" -> "issue".
          user.getRegistry().logChange(" If you wish to remove the subusers home directory, issue the command $ rm -r "+subuserHome)
      except Exception:
        # Best effort only: failing to resolve the home dir must not block
        # removal of the subuser itself. (Was a bare `except:`, which also
        # swallowed SystemExit/KeyboardInterrupt.)
        pass
      user.getRegistry().logChange(" If you wish to remove the subusers image, issue the command $ subuser remove-old-images")
      del user.getRegistry().getSubusers()[subuserName]
      didSomething = True
    else:
      print("Cannot remove: subuser "+subuserName+" does not exist.")
  if didSomething:
    subuserlib.verify.verify(user)
    user.getRegistry().commit()
def setExecutableShortcutInstalled(user, subuserName, installed):
  """Install or remove the executable shortcut for the given subuser and
  commit the change to the registry."""
  action = "Creating" if installed else "Removing"
  user.getRegistry().logChange(action + " shortcut for subuser " + subuserName)
  user.getRegistry().getSubusers()[subuserName].setExecutableShortcutInstalled(installed)
  subuserlib.verify.verify(user)
  user.getRegistry().commit()
| lgpl-3.0 |
libvirt/libvirt-test-API | libvirttestapi/repos/domain/ifstats.py | 1 | 2454 | # Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# To get domain interface statistics
import libvirt
from libvirt import libvirtError
from libvirttestapi.src import sharedmod
from libvirttestapi.utils import utils
from libvirttestapi.utils.utils import get_xml_value
# Test-API parameter declarations: 'guestname' is mandatory; no optionals.
required_params = ('guestname',)
optional_params = {}
def check_guest_status(domobj):
    """Return True if the guest is running, False if it is shut off or
    shutting down."""
    state = domobj.info()[0]
    return state not in (libvirt.VIR_DOMAIN_SHUTOFF,
                         libvirt.VIR_DOMAIN_SHUTDOWN)
def check_interface_stats():
    """Check interface statistic result"""
    # Placeholder: validation of the interfaceStats() counters is not
    # implemented yet; ifstats() currently only logs the raw values.
    pass
def ifstats(params):
    """Domain interface statistics test.

    Looks up the guest, powers it on if needed, waits until it answers
    ping, then fetches and logs interfaceStats() for the guest's first
    interface. Returns 0 on success, 1 on failure.
    """
    logger = params['logger']
    guestname = params['guestname']
    conn = sharedmod.libvirtobj['conn']
    domobj = conn.lookupByName(guestname)
    if not check_guest_status(domobj):
        try:
            logger.info("%s is not running , power on it" % guestname)
            domobj.create()
        except libvirtError as e:
            logger.error("API error message: %s, error code is %s"
                         % (e.get_error_message(), e.get_error_code()))
            logger.error("start failed")
            return 1
    mac = utils.get_dom_mac_addr(guestname)
    logger.info("mac address: %s" % mac)
    ip = utils.mac_to_ip(mac, 180)
    logger.info("ip: %s" % ip)
    logger.info('ping guest')
    if not utils.do_ping(ip, 300):
        logger.error('Failed on ping guest, IP: ' + str(ip))
        return 1
    # First interface target device name from the domain XML.
    xml_path = "/domain/devices/interface/target/@dev"
    path = get_xml_value(domobj, xml_path)
    # Renamed from `ifstats`: the old local shadowed this function's name.
    stats = domobj.interfaceStats(path[0])
    if not stats:
        logger.error("fail to get domain interface statistics\n")
        return 1
    logger.debug(stats)
    # interfaceStats() returns the 8 counters in this fixed order.
    fields = ('rx_bytes', 'rx_packets', 'rx_errs', 'rx_drop',
              'tx_bytes', 'tx_packets', 'tx_errs', 'tx_drop')
    for field, value in zip(fields, stats):
        logger.info("%s %s %s" % (path[0], field, value))
    return 0
| gpl-2.0 |
apple/llvm-project | lldb/examples/python/symbolication.py | 14 | 28331 | #!/usr/bin/env python
#----------------------------------------------------------------------
# Be sure to add the python path that points to the LLDB shared library.
#
# To use this in the embedded python interpreter using "lldb":
#
# cd /path/containing/crashlog.py
# lldb
# (lldb) script import crashlog
# "crashlog" command installed, type "crashlog --help" for detailed help
# (lldb) crashlog ~/Library/Logs/DiagnosticReports/a.crash
#
# The benefit of running the crashlog command inside lldb in the
# embedded python interpreter is when the command completes, there
# will be a target with all of the files loaded at the locations
# described in the crash log. Only the files that have stack frames
# in the backtrace will be loaded unless the "--load-all" option
# has been specified. This allows users to explore the program in the
# state it was in right at crash time.
#
# On MacOSX csh, tcsh:
# ( setenv PYTHONPATH /path/to/LLDB.framework/Resources/Python ; ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash )
#
# On MacOSX sh, bash:
# PYTHONPATH=/path/to/LLDB.framework/Resources/Python ./crashlog.py ~/Library/Logs/DiagnosticReports/a.crash
#----------------------------------------------------------------------
from __future__ import print_function
import lldb
import optparse
import os
import plistlib
import re
import shlex
import sys
import time
import uuid
class Address:
    """Class that represents an address that will be symbolicated"""

    def __init__(self, target, load_addr):
        self.target = target  # lldb.SBTarget used for all resolution below
        self.load_addr = load_addr  # The load address that this object represents
        # the resolved lldb.SBAddress (if any), named so_addr for
        # section/offset address
        self.so_addr = None
        self.sym_ctx = None  # The cached symbol context for this address
        # Any original textual description of this address to be used as a
        # backup in case symbolication fails
        self.description = None
        self.symbolication = None  # The cached symbolicated string that describes this address
        self.inlined = False  # True once symbolicate() lands inside an inlined block

    def __str__(self):
        """Hex load address, followed by the best available description
        (symbolication > original description > section/offset address)."""
        s = "%#16.16x" % (self.load_addr)
        if self.symbolication:
            s += " %s" % (self.symbolication)
        elif self.description:
            s += " %s" % (self.description)
        elif self.so_addr:
            s += " %s" % (self.so_addr)
        return s

    def resolve_addr(self):
        """Resolve (and cache) the load address into an lldb.SBAddress."""
        if self.so_addr is None:
            self.so_addr = self.target.ResolveLoadAddress(self.load_addr)
        return self.so_addr

    def is_inlined(self):
        # Only meaningful after symbolicate() has run.
        return self.inlined

    def get_symbol_context(self):
        """Resolve (and cache) the full symbol context for this address."""
        if self.sym_ctx is None:
            sb_addr = self.resolve_addr()
            if sb_addr:
                self.sym_ctx = self.target.ResolveSymbolContextForAddress(
                    sb_addr, lldb.eSymbolContextEverything)
            else:
                # Unresolvable address: cache an empty context.
                self.sym_ctx = lldb.SBSymbolContext()
        return self.sym_ctx

    def get_instructions(self):
        """Return the SBInstructionList for the enclosing function (preferred)
        or symbol, or None if the address cannot be resolved."""
        sym_ctx = self.get_symbol_context()
        if sym_ctx:
            function = sym_ctx.GetFunction()
            if function:
                return function.GetInstructions(self.target)
            return sym_ctx.GetSymbol().GetInstructions(self.target)
        return None

    def symbolicate(self, verbose=False):
        """Build (and cache) the human-readable symbolication string:
        "module`function [+ offset] [at file:line[:column]]".
        Returns True on success, False if the address could not be
        symbolicated."""
        if self.symbolication is None:
            self.symbolication = ''
            self.inlined = False
            sym_ctx = self.get_symbol_context()
            if sym_ctx:
                module = sym_ctx.GetModule()
                if module:
                    # Print full source file path in verbose mode
                    if verbose:
                        self.symbolication += str(module.GetFileSpec()) + '`'
                    else:
                        self.symbolication += module.GetFileSpec().GetFilename() + '`'
                    function_start_load_addr = -1
                    function = sym_ctx.GetFunction()
                    block = sym_ctx.GetBlock()
                    line_entry = sym_ctx.GetLineEntry()
                    symbol = sym_ctx.GetSymbol()
                    inlined_block = block.GetContainingInlinedBlock()
                    if function:
                        self.symbolication += function.GetName()
                        if inlined_block:
                            # Inside an inlined function: report the inlined
                            # name and compute the offset from the inlined
                            # block's range start instead of the function start.
                            self.inlined = True
                            self.symbolication += ' [inlined] ' + \
                                inlined_block.GetInlinedName()
                            block_range_idx = inlined_block.GetRangeIndexForBlockAddress(
                                self.so_addr)
                            if block_range_idx < lldb.UINT32_MAX:
                                block_range_start_addr = inlined_block.GetRangeStartAddress(
                                    block_range_idx)
                                function_start_load_addr = block_range_start_addr.GetLoadAddress(
                                    self.target)
                        if function_start_load_addr == -1:
                            function_start_load_addr = function.GetStartAddress().GetLoadAddress(self.target)
                    elif symbol:
                        self.symbolication += symbol.GetName()
                        function_start_load_addr = symbol.GetStartAddress().GetLoadAddress(self.target)
                    else:
                        # Neither function nor symbol: give up and clear the
                        # partial string so __str__ falls back to description.
                        self.symbolication = ''
                        return False
                    # Dump the offset from the current function or symbol if it
                    # is non zero
                    function_offset = self.load_addr - function_start_load_addr
                    if function_offset > 0:
                        self.symbolication += " + %u" % (function_offset)
                    elif function_offset < 0:
                        self.symbolication += " %i (invalid negative offset, file a bug) " % function_offset
                    # Print out any line information if any is available
                    if line_entry.GetFileSpec():
                        # Print full source file path in verbose mode
                        if verbose:
                            self.symbolication += ' at %s' % line_entry.GetFileSpec()
                        else:
                            self.symbolication += ' at %s' % line_entry.GetFileSpec().GetFilename()
                        self.symbolication += ':%u' % line_entry.GetLine()
                        column = line_entry.GetColumn()
                        if column > 0:
                            self.symbolication += ':%u' % column
                    return True
        return False
class Section:
    """Class that represents a load address range.

    A Section has a name and either a start address alone, or a
    [start, end) range. It can be parsed from command-line strings of the
    form "<name>=<base>", "<name>=<base>-<end>" or "<name>=<base>+<size>".
    """
    # BUG FIX: regex patterns now use raw strings; the originals relied on
    # '\s' surviving in a normal string literal, which raises
    # DeprecationWarning/SyntaxWarning on modern Python.
    sect_info_regex = re.compile(r'(?P<name>[^=]+)=(?P<range>.*)')
    addr_regex = re.compile(r'^\s*(?P<start>0x[0-9A-Fa-f]+)\s*$')
    range_regex = re.compile(
        r'^\s*(?P<start>0x[0-9A-Fa-f]+)\s*(?P<op>[-+])\s*(?P<end>0x[0-9A-Fa-f]+)\s*$')
    def __init__(self, start_addr=None, end_addr=None, name=None):
        self.start_addr = start_addr  # inclusive start load address (int) or None
        self.end_addr = end_addr      # exclusive end load address (int) or None
        self.name = name              # section name, e.g. "__TEXT"
    @classmethod
    def InitWithSBTargetAndSBSection(cls, target, section):
        """Build a Section from an lldb.SBSection loaded in *target*.

        Returns None when the section has no load address in the target."""
        sect_load_addr = section.GetLoadAddress(target)
        if sect_load_addr != lldb.LLDB_INVALID_ADDRESS:
            obj = cls(
                sect_load_addr,
                sect_load_addr +
                section.size,
                section.name)
            return obj
        else:
            return None
    def contains(self, addr):
        """Return True if *addr* lies within [start_addr, end_addr)."""
        return self.start_addr <= addr and addr < self.end_addr
    def set_from_string(self, s):
        """Parse "<name>=<base>[-<end>|+<size>]" into this object.

        Returns True on success; on failure prints usage help and
        returns False."""
        match = self.sect_info_regex.match(s)
        if match:
            self.name = match.group('name')
            range_str = match.group('range')
            addr_match = self.addr_regex.match(range_str)
            if addr_match:
                self.start_addr = int(addr_match.group('start'), 16)
                self.end_addr = None
                return True
            range_match = self.range_regex.match(range_str)
            if range_match:
                self.start_addr = int(range_match.group('start'), 16)
                self.end_addr = int(range_match.group('end'), 16)
                op = range_match.group('op')
                if op == '+':
                    # "+<size>" form: the second number is a size, not an end.
                    self.end_addr += self.start_addr
                return True
        print('error: invalid section info string "%s"' % s)
        print('Valid section info formats are:')
        print('Format Example Description')
        print('--------------------- -----------------------------------------------')
        print('<name>=<base> __TEXT=0x123000 Section from base address only')
        print('<name>=<base>-<end> __TEXT=0x123000-0x124000 Section from base address and end address')
        print('<name>=<base>+<size> __TEXT=0x123000+0x1000 Section from base address and size')
        return False
    def __str__(self):
        if self.name:
            if self.end_addr is not None:
                if self.start_addr is not None:
                    return "%s=[0x%16.16x - 0x%16.16x)" % (
                        self.name, self.start_addr, self.end_addr)
            else:
                if self.start_addr is not None:
                    return "%s=0x%16.16x" % (self.name, self.start_addr)
            return self.name
        return "<invalid>"
class Image:
    """A class that represents an executable image and any associated data"""

    def __init__(self, path, uuid=None):
        # NOTE: the *uuid* parameter intentionally shadows the module-level
        # uuid module inside this function's scope.
        self.path = path
        self.resolved_path = None   # path found by locate_module_and_debug_symbols()
        self.resolved = False
        self.unavailable = False    # set externally when the binary can't be found
        self.uuid = uuid
        self.section_infos = list() # list of Section objects with load addresses
        self.identifier = None
        self.version = None
        self.arch = None            # triple string, e.g. "x86_64-apple-macosx"
        self.module = None          # lldb.SBModule once added to a target
        self.symfile = None         # optional external symbol file path
        self.slide = None           # whole-image slide, alternative to sections
    @classmethod
    def InitWithSBTargetAndSBModule(cls, target, module):
        '''Initialize this Image object with a module from a target.'''
        obj = cls(module.file.fullpath, module.uuid)
        obj.resolved_path = module.platform_file.fullpath
        obj.resolved = True
        for section in module.sections:
            symb_section = Section.InitWithSBTargetAndSBSection(
                target, section)
            if symb_section:
                obj.section_infos.append(symb_section)
        obj.arch = module.triple
        obj.module = module
        obj.symfile = None
        obj.slide = None
        return obj
    def dump(self, prefix):
        """Print this image on one line, preceded by *prefix*."""
        print("%s%s" % (prefix, self))
    def debug_dump(self):
        """Print every attribute of this image, one per line."""
        print('path = "%s"' % (self.path))
        print('resolved_path = "%s"' % (self.resolved_path))
        print('resolved = %i' % (self.resolved))
        print('unavailable = %i' % (self.unavailable))
        print('uuid = %s' % (self.uuid))
        print('section_infos = %s' % (self.section_infos))
        print('identifier = "%s"' % (self.identifier))
        print('version = %s' % (self.version))
        print('arch = %s' % (self.arch))
        print('module = %s' % (self.module))
        print('symfile = "%s"' % (self.symfile))
        # BUG FIX: the original unconditionally formatted slide with %i,
        # raising TypeError whenever slide was still None.
        if self.slide is None:
            print('slide = None')
        else:
            print('slide = %i (0x%x)' % (self.slide, self.slide))
    def __str__(self):
        s = ''
        if self.uuid:
            s += "%s " % (self.get_uuid())
        if self.arch:
            s += "%s " % (self.arch)
        if self.version:
            s += "%s " % (self.version)
        resolved_path = self.get_resolved_path()
        if resolved_path:
            s += "%s " % (resolved_path)
        for section_info in self.section_infos:
            s += ", %s" % (section_info)
        if self.slide is not None:
            s += ', slide = 0x%16.16x' % self.slide
        return s
    def add_section(self, section):
        # print "added '%s' to '%s'" % (section, self.path)
        self.section_infos.append(section)
    def get_section_containing_load_addr(self, load_addr):
        """Return the first Section whose range contains *load_addr*, or None."""
        for section_info in self.section_infos:
            if section_info.contains(load_addr):
                return section_info
        return None
    def get_resolved_path(self):
        """Return resolved_path when known, falling back to the original path."""
        if self.resolved_path:
            return self.resolved_path
        elif self.path:
            return self.path
        return None
    def get_resolved_path_basename(self):
        path = self.get_resolved_path()
        if path:
            return os.path.basename(path)
        return None
    def symfile_basename(self):
        if self.symfile:
            return os.path.basename(self.symfile)
        return None
    def has_section_load_info(self):
        # Either explicit per-section addresses or a whole-image slide.
        return self.section_infos or self.slide is not None
    def load_module(self, target):
        """Set this image's load addresses in *target*.

        Returns None on success, or an error string describing the failure."""
        if self.unavailable:
            return None # We already warned that we couldn't find this module, so don't return an error string
        # Load this module into "target" using the section infos to
        # set the section load addresses
        if self.has_section_load_info():
            if target:
                if self.module:
                    if self.section_infos:
                        num_sections_loaded = 0
                        for section_info in self.section_infos:
                            if section_info.name:
                                section = self.module.FindSection(
                                    section_info.name)
                                if section:
                                    error = target.SetSectionLoadAddress(
                                        section, section_info.start_addr)
                                    if error.Success():
                                        num_sections_loaded += 1
                                    else:
                                        return 'error: %s' % error.GetCString()
                                else:
                                    return 'error: unable to find the section named "%s"' % section_info.name
                            else:
                                # BUG FIX: the original formatted `range.name`
                                # here -- `range` is the Python builtin, so any
                                # unnamed section raised AttributeError instead
                                # of producing this error string.
                                return 'error: unable to find "%s" section in "%s"' % (
                                    section_info, self.get_resolved_path())
                        if num_sections_loaded == 0:
                            return 'error: no sections were successfully loaded'
                    else:
                        err = target.SetModuleLoadAddress(
                            self.module, self.slide)
                        if err.Fail():
                            return err.GetCString()
                    return None
                else:
                    return 'error: invalid module'
            else:
                return 'error: invalid target'
        else:
            return 'error: no section infos'
    def add_module(self, target):
        '''Add the Image described in this object to "target" and load the sections if "load" is True.'''
        if target:
            # Try and find using UUID only first so that paths need not match
            # up
            uuid_str = self.get_normalized_uuid_string()
            if uuid_str:
                self.module = target.AddModule(None, None, uuid_str)
            if not self.module:
                self.locate_module_and_debug_symbols()
                if self.unavailable:
                    return None
                resolved_path = self.get_resolved_path()
                self.module = target.AddModule(
                    resolved_path, str(self.arch), uuid_str, self.symfile)
            if not self.module:
                return 'error: unable to get module for (%s) "%s"' % (
                    self.arch, self.get_resolved_path())
            if self.has_section_load_info():
                return self.load_module(target)
            else:
                return None # No sections, the module was added to the target, so success
        else:
            return 'error: invalid target'
    def locate_module_and_debug_symbols(self):
        # By default, just use the paths that were supplied in:
        # self.path
        # self.resolved_path
        # self.module
        # self.symfile
        # Subclasses can inherit from this class and override this function
        self.resolved = True
        return True
    def get_uuid(self):
        if not self.uuid and self.module:
            self.uuid = uuid.UUID(self.module.GetUUIDString())
        return self.uuid
    def get_normalized_uuid_string(self):
        if self.uuid:
            return str(self.uuid).upper()
        return None
    def create_target(self, debugger):
        '''Create a target using the information in this Image object.'''
        if self.unavailable:
            return None
        if self.locate_module_and_debug_symbols():
            resolved_path = self.get_resolved_path()
            path_spec = lldb.SBFileSpec(resolved_path)
            error = lldb.SBError()
            target = debugger.CreateTarget(
                resolved_path, self.arch, None, False, error)
            if target:
                self.module = target.FindModule(path_spec)
                if self.has_section_load_info():
                    err = self.load_module(target)
                    if err:
                        print('ERROR: ', err)
                return target
            else:
                print('error: unable to create a valid target for (%s) "%s"' % (self.arch, self.path))
        else:
            print('error: unable to locate main executable (%s) "%s"' % (self.arch, self.path))
        return None
class Symbolicator:

    def __init__(self, debugger=None, target=None, images=None):
        """A class the represents the information needed to symbolicate
        addresses in a program.
        Do not call this initializer directly, but rather use the factory
        methods.
        """
        self.debugger = debugger
        self.target = target
        # BUG FIX: the original signature used a mutable default
        # (images=list()), so every Symbolicator created without an explicit
        # list shared -- and mutated -- one and the same list object.
        if images is None:
            images = []
        self.images = images # a list of images to be used when symbolicating
        self.addr_mask = 0xffffffffffffffff

    @classmethod
    def InitWithSBTarget(cls, target):
        """Initialize a new Symbolicator with an existing SBTarget."""
        obj = cls(target=target)
        triple = target.triple
        if triple:
            arch = triple.split('-')[0]
            if "arm" in arch:
                # ARM PCs have the Thumb bit in bit zero; mask it off.
                obj.addr_mask = 0xfffffffffffffffe
        for module in target.modules:
            image = Image.InitWithSBTargetAndSBModule(target, module)
            obj.images.append(image)
        return obj

    @classmethod
    def InitWithSBDebugger(cls, debugger, images):
        """Initialize a new Symbolicator with an existing debugger and list of
        images. The Symbolicator will create the target."""
        obj = cls(debugger=debugger, images=images)
        return obj

    def __str__(self):
        s = "Symbolicator:\n"
        if self.target:
            s += "Target = '%s'\n" % (self.target)
            s += "Target modules:\n"
            for m in self.target.modules:
                s += str(m) + "\n"
        s += "Images:\n"
        for image in self.images:
            s += '    %s\n' % (image)
        return s

    def find_images_with_identifier(self, identifier):
        """Return images whose identifier matches exactly; when none match,
        fall back to a dotted-suffix regex match (e.g. "Foo" matches
        "com.apple.Foo")."""
        images = list()
        for image in self.images:
            if image.identifier == identifier:
                images.append(image)
        if len(images) == 0:
            regex_text = r'^.*\.%s$' % (re.escape(identifier))
            regex = re.compile(regex_text)
            for image in self.images:
                # BUG FIX: skip images with no identifier -- re.match(None)
                # raises TypeError.
                if image.identifier and regex.match(image.identifier):
                    images.append(image)
        return images

    def find_image_containing_load_addr(self, load_addr):
        for image in self.images:
            if image.get_section_containing_load_addr(load_addr):
                return image
        return None

    def create_target(self):
        """Create (and cache) a target from the first image that yields one.

        Returns the existing target when one is already set; None when no
        image can produce a target."""
        if self.target:
            return self.target
        if self.images:
            for image in self.images:
                self.target = image.create_target(self.debugger)
                if self.target:
                    if self.target.GetAddressByteSize() == 4:
                        triple = self.target.triple
                        if triple:
                            arch = triple.split('-')[0]
                            if "arm" in arch:
                                self.addr_mask = 0xfffffffffffffffe
                    return self.target
        return None

    def symbolicate(self, load_addr, verbose=False):
        """Symbolicate *load_addr*, returning a list of Address objects
        (outermost inlined frames appended after the innermost), or None
        when symbolication fails or there is no target."""
        if not self.target:
            self.create_target()
        if self.target:
            live_process = False
            process = self.target.process
            if process:
                state = process.state
                if state > lldb.eStateUnloaded and state < lldb.eStateDetached:
                    live_process = True
            # If we don't have a live process, we can attempt to find the image
            # that a load address belongs to and lazily load its module in the
            # target, but we shouldn't do any of this if we have a live process
            if not live_process:
                image = self.find_image_containing_load_addr(load_addr)
                if image:
                    image.add_module(self.target)
            symbolicated_address = Address(self.target, load_addr)
            if symbolicated_address.symbolicate(verbose):
                if symbolicated_address.so_addr:
                    symbolicated_addresses = list()
                    symbolicated_addresses.append(symbolicated_address)
                    # See if we were able to reconstruct anything?
                    while True:
                        inlined_parent_so_addr = lldb.SBAddress()
                        inlined_parent_sym_ctx = symbolicated_address.sym_ctx.GetParentOfInlinedScope(
                            symbolicated_address.so_addr, inlined_parent_so_addr)
                        if not inlined_parent_sym_ctx:
                            break
                        if not inlined_parent_so_addr:
                            break
                        symbolicated_address = Address(
                            self.target, inlined_parent_so_addr.GetLoadAddress(
                                self.target))
                        symbolicated_address.sym_ctx = inlined_parent_sym_ctx
                        symbolicated_address.so_addr = inlined_parent_so_addr
                        symbolicated_address.symbolicate(verbose)
                        # push the new frame onto the new frame stack
                        symbolicated_addresses.append(symbolicated_address)
                    if symbolicated_addresses:
                        return symbolicated_addresses
        else:
            print('error: no target in Symbolicator')
        return None
def disassemble_instructions(
        target,
        instructions,
        pc,
        insts_before_pc,
        insts_after_pc,
        non_zeroeth_frame):
    """Print a window of *instructions* around *pc*, marking the PC line.

    insts_before_pc / insts_after_pc bound the window; -1 means unbounded
    in that direction. non_zeroeth_frame backs the PC up one instruction
    so return addresses point at the call site. Prints nothing when *pc*
    is not one of the instruction addresses.
    """
    lines = list()
    pc_index = -1
    comment_column = 50
    for inst_idx, inst in enumerate(instructions):
        inst_pc = inst.GetAddress().GetLoadAddress(target)
        if pc == inst_pc:
            pc_index = inst_idx
        mnemonic = inst.GetMnemonic(target)
        operands = inst.GetOperands(target)
        comment = inst.GetComment(target)
        lines.append("%#16.16x: %8s %s" % (inst_pc, mnemonic, operands))
        if comment:
            # Pad so all ";" comments line up in one column.
            line_len = len(lines[-1])
            if line_len < comment_column:
                lines[-1] += ' ' * (comment_column - line_len)
            lines[-1] += "; %s" % comment
    if pc_index >= 0:
        # If we are disassembling the non-zeroeth frame, we need to backup the
        # PC by 1
        if non_zeroeth_frame and pc_index > 0:
            pc_index = pc_index - 1
        if insts_before_pc == -1:
            start_idx = 0
        else:
            start_idx = pc_index - insts_before_pc
        if start_idx < 0:
            start_idx = 0
        # BUG FIX: the original tested insts_before_pc here a second time,
        # so the "unbounded after" case (insts_after_pc == -1) was decided
        # by the *before* parameter.
        if insts_after_pc == -1:
            end_idx = inst_idx
        else:
            end_idx = pc_index + insts_after_pc
        if end_idx > inst_idx:
            end_idx = inst_idx
        for i in range(start_idx, end_idx + 1):
            if i == pc_index:
                print(' -> ', lines[i])
            else:
                print('    ', lines[i])
def print_module_section_data(section):
    """Print a section header followed by a description of its raw data."""
    print(section)
    data = section.GetSectionData()
    if not data:
        return
    stream = lldb.SBStream()
    data.GetDescription(stream, section.GetFileAddress())
    print(stream.GetData())
def print_module_section(section, depth):
    """Print *section*, recursing into sub-sections up to *depth* levels."""
    print(section)
    if depth <= 0:
        return
    for idx in range(section.GetNumSubSections()):
        print_module_section(section.GetSubSectionAtIndex(idx), depth - 1)
def print_module_sections(module, depth):
    """Print every top-level section of *module*, each to *depth* levels."""
    for top_section in module.section_iter():
        print_module_section(top_section, depth)
def print_module_symbols(module):
    """Print each symbol in *module*'s symbol table, one per line."""
    for symbol in module:
        print(symbol)
def Symbolicate(debugger, command_args):
    """Parse crashlog-style options from *command_args* and symbolicate the
    remaining positional arguments as addresses, printing one result group
    per address. Returns None.
    """
    usage = "usage: %prog [options] <addr1> [addr2 ...]"
    description = '''Symbolicate one or more addresses using LLDB's python scripting API..'''
    parser = optparse.OptionParser(
        description=description,
        prog='crashlog.py',
        usage=usage)
    parser.add_option(
        '-v',
        '--verbose',
        action='store_true',
        dest='verbose',
        help='display verbose debug info',
        default=False)
    parser.add_option(
        '-p',
        '--platform',
        type='string',
        metavar='platform',
        dest='platform',
        help='Specify the platform to use when creating the debug target. Valid values include "localhost", "darwin-kernel", "ios-simulator", "remote-freebsd", "remote-macosx", "remote-ios", "remote-linux".')
    parser.add_option(
        '-f',
        '--file',
        type='string',
        metavar='file',
        dest='file',
        help='Specify a file to use when symbolicating')
    parser.add_option(
        '-a',
        '--arch',
        type='string',
        metavar='arch',
        dest='arch',
        help='Specify a architecture to use when symbolicating')
    parser.add_option(
        '-s',
        '--slide',
        type='int',
        metavar='slide',
        dest='slide',
        help='Specify the slide to use on the file specified with the --file option',
        default=None)
    parser.add_option(
        '--section',
        type='string',
        action='append',
        dest='section_strings',
        help='specify <sect-name>=<start-addr> or <sect-name>=<start-addr>-<end-addr>')
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        # optparse reports bad options via sys.exit(); swallow the resulting
        # SystemExit so this function can be used as an interactive command.
        return
    symbolicator = Symbolicator(debugger)
    if options.file:
        image = Image(options.file)
        image.arch = options.arch
        # Add any sections that were specified with one or more --section
        # options
        if options.section_strings:
            for section_str in options.section_strings:
                section = Section()
                if section.set_from_string(section_str):
                    image.add_section(section)
                else:
                    sys.exit(1)
        if options.slide is not None:
            image.slide = options.slide
        symbolicator.images.append(image)
    target = symbolicator.create_target()
    if options.verbose:
        print(symbolicator)
    if target:
        for addr_str in args:
            addr = int(addr_str, 0)
            symbolicated_addrs = symbolicator.symbolicate(
                addr, options.verbose)
            # BUG FIX: symbolicate() returns None on failure; the original
            # iterated over the result unconditionally and crashed with
            # "TypeError: 'NoneType' object is not iterable".
            if not symbolicated_addrs:
                print('error: unable to symbolicate %#x' % addr)
                print()
                continue
            for symbolicated_addr in symbolicated_addrs:
                print(symbolicated_addr)
            print()
    else:
        print('error: no target for %s' % (symbolicator))
if __name__ == '__main__':
    # Create a new debugger instance
    debugger = lldb.SBDebugger.Create()
    try:
        Symbolicate(debugger, sys.argv[1:])
    finally:
        # BUG FIX: the original called SBDebugger.Destroy(debugger) without
        # the "lldb." prefix, which raised NameError after every run; it is
        # also now in a finally block so the debugger is always torn down.
        lldb.SBDebugger.Destroy(debugger)
| apache-2.0 |
amalakar/dd-agent | checks.d/postgres.py | 1 | 25315 | """PostgreSQL check
Collects database-wide metrics and optionally per-relation metrics, custom metrics.
"""
# stdlib
import socket
# 3rd party
import pg8000 as pg
from pg8000 import InterfaceError, ProgrammingError
# project
from checks import AgentCheck, CheckException
from config import _is_affirmative
MAX_CUSTOM_RESULTS = 100
class ShouldRestartException(Exception):
    """Raised when the PostgreSQL connection must be re-established
    (e.g. after an InterfaceError or socket error during collection)."""
class PostgreSql(AgentCheck):
"""Collects per-database, and optionally per-relation metrics, custom metrics
"""
SOURCE_TYPE_NAME = 'postgresql'
RATE = AgentCheck.rate
GAUGE = AgentCheck.gauge
MONOTONIC = AgentCheck.monotonic_count
SERVICE_CHECK_NAME = 'postgres.can_connect'
# turning columns into tags
DB_METRICS = {
'descriptors': [
('datname', 'db')
],
'metrics': {},
'query': """
SELECT datname,
%s
FROM pg_stat_database
WHERE datname not ilike 'template%%'
AND datname not ilike 'postgres'
AND datname not ilike 'rdsadmin'
""",
'relation': False,
}
COMMON_METRICS = {
'numbackends' : ('postgresql.connections', GAUGE),
'xact_commit' : ('postgresql.commits', RATE),
'xact_rollback' : ('postgresql.rollbacks', RATE),
'blks_read' : ('postgresql.disk_read', RATE),
'blks_hit' : ('postgresql.buffer_hit', RATE),
'tup_returned' : ('postgresql.rows_returned', RATE),
'tup_fetched' : ('postgresql.rows_fetched', RATE),
'tup_inserted' : ('postgresql.rows_inserted', RATE),
'tup_updated' : ('postgresql.rows_updated', RATE),
'tup_deleted' : ('postgresql.rows_deleted', RATE),
'pg_database_size(datname) as pg_database_size' : ('postgresql.database_size', GAUGE),
}
NEWER_92_METRICS = {
'deadlocks' : ('postgresql.deadlocks', RATE),
'temp_bytes' : ('postgresql.temp_bytes', RATE),
'temp_files' : ('postgresql.temp_files', RATE),
}
BGW_METRICS = {
'descriptors': [],
'metrics': {},
'query': "select %s FROM pg_stat_bgwriter",
'relation': False,
}
COMMON_BGW_METRICS = {
'checkpoints_timed' : ('postgresql.bgwriter.checkpoints_timed', MONOTONIC),
'checkpoints_req' : ('postgresql.bgwriter.checkpoints_requested', MONOTONIC),
'buffers_checkpoint' : ('postgresql.bgwriter.buffers_checkpoint', MONOTONIC),
'buffers_clean' : ('postgresql.bgwriter.buffers_clean', MONOTONIC),
'maxwritten_clean' : ('postgresql.bgwriter.maxwritten_clean', MONOTONIC),
'buffers_backend' : ('postgresql.bgwriter.buffers_backend', MONOTONIC),
'buffers_alloc' : ('postgresql.bgwriter.buffers_alloc', MONOTONIC),
}
NEWER_91_BGW_METRICS = {
'buffers_backend_fsync': ('postgresql.bgwriter.buffers_backend_fsync', MONOTONIC),
}
NEWER_92_BGW_METRICS = {
'checkpoint_write_time': ('postgresql.bgwriter.write_time', MONOTONIC),
'checkpoint_sync_time' : ('postgresql.bgwriter.sync_time', MONOTONIC),
}
LOCK_METRICS = {
'descriptors': [
('mode', 'lock_mode'),
('relname', 'table'),
],
'metrics': {
'lock_count' : ('postgresql.locks', GAUGE),
},
'query': """
SELECT mode,
pc.relname,
count(*) AS %s
FROM pg_locks l
JOIN pg_class pc ON (l.relation = pc.oid)
WHERE l.mode IS NOT NULL
AND pc.relname NOT LIKE 'pg_%%'
GROUP BY pc.relname, mode""",
'relation': False,
}
REL_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema'),
],
'metrics': {
'seq_scan' : ('postgresql.seq_scans', RATE),
'seq_tup_read' : ('postgresql.seq_rows_read', RATE),
'idx_scan' : ('postgresql.index_scans', RATE),
'idx_tup_fetch' : ('postgresql.index_rows_fetched', RATE),
'n_tup_ins' : ('postgresql.rows_inserted', RATE),
'n_tup_upd' : ('postgresql.rows_updated', RATE),
'n_tup_del' : ('postgresql.rows_deleted', RATE),
'n_tup_hot_upd' : ('postgresql.rows_hot_updated', RATE),
'n_live_tup' : ('postgresql.live_rows', GAUGE),
'n_dead_tup' : ('postgresql.dead_rows', GAUGE),
},
'query': """
SELECT relname,schemaname,%s
FROM pg_stat_user_tables
WHERE relname = ANY(array[%s])""",
'relation': True,
}
IDX_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema'),
('indexrelname', 'index')
],
'metrics': {
'idx_scan' : ('postgresql.index_scans', RATE),
'idx_tup_read' : ('postgresql.index_rows_read', RATE),
'idx_tup_fetch' : ('postgresql.index_rows_fetched', RATE),
},
'query': """
SELECT relname,
schemaname,
indexrelname,
%s
FROM pg_stat_user_indexes
WHERE relname = ANY(array[%s])""",
'relation': True,
}
SIZE_METRICS = {
'descriptors': [
('relname', 'table'),
],
'metrics': {
'pg_table_size(C.oid) as table_size' : ('postgresql.table_size', GAUGE),
'pg_indexes_size(C.oid) as index_size' : ('postgresql.index_size', GAUGE),
'pg_total_relation_size(C.oid) as total_size' : ('postgresql.total_size', GAUGE),
},
'relation': True,
'query': """
SELECT
relname,
%s
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema') AND
nspname !~ '^pg_toast' AND
relkind IN ('r') AND
relname = ANY(array[%s])"""
}
COUNT_METRICS = {
'descriptors': [
('schemaname', 'schema')
],
'metrics': {
'pg_stat_user_tables': ('postgresql.table.count', GAUGE),
},
'relation': False,
'query': """
SELECT schemaname, count(*)
FROM %s
GROUP BY schemaname
"""
}
REPLICATION_METRICS_9_1 = {
'CASE WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0 ELSE GREATEST (0, EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())) END': ('postgresql.replication_delay', GAUGE),
}
REPLICATION_METRICS_9_2 = {
'abs(pg_xlog_location_diff(pg_last_xlog_receive_location(), pg_last_xlog_replay_location())) AS replication_delay_bytes': ('postgres.replication_delay_bytes', GAUGE)
}
REPLICATION_METRICS = {
'descriptors': [],
'metrics': {},
'relation': False,
'query': """
SELECT %s
WHERE (SELECT pg_is_in_recovery())"""
}
CONNECTION_METRICS = {
'descriptors': [],
'metrics': {
'MAX(setting) AS max_connections': ('postgresql.max_connections', GAUGE),
'SUM(numbackends)/MAX(setting) AS pct_connections': ('postgresql.percent_usage_connections', GAUGE),
},
'relation': False,
'query': """
WITH max_con AS (SELECT setting::float FROM pg_settings WHERE name = 'max_connections')
SELECT %s
FROM pg_stat_database, max_con
"""
}
STATIO_METRICS = {
'descriptors': [
('relname', 'table'),
('schemaname', 'schema')
],
'metrics': {
'heap_blks_read' : ('postgresql.heap_blocks_read', RATE),
'heap_blks_hit' : ('postgresql.heap_blocks_hit', RATE),
'idx_blks_read' : ('postgresql.index_blocks_read', RATE),
'idx_blks_hit' : ('postgresql.index_blocks_hit', RATE),
'toast_blks_read' : ('postgresql.toast_blocks_read', RATE),
'toast_blks_hit' : ('postgresql.toast_blocks_hit', RATE),
'tidx_blks_read' : ('postgresql.toast_index_blocks_read', RATE),
'tidx_blks_hit' : ('postgresql.toast_index_blocks_hit', RATE),
},
'query': """
SELECT relname,
schemaname,
%s
FROM pg_statio_user_tables
WHERE relname = ANY(array[%s])""",
'relation': True,
}
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.dbs = {}
self.versions = {}
self.instance_metrics = {}
self.bgw_metrics = {}
self.db_instance_metrics = []
self.db_bgw_metrics = []
self.replication_metrics = {}
self.custom_metrics = {}
def _get_version(self, key, db):
if key not in self.versions:
cursor = db.cursor()
cursor.execute('SHOW SERVER_VERSION;')
result = cursor.fetchone()
try:
version = map(int, result[0].split('.'))
except Exception:
version = result[0]
self.versions[key] = version
self.service_metadata('version', self.versions[key])
return self.versions[key]
def _is_above(self, key, db, version_to_compare):
version = self._get_version(key, db)
if type(version) == list:
return version >= version_to_compare
return False
def _is_9_1_or_above(self, key, db):
return self._is_above(key, db, [9,1,0])
def _is_9_2_or_above(self, key, db):
return self._is_above(key, db, [9,2,0])
def _get_instance_metrics(self, key, db):
"""Use either COMMON_METRICS or COMMON_METRICS + NEWER_92_METRICS
depending on the postgres version.
Uses a dictionnary to save the result for each instance
"""
# Extended 9.2+ metrics if needed
metrics = self.instance_metrics.get(key)
if metrics is None:
# Hack to make sure that if we have multiple instances that connect to
# the same host, port, we don't collect metrics twice
# as it will result in https://github.com/DataDog/dd-agent/issues/1211
sub_key = key[:2]
if sub_key in self.db_instance_metrics:
self.instance_metrics[key] = None
self.log.debug("Not collecting instance metrics for key: {0} as"
" they are already collected by another instance".format(key))
return None
self.db_instance_metrics.append(sub_key)
if self._is_9_2_or_above(key, db):
self.instance_metrics[key] = dict(self.COMMON_METRICS, **self.NEWER_92_METRICS)
else:
self.instance_metrics[key] = dict(self.COMMON_METRICS)
metrics = self.instance_metrics.get(key)
return metrics
def _get_bgw_metrics(self, key, db):
"""Use either COMMON_BGW_METRICS or COMMON_BGW_METRICS + NEWER_92_BGW_METRICS
depending on the postgres version.
Uses a dictionnary to save the result for each instance
"""
# Extended 9.2+ metrics if needed
metrics = self.bgw_metrics.get(key)
if metrics is None:
# Hack to make sure that if we have multiple instances that connect to
# the same host, port, we don't collect metrics twice
# as it will result in https://github.com/DataDog/dd-agent/issues/1211
sub_key = key[:2]
if sub_key in self.db_bgw_metrics:
self.bgw_metrics[key] = None
self.log.debug("Not collecting bgw metrics for key: {0} as"
" they are already collected by another instance".format(key))
return None
self.db_bgw_metrics.append(sub_key)
self.bgw_metrics[key] = dict(self.COMMON_BGW_METRICS)
if self._is_9_1_or_above(key, db):
self.bgw_metrics[key].update(self.NEWER_91_BGW_METRICS)
if self._is_9_2_or_above(key, db):
self.bgw_metrics[key].update(self.NEWER_92_BGW_METRICS)
metrics = self.bgw_metrics.get(key)
return metrics
def _get_replication_metrics(self, key, db):
""" Use either REPLICATION_METRICS_9_1 or REPLICATION_METRICS_9_1 + REPLICATION_METRICS_9_2
depending on the postgres version.
Uses a dictionnary to save the result for each instance
"""
metrics = self.replication_metrics.get(key)
if self._is_9_1_or_above(key, db) and metrics is None:
self.replication_metrics[key] = dict(self.REPLICATION_METRICS_9_1)
if self._is_9_2_or_above(key, db):
self.replication_metrics[key].update(self.REPLICATION_METRICS_9_2)
metrics = self.replication_metrics.get(key)
return metrics
def _build_relations_config(self, yamlconfig):
"""Builds a dictionary from relations configuration while maintaining compatibility
"""
config = {}
for element in yamlconfig:
try:
if isinstance(element, str):
config[element] = {'relation_name': element, 'schemas': []}
elif isinstance(element, dict):
name = element['relation_name']
config[name] = {}
config[name]['schemas'] = element['schemas']
config[name]['relation_name'] = name
else:
self.log.warn('Unhandled relations config type: %s' % str(element))
except KeyError:
self.log.warn('Failed to parse config element=%s, check syntax' % str(element))
return config
    def _collect_stats(self, key, db, instance_tags, relations, custom_metrics):
        """Query pg_stat_* for various metrics
        If relations is not an empty list, gather per-relation metrics
        on top of that.
        If custom_metrics is not an empty list, gather custom metrics defined in postgres.yaml

        NOTE(review): this file is Python 2 only (``except X, e`` syntax,
        dict.iterkeys/iteritems below).
        Raises ShouldRestartException on connection-level failures so the
        caller can reconnect and retry.
        """
        metric_scope = [
            self.CONNECTION_METRICS,
            self.LOCK_METRICS,
            self.COUNT_METRICS,
        ]
        # These are added only once per PG server, thus the test
        db_instance_metrics = self._get_instance_metrics(key, db)
        bgw_instance_metrics = self._get_bgw_metrics(key, db)
        if db_instance_metrics is not None:
            # FIXME: constants shouldn't be modified
            self.DB_METRICS['metrics'] = db_instance_metrics
            metric_scope.append(self.DB_METRICS)
        if bgw_instance_metrics is not None:
            # FIXME: constants shouldn't be modified
            self.BGW_METRICS['metrics'] = bgw_instance_metrics
            metric_scope.append(self.BGW_METRICS)
        # Do we need relation-specific metrics?
        if relations:
            metric_scope += [
                self.REL_METRICS,
                self.IDX_METRICS,
                self.SIZE_METRICS,
                self.STATIO_METRICS
            ]
            relations_config = self._build_relations_config(relations)
        replication_metrics = self._get_replication_metrics(key, db)
        if replication_metrics is not None:
            # FIXME: constants shouldn't be modified
            self.REPLICATION_METRICS['metrics'] = replication_metrics
            metric_scope.append(self.REPLICATION_METRICS)
        full_metric_scope = list(metric_scope) + custom_metrics
        try:
            cursor = db.cursor()
            for scope in full_metric_scope:
                # Replication metrics (and anything on pre-9.0 servers) may
                # legitimately be unavailable, so failures are only debug-level.
                if scope == self.REPLICATION_METRICS or not self._is_above(key, db, [9,0,0]):
                    log_func = self.log.debug
                else:
                    log_func = self.log.warning
                # build query
                cols = scope['metrics'].keys() # list of metrics to query, in some order
                # we must remember that order to parse results
                try:
                    # if this is a relation-specific query, we need to list all relations last
                    if scope['relation'] and len(relations) > 0:
                        relnames = ', '.join("'{0}'".format(w) for w in relations_config.iterkeys())
                        query = scope['query'] % (", ".join(cols), "%s") # Keep the last %s intact
                        self.log.debug("Running query: %s with relations: %s" % (query, relnames))
                        cursor.execute(query % (relnames))
                    else:
                        query = scope['query'] % (", ".join(cols))
                        self.log.debug("Running query: %s" % query)
                        # Escape literal % so the driver's paramstyle
                        # formatting does not choke on it.
                        cursor.execute(query.replace(r'%', r'%%'))
                    results = cursor.fetchall()
                except ProgrammingError, e:
                    log_func("Not all metrics may be available: %s" % str(e))
                    continue
                if not results:
                    continue
                # Cap user-defined custom query output to avoid flooding.
                if scope in custom_metrics and len(results) > MAX_CUSTOM_RESULTS:
                    self.warning(
                        "Query: {0} returned more than {1} results ({2}). Truncating"
                        .format(query, MAX_CUSTOM_RESULTS, len(results))
                    )
                    results = results[:MAX_CUSTOM_RESULTS]
                # FIXME this cramps my style
                if scope == self.DB_METRICS:
                    self.gauge("postgresql.db.count", len(results),
                               tags=[t for t in instance_tags if not t.startswith("db:")])
                desc = scope['descriptors']
                # parse & submit results
                # A row should look like this
                # (descriptor, descriptor, ..., value, value, value, value, ...)
                # with descriptor a PG relation or index name, which we use to create the tags
                for row in results:
                    # Check that all columns will be processed
                    assert len(row) == len(cols) + len(desc)
                    # build a map of descriptors and their values
                    desc_map = dict(zip([x[1] for x in desc], row[0:len(desc)]))
                    if 'schema' in desc_map:
                        try:
                            relname = desc_map['table']
                            config_schemas = relations_config[relname]['schemas']
                            if config_schemas and desc_map['schema'] not in config_schemas:
                                continue
                        except KeyError:
                            pass
                    # Build tags
                    # descriptors are: (pg_name, dd_tag_name): value
                    # Special-case the "db" tag, which overrides the one that is passed as instance_tag
                    # The reason is that pg_stat_database returns all databases regardless of the
                    # connection.
                    if not scope['relation']:
                        tags = [t for t in instance_tags if not t.startswith("db:")]
                    else:
                        tags = [t for t in instance_tags]
                    tags += [("%s:%s" % (k,v)) for (k,v) in desc_map.iteritems()]
                    # [(metric-map, value), (metric-map, value), ...]
                    # metric-map is: (dd_name, "rate"|"gauge")
                    # shift the results since the first columns will be the "descriptors"
                    values = zip([scope['metrics'][c] for c in cols], row[len(desc):])
                    # To submit simply call the function for each value v
                    # v[0] == (metric_name, submit_function)
                    # v[1] == the actual value
                    # tags are
                    for v in values:
                        v[0][1](self, v[0][0], v[1], tags=tags)
            cursor.close()
        except InterfaceError, e:
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException
        except socket.error, e:
            self.log.error("Connection error: %s" % str(e))
            raise ShouldRestartException
def _get_service_check_tags(self, host, port, dbname):
    """Return the standard service-check tag list for one Postgres instance."""
    return ["host:%s" % host, "port:%s" % port, "db:%s" % dbname]
def get_connection(self, key, host, port, user, password, dbname, ssl, use_cached=True):
    """Get and memoize connections to instances.

    Connections are cached in ``self.dbs`` keyed by `key`; pass
    ``use_cached=False`` to force a fresh connection (e.g. after a
    connection error).  Raises CheckException when host/user are missing
    and re-raises driver errors after emitting a CRITICAL service check.
    """
    if key in self.dbs and use_cached:
        return self.dbs[key]
    elif host != "" and user != "":
        try:
            if host == 'localhost' and password == '':
                # Use ident method
                connection = pg.connect("user=%s dbname=%s" % (user, dbname))
            elif port != '':
                connection = pg.connect(host=host, port=port, user=user,
                    password=password, database=dbname, ssl=ssl)
            else:
                # no port configured: let the driver use its default
                connection = pg.connect(host=host, user=user, password=password,
                    database=dbname, ssl=ssl)
        except Exception as e:
            message = u'Error establishing postgres connection: %s' % (str(e))
            service_check_tags = self._get_service_check_tags(host, port, dbname)
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                tags=service_check_tags, message=message)
            raise
    else:
        if not host:
            raise CheckException("Please specify a Postgres host to connect to.")
        elif not user:
            raise CheckException("Please specify a user to connect to Postgres as.")
    # NOTE(review): when use_cached=False a stale cached connection is
    # overwritten here without being closed -- possible connection leak;
    # confirm whether the driver closes it on garbage collection.
    self.dbs[key] = connection
    return connection
def _get_custom_metrics(self, custom_metrics, key):
    """Validate and pre-process the instance's custom metric definitions.

    Results are cached per instance `key` in ``self.custom_metrics``.
    Each definition must contain the four required parameters; the
    textual collector method name ("rate"/"gauge"/"monotonic") is
    replaced in place by the matching PostgreSql submission method.
    Raises CheckException on a malformed definition.
    """
    # Pre-processed cached custom_metrics
    if key in self.custom_metrics:
        return self.custom_metrics[key]
    # Otherwise pre-process custom metrics and verify definition
    required_parameters = ("descriptors", "metrics", "query", "relation")
    for m in custom_metrics:
        for param in required_parameters:
            if param not in m:
                raise CheckException("Missing {0} parameter in custom metric".format(param))
        self.log.debug("Metric: {0}".format(m))
        # Python 2 dict API (iteritems)
        for ref, (_, mtype) in m['metrics'].iteritems():
            cap_mtype = mtype.upper()
            if cap_mtype not in ('RATE', 'GAUGE', 'MONOTONIC'):
                raise CheckException("Collector method {0} is not known."
                    " Known methods are RATE, GAUGE, MONOTONIC".format(cap_mtype))
            # Mutates the caller-supplied definition in place: the metric
            # entry therefore has to be a mutable list, not a tuple.
            m['metrics'][ref][1] = getattr(PostgreSql, cap_mtype)
            self.log.debug("Method: %s" % (str(mtype)))
    self.custom_metrics[key] = custom_metrics
    return custom_metrics
def check(self, instance):
    """AgentCheck entry point: collect PostgreSQL metrics for one instance.

    Reads the connection settings from `instance`, (re)uses a cached
    connection, collects the stats, then reports an OK service check.
    When collection raises ShouldRestartException the connection is
    rebuilt once (bypassing the cache) and collection is retried.
    """
    host = instance.get('host', '')
    port = instance.get('port', '')
    user = instance.get('username', '')
    password = instance.get('password', '')
    tags = instance.get('tags', [])
    dbname = instance.get('dbname', None)
    relations = instance.get('relations', [])
    ssl = _is_affirmative(instance.get('ssl', False))

    if relations and not dbname:
        self.warning('"dbname" parameter must be set when using the "relations" parameter.')

    if dbname is None:
        dbname = 'postgres'

    # connection/cache key identifying this instance
    key = (host, port, dbname)

    custom_metrics = self._get_custom_metrics(instance.get('custom_metrics', []), key)

    # Clean up tags in case there was a None entry in the instance
    # e.g. if the yaml contains tags: but no actual tags
    if tags is None:
        tags = []
    else:
        # de-duplicate user-supplied tags
        tags = list(set(tags))

    # preset tags to the database name
    tags.extend(["db:%s" % dbname])

    self.log.debug("Custom metrics: %s" % custom_metrics)

    db = None

    # Collect metrics
    try:
        # Check version
        db = self.get_connection(key, host, port, user, password, dbname, ssl)
        version = self._get_version(key, db)
        self.log.debug("Running check against version %s" % version)
        self._collect_stats(key, db, tags, relations, custom_metrics)
    except ShouldRestartException:
        # stale/broken connection: rebuild it once and retry
        self.log.info("Resetting the connection")
        db = self.get_connection(key, host, port, user, password, dbname, ssl, use_cached=False)
        self._collect_stats(key, db, tags, relations, custom_metrics)

    if db is not None:
        service_check_tags = self._get_service_check_tags(host, port, dbname)
        message = u'Established connection to postgres://%s:%s/%s' % (host, port, dbname)
        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
            tags=service_check_tags, message=message)
        try:
            # commit to close the current query transaction
            db.commit()
        except Exception, e:  # Python 2 except syntax
            self.log.warning("Unable to commit: {0}".format(e))
| bsd-3-clause |
Captnoord/openpli-enigma2 | lib/python/Components/Pixmap.py | 11 | 3117 | from ConditionalWidget import ConditionalWidget
from GUIComponent import GUIComponent
from enigma import ePixmap, eTimer
from Tools.Directories import resolveFilename, SCOPE_SKIN_IMAGE
from os import path
from skin import loadPixmap
class Pixmap(GUIComponent):
    """GUI component wrapping an enigma ePixmap widget."""
    GUI_WIDGET = ePixmap

    def getSize(self):
        # Return the widget size as a (width, height) tuple.
        s = self.instance.size()
        return (s.width(), s.height())
class PixmapConditional(ConditionalWidget, Pixmap):
    """Pixmap that is shown only while its condition holds."""

    def __init__(self, withTimer = True):
        # NOTE(review): `withTimer` is accepted but not forwarded to
        # ConditionalWidget.__init__ -- confirm whether that is intended.
        ConditionalWidget.__init__(self)
        Pixmap.__init__(self)
class MovingPixmap(Pixmap):
    """Pixmap animated along a list of way points.

    The animation is timer driven: every 100 ms `doMove` advances the
    position by one interpolation step until the current way point's
    step budget (`time` ticks) is used up, then moves on to the next
    point (optionally wrapping around when `repeated` is set).
    """

    def __init__(self):
        Pixmap.__init__(self)
        self.moving = False

        # TODO: get real values
        self.x = 0.0
        self.y = 0.0

        self.clearPath()

        self.moveTimer = eTimer()
        self.moveTimer.callback.append(self.doMove)

    def clearPath(self, repeated = False):
        # Stop any running animation and reset the way-point list.
        if (self.moving):
            self.moving = False
            self.moveTimer.stop()

        self.path = []
        self.currDest = 0
        self.repeated = repeated

    def addMovePoint(self, x, y, time = 20):
        # `time` is the number of 100 ms timer ticks used to reach (x, y).
        self.path.append((x, y, time))

    def moveTo(self, x, y, time = 20):
        # Convenience: replace the current path with a single way point.
        self.clearPath()
        self.addMovePoint(x, y, time)

    def startMoving(self):
        if not self.moving:
            # Per-tick deltas toward the current destination.
            self.time = self.path[self.currDest][2]
            self.stepX = (self.path[self.currDest][0] - self.x) / float(self.time)
            self.stepY = (self.path[self.currDest][1] - self.y) / float(self.time)

            self.moving = True
            self.moveTimer.start(100)

    def stopMoving(self):
        self.moving = False
        self.moveTimer.stop()

    def doMove(self):
        # Timer callback: advance the position by one interpolation step.
        self.x += self.stepX
        self.y += self.stepY
        self.time -= 1
        try:
            self.move(int(self.x), int(self.y))
        except: # moving not possible... widget not there any more... stop moving
            self.stopMoving()

        if (self.time == 0):
            # Way point reached: advance to the next one (or wrap/stop).
            self.currDest += 1
            self.moveTimer.stop()
            self.moving = False
            if (self.currDest >= len(self.path)): # end of path
                if (self.repeated):
                    self.currDest = 0
                    self.moving = False
                    self.startMoving()
            else:
                self.moving = False
                self.startMoving()
class MultiPixmap(Pixmap):
    """Pixmap that can switch between several skin-defined pixmaps."""

    def __init__(self):
        Pixmap.__init__(self)
        self.pixmaps = []

    def applySkin(self, desktop, screen):
        # Consume the "pixmaps" skin attribute (comma-separated list),
        # loading every entry; a plain "pixmap" attribute is used as the
        # default image.  All other attributes pass through unchanged.
        if self.skinAttributes is not None:
            skin_path_prefix = getattr(screen, "skin_path", path)
            pixmap = None
            attribs = [ ]
            for (attrib, value) in self.skinAttributes:
                if attrib == "pixmaps":
                    pixmaps = value.split(',')
                    for p in pixmaps:
                        self.pixmaps.append(loadPixmap(resolveFilename(SCOPE_SKIN_IMAGE, p, path_prefix=skin_path_prefix), desktop) )
                    if not pixmap:
                        # first list entry doubles as the default "pixmap"
                        pixmap = resolveFilename(SCOPE_SKIN_IMAGE, pixmaps[0], path_prefix=skin_path_prefix)
                elif attrib == "pixmap":
                    pixmap = resolveFilename(SCOPE_SKIN_IMAGE, value, path_prefix=skin_path_prefix)
                else:
                    attribs.append((attrib,value))
            if pixmap:
                attribs.append(("pixmap", pixmap))
            self.skinAttributes = attribs
        return GUIComponent.applySkin(self, desktop, screen)

    def setPixmapNum(self, x):
        # Display pixmap number `x`; out-of-range indices are only logged.
        if self.instance:
            if len(self.pixmaps) > x:
                self.instance.setPixmap(self.pixmaps[x])
            else:
                print "setPixmapNum(%d) failed! defined pixmaps:" %(x), self.pixmaps
| gpl-2.0 |
tpetmanson/jsvabamorf | createbinding.py | 1 | 1138 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import os
import json
def get_sources(src_dir='src', ending='.cpp'):
    """Return the paths of files in `src_dir` whose names end with `ending`."""
    matching = (name for name in os.listdir(src_dir) if name.endswith(ending))
    return [os.path.join(src_dir, name) for name in matching]
# define directories for vabamorf source directories
dirs = ['etana', 'etyhh', 'fsc', 'json', 'proof']
src_dirs = [os.path.join('src', d) for d in dirs]

# define a list of C++ source files
lib_sources = []
# NOTE(review): `vabamorf_src` is assigned but never used below -- confirm.
vabamorf_src = os.path.join('src', 'etana', 'vabamorf.cpp')
for d in src_dirs:
    lib_sources.extend(get_sources(d))
lib_sources.append('vabamorf_wrap.cxx')

# define directories for vabamorf include directories
dirs.append(os.path.join('fsc', 'fsjni'))
include_dirs = [os.path.join('include', d) for d in dirs]

# node-gyp binding description: one native target built from the
# collected sources and include paths
binding = {
    'targets': [{
        'target_name': 'jsvabamorf',
        'sources': lib_sources,
        'include_dirs': include_dirs,
        # strip gyp's default -fno-exceptions flags (C++ exceptions needed)
        'cflags!': [ '-fno-exceptions' ],
        'cflags_cc!': [ '-fno-exceptions' ],
    }],
}

# emit the binding.gyp content on stdout
print (json.dumps(binding, indent=4))
| lgpl-3.0 |
cssmlulu/zhihu-py3 | test/zhihu-test.py | 3 | 15989 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = '7sDream'
import os
import shutil
from zhihu import ZhihuClient, ActType
def test_question():
    """Exercise the read-only Question API against a live zhihu question.

    The indented ``#`` lines after each print are sample outputs.
    """
    url = 'http://www.zhihu.com/question/24825703'
    question = client.question(url)
    # question title
    print(question.title)
    # 亲密关系之间要说「谢谢」吗?
    # detailed description of the question
    print(question.details)
    # 从小父母和大家庭里,.......什么时候不该说"谢谢”??
    # number of answers
    print(question.answer_num)
    # 630
    # number of users following the question
    print(question.follower_num)
    # 4326
    # users following the question
    for _, follower in zip(range(0, 10), question.followers):
        print(follower.name)
    # J Drop
    # 熊猫
    # Steve He
    # ...
    # topics the question belongs to
    print(question.topics)
    # ['心理学', '恋爱', '社会', '礼仪', '亲密关系']
    # upvote count of the top-ranked answer
    print(question.top_answer.upvote_num)
    # 197
    # upvote counts of the ten top-ranked answers
    for answer in question.top_i_answers(10):
        print(answer.author.name, answer.upvote_num)
    # 49
    # 89
    # 425
    # ...
def test_answer():
    """Exercise the read-only Answer API against a live zhihu answer."""
    url = 'http://www.zhihu.com/question/24825703/answer/30975949'
    answer = client.answer(url)
    # answer url
    print(answer.url)
    # title of the question the answer belongs to
    print(answer.question.title)
    # 关系亲密的人之间要说「谢谢」吗?
    # author name
    print(answer.author.name)
    # 甜阁下
    # upvote count
    print(answer.upvote_num)
    # 1155
    # each upvoter's name plus upvote/thank/question/answer counts
    for _, upvoter in zip(range(1, 10), answer.upvoters):
        print(upvoter.name, upvoter.upvote_num, upvoter.thank_num,
              upvoter.question_num, upvoter.answer_num, upvoter.is_zero_user())
    # ...
    # 五月 42 15 3 35
    # 陈半边 6311 1037 3 101
    # 刘柯 3107 969 273 36
    #
    # 三零用户比例 36.364%
    # HTML content of the answer
    print(answer.content)
    # <html>
    # ...
    # </html>
    # save as HTML into the current directory
    answer.save(filepath='.')
    # creates "亲密关系之间要说「谢谢」吗? - 甜阁下.html"
    # save as markdown into the current directory
    answer.save(filepath='.', mode="md")
    # creates "亲密关系之间要说「谢谢」吗? - 甜阁下.md"
def test_author():
    """Exercise the read-only Author (user) API against a live profile."""
    url = 'http://www.zhihu.com/people/7sdream'
    author = client.author(url)
    # display name
    print(author.name)
    # 7sDream
    # one-line self description ("motto")
    print(author.motto)
    # 二次元新居民/软件爱好者/零回答消灭者
    # avatar url
    print(author.photo_url)
    # http://pic3.zhimg.com/893fd554c8aa57196d5ab98530ef479a_r.jpg
    # received upvotes
    print(author.upvote_num)
    # 1338
    # received thanks
    print(author.thank_num)
    # 468
    # number of users this author follows
    print(author.followee_num)
    # 82
    # users this author follows
    for _, followee in zip(range(0, 10), author.followees):
        print(followee.name)
    # yuwei
    # falling
    # 伍声
    # bhuztez
    # 段晓晨
    # 冯东
    # ...
    # number of followers
    print(author.follower_num)
    # 303
    # followers of this author
    for _, follower in zip(range(0, 10), author.followers):
        print(follower.name)
    # yuwei
    # falling
    # 周非
    # 陈泓瑾
    # O1Operator
    # ...
    # number of questions asked
    print(author.question_num)
    # 16
    # titles of all questions asked
    for _, question in zip(range(0, 10), author.questions):
        print(question.title)
    # 用户「松阳先生」的主页出了什么问题?
    # C++运算符重载在头文件中应该如何定义?
    # 亚马逊应用市场的应用都是正版的吗?
    # ...
    # number of answers written
    print(author.answer_num)
    # 247
    # upvote counts of all answers written
    for _, answer in zip(range(0, 10), author.answers):
        print(answer.upvote_num)
    # 0
    # 5
    # 12
    # 0
    # ...
    # number of column posts written
    print(author.post_num)
    # 0
    # names of the author's columns
    for column in author.columns:
        print(column.name)
    # 我没有专栏T^T
    # number of collections
    print(author.collection_num)
    # 5
    # names of the author's collections
    for collection in author.collections:
        print(collection.name)
    # 教学精品。
    # 可以留着慢慢看~
    # OwO
    # 一句。
    # Read it later
    # recent activities, dispatched on the activity type (ActType)
    for _, act in zip(range(0, 10), author.activities):
        print(act.content.url)
        if act.type == ActType.FOLLOW_COLUMN:
            print('%s 在 %s 关注了专栏 %s' %
                  (author.name, act.time, act.column.name))
        elif act.type == ActType.FOLLOW_QUESTION:
            print('%s 在 %s 关注了问题 %s' %
                  (author.name, act.time, act.question.title))
        elif act.type == ActType.ASK_QUESTION:
            print('%s 在 %s 提了个问题 %s' %
                  (author.name, act.time, act.question.title))
        elif act.type == ActType.UPVOTE_POST:
            print('%s 在 %s 赞同了专栏 %s 中 %s 的文章 %s, '
                  '此文章赞同数 %d, 评论数 %d' %
                  (author.name, act.time, act.post.column.name,
                   act.post.author.name, act.post.title, act.post.upvote_num,
                   act.post.comment_num))
        elif act.type == ActType.PUBLISH_POST:
            print('%s 在 %s 在专栏 %s 中发布了文章 %s, '
                  '此文章赞同数 %d, 评论数 %d' %
                  (author.name, act.time, act.post.column.name,
                   act.post.title, act.post.upvote_num,
                   act.post.comment_num))
        elif act.type == ActType.UPVOTE_ANSWER:
            print('%s 在 %s 赞同了问题 %s 中 %s(motto: %s) 的回答, '
                  '此回答赞同数 %d' %
                  (author.name, act.time, act.answer.question.title,
                   act.answer.author.name, act.answer.author.motto,
                   act.answer.upvote_num))
        elif act.type == ActType.ANSWER_QUESTION:
            print('%s 在 %s 回答了问题 %s 此回答赞同数 %d' %
                  (author.name, act.time, act.answer.question.title,
                   act.answer.upvote_num))
        elif act.type == ActType.FOLLOW_TOPIC:
            print('%s 在 %s 关注了话题 %s' %
                  (author.name, act.time, act.topic.name))
def test_collection():
    """Exercise the read-only Collection API against a live collection."""
    url = 'http://www.zhihu.com/collection/28698204'
    collection = client.collection(url)
    # collection name
    print(collection.name)
    # 可以用来背的答案
    # number of followers
    print(collection.follower_num)
    # 6343
    # users following the collection
    for _, follower in zip(range(0, 10), collection.followers):
        print(follower.name)
    # 花椰菜
    # 邱火羽白
    # 枫丹白露
    # ...
    # name of the collection's creator
    print(collection.owner.name)
    # 7sDream
    # upvote counts of the answers in the collection
    for _, answer in zip(range(0, 10), collection.answers):
        print(answer.upvote_num)
    # 2561
    # 535
    # 223
    # ...
    # titles of the questions in the collection
    for _, question in zip(range(0, 10), collection.questions):
        print(question.title)
    # 如何完成标准的平板支撑?
    # 有没有适合 Android 开发初学者的 App 源码推荐?
    # 如何挑逗女朋友?
    # 有哪些计算机的书适合推荐给大一学生?
    # ...
def test_column():
    """Exercise the read-only Column API against a live column."""
    url = 'http://zhuanlan.zhihu.com/xiepanda'
    column = client.column(url)
    # column name
    print(column.name)
    # 谢熊猫出没注意
    # number of followers
    print(column.follower_num)
    # 73570
    # number of posts
    print(column.post_num)
    # 68
    # titles of the column's posts
    for _, post in zip(range(0, 10), column.posts):
        print(post.title)
    # 伦敦,再见。London, Pride.
    # 为什么你来到伦敦?——没有抽到h1b
    # “城邦之国”新加坡强在哪?
    # ...
def test_post():
    """Exercise the read-only Post (column article) API."""
    url = 'http://zhuanlan.zhihu.com/xiepanda/19950456'
    post = client.post(url)
    # post url
    print(post.url)
    # post title
    print(post.title)
    # 为什么最近有很多名人,比如比尔盖茨,马斯克、霍金等,让人们警惕人工智能?
    # name of the column the post belongs to
    print(post.column.name)
    # 谢熊猫出没注意
    # author name
    print(post.author.name)
    # 谢熊猫君
    # upvote count
    print(post.upvote_num)
    # 18491
    # comment count
    print(post.comment_num)
    # 1748
    # save as markdown into the current directory; creates
    # 为什么最近有很多名人,比如比尔盖茨,马斯克、霍金等,让人们警惕人工智能? - 谢熊猫君.md
    post.save(filepath='.')
def test_topic():
    """Exercise the read-only Topic API against a live topic."""
    url = 'http://www.zhihu.com/topic/19947695/'
    topic = client.topic(url)
    # topic url
    print(topic.url)
    # http://www.zhihu.com/topic/19947695/
    # topic name
    print(topic.name)
    # 深网(Deep Web)
    # topic description
    print(topic.description)
    # 暗网(英语:Deep Web,又称深网、不可见网、隐藏网)
    # topic avatar url
    print(topic.photo_url)
    # http://pic1.zhimg.com/a3b0d77c052e45399da1fe26fb4c9734_r.jpg
    # number of followers
    print(topic.follower_num)
    # 3309
    # followers of the topic
    for _, follower in zip(range(0, 10), topic.followers):
        print(follower.name, follower.motto)
    # 韦小宝 韦小宝
    # 吉陆遥 吉陆遥
    # qingyuan ma qingyuan ma
    # ...
    # parent topics
    for _, parent in zip(range(0, 10), topic.parents):
        print(parent.name)
    # 互联网
    # child topics
    for _, child in zip(range(0, 10), topic.children):
        print(child.name)
    # Tor
    # best answerers of the topic
    for _, author in zip(range(0, 10), topic.top_authors):
        print(author.name, author.motto)
    # Ben Chen
    # acel rovsion 我只不过规范基点上的建构论者
    # 沈万马
    # ...
    # highlighted ("essence") answers under the topic
    for _, ans in zip(range(0, 10), topic.top_answers):
        print(ans.question.title, ans.author.name, ans.upvote_num)
    # 《纸牌屋》中提到的深网 (Deep Web) 是什么? Ben Chen 2956
    # 黑客逃避追踪,为什么要用虚拟机 + TOR + VPN 呢? Ario 526
    # 《纸牌屋》中提到的深网 (Deep Web) 是什么? acel rovsion 420
    # ...
    # all questions, newest first
    for _, question in zip(range(0, 10), topic.questions):
        print(question.title)
    # 马里亚纳网络存在吗?
    # 玛丽亚纳网络存在吗?
    # 为什么暗网里这么多违法的东西FBI不顺藤摸瓜呢?
    # ...
    # all answers, newest first
    for _, ans in zip(range(0, 10), topic.answers):
        print(ans.question.title, ans.author.name, ans.upvote_num)
    # 如何用tor登陆qq? 匿名用户 0
    # 想看一下暗网(deep web),但是怕中病毒,所以有谁能发发截图?? tor 0
    # icq是什么 为什么暗网交流一般都用icq? tor 0
    # ...
    # hot questions, hottest first
    for _, q in zip(range(0, 10), topic.hot_questions):
        print(q.title)
    # 《纸牌屋》中提到的深网 (Deep Web) 是什么?
    # 黑客逃避追踪,为什么要用虚拟机 + TOR + VPN 呢?
    # 微博热传的关于暗网的变态故事是真的还是假的啊?
    # ...
    # hot answers, hottest first
    for _, ans in zip(range(0, 10), topic.hot_answers):
        print(ans.question.title, ans.author.name, ans.upvote_num)
    # 《纸牌屋》中提到的深网 (Deep Web) 是什么? Ben Chen 3006
    # 《纸牌屋》中提到的深网 (Deep Web) 是什么? 提刀夜行 123
    # 《纸牌屋》中提到的深网 (Deep Web) 是什么? 匿名用户 21
    # ...
def test_me():
    """
    Disabled by default: every call below performs a mutating action on
    zhihu (upvote, downvote, thank, follow, ...).  Only enable it if you
    can tell which line failed when an error occurs AND are able to roll
    every operation back (undo the upvotes, thanks, follows).  If so,
    fill in the blank URLs below and un-comment the call in test().
    """
    answer = client.answer('')  # answer url -- must NOT be your own answer
    post = client.post('')  # post url -- must NOT be your own post
    author = client.author('')  # user url -- must NOT be yourself
    question = client.question('')  # question url
    topic = client.topic('')  # topic url
    collection = client.collection('')  # collection url

    me = client.me()

    print('赞同答案...', end='')
    assert me.vote(answer, 'up')  # upvote
    assert me.vote(answer, 'down')  # downvote
    assert me.vote(answer, 'clear')  # clear vote
    print('通过')

    print('感谢答案...', end='')
    assert me.thanks(answer)  # thank
    assert me.thanks(answer, False)  # cancel thank
    print('通过')

    print('赞同文章...', end='')
    assert me.vote(post, 'up')  # upvote
    assert me.vote(post, 'down')  # downvote
    assert me.vote(post, 'clear')  # clear vote
    print('通过')

    print('关注用户...', end='')
    assert me.follow(author)  # follow
    assert me.follow(author, False)  # unfollow
    print('通过')

    print('关注问题...', end='')
    assert me.follow(question)  # follow
    assert me.follow(question, False)  # unfollow
    print('通过')

    print('关注话题...', end='')
    assert me.follow(topic)  # follow
    assert me.follow(topic, False)  # unfollow
    print('通过')

    print('关注收藏夹...', end='')
    assert me.follow(collection)  # follow
    assert me.follow(collection, False)  # unfollow
    print('通过')
def test():
    """Run every read-only test case once, in the original order."""
    for case in (
        test_question,
        test_answer,
        test_author,
        test_collection,
        test_column,
        test_post,
        test_topic,
    ):
        case()
    # test_me()  # performs mutating actions -- keep disabled by default
if __name__ == '__main__':
    Cookies_File = 'cookies.json'
    BASE_DIR = os.path.dirname(os.path.realpath(__file__))
    TEST_DIR = os.path.join(BASE_DIR, 'test')
    print("Test dir: ", TEST_DIR)
    # start from a clean scratch directory
    if os.path.exists(TEST_DIR):
        print("Cleaning it...", end='')
        shutil.rmtree(TEST_DIR)
        print("Done")
    else:
        print("Test dir not exist.")
    os.chdir(BASE_DIR)
    # reuse saved login cookies when available, otherwise log in
    # interactively and persist the cookies for the next run
    if os.path.isfile(Cookies_File):
        print("Cookies file found.")
        client = ZhihuClient(Cookies_File)
    else:
        print("Cookies file not exist, please login...")
        client = ZhihuClient()
        cookies_str = client.login_in_terminal()
        with open(Cookies_File, 'w') as f:
            f.write(cookies_str)
    print("Making test dir...", end="")
    os.mkdir(TEST_DIR)
    print("Done", end="\n\n")
    # run the suite inside the scratch dir so saved files land there
    os.chdir(TEST_DIR)
    print("===== test start =====")
    import timeit
    try:
        # run the whole suite exactly once and time it
        time = timeit.timeit(
            'test()', setup='from __main__ import test', number=1)
        print('===== test passed =====')
        print('no error happen')
        print('time used: {0} ms'.format(time * 1000))
    except Exception as e:
        print('===== test failed =====')
        raise e
    finally:
        # always restore the cwd and remove the scratch directory
        os.chdir(BASE_DIR)
        print("Cleaning...", end='')
        shutil.rmtree(TEST_DIR)
        print("Done")
| mit |
postla/e2-gui | lib/python/Components/ConfigList.py | 5 | 7657 | from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from config import KEY_LEFT, KEY_RIGHT, KEY_HOME, KEY_END, KEY_0, KEY_DELETE, KEY_BACKSPACE, KEY_OK, KEY_TOGGLEOW, KEY_ASCII, KEY_TIMEOUT, KEY_NUMBERS, ConfigElement, ConfigText, ConfigPassword
from Components.ActionMap import NumberActionMap, ActionMap
from enigma import eListbox, eListboxPythonConfigContent, eRCInput, eTimer
from Screens.MessageBox import MessageBox
class ConfigList(HTMLComponent, GUIComponent, object):
    """List box presenting (description, ConfigElement) entries for editing."""

    def __init__(self, list, session = None):
        GUIComponent.__init__(self)
        self.l = eListboxPythonConfigContent()
        self.l.setSeperation(200)
        # timer used to commit partially typed numeric input after 1 s
        self.timer = eTimer()
        self.list = list
        self.onSelectionChanged = [ ]
        self.current = None
        self.session = session

    def execBegin(self):
        # switch the remote control to ASCII mode while the list is active
        rcinput = eRCInput.getInstance()
        rcinput.setKeyboardMode(rcinput.kmAscii)
        self.timer.callback.append(self.timeout)

    def execEnd(self):
        rcinput = eRCInput.getInstance()
        rcinput.setKeyboardMode(rcinput.kmNone)
        self.timer.callback.remove(self.timeout)

    def toggle(self):
        selection = self.getCurrent()
        selection[1].toggle()
        self.invalidateCurrent()

    def handleKey(self, key):
        # forward the key to the selected ConfigElement, if it is enabled
        selection = self.getCurrent()
        if selection and selection[1].enabled:
            selection[1].handleKey(key)
            self.invalidateCurrent()
            if key in KEY_NUMBERS:
                # commit multi-digit input when no further key arrives in 1 s
                self.timer.start(1000, 1)

    def getCurrent(self):
        return self.l.getCurrentSelection()

    def getCurrentIndex(self):
        return self.l.getCurrentSelectionIndex()

    def setCurrentIndex(self, index):
        if self.instance is not None:
            self.instance.moveSelectionTo(index)

    def invalidateCurrent(self):
        self.l.invalidateEntry(self.l.getCurrentSelectionIndex())

    def invalidate(self, entry):
        # when the entry to invalidate does not exist, just ignore the request.
        # this eases up conditional setup screens a lot.
        if entry in self.__list:
            self.l.invalidateEntry(self.__list.index(entry))

    GUI_WIDGET = eListbox

    def selectionChanged(self):
        # notify the previously selected element and the newly selected one
        if isinstance(self.current,tuple) and len(self.current) >= 2:
            self.current[1].onDeselect(self.session)
        self.current = self.getCurrent()
        if isinstance(self.current,tuple) and len(self.current) >= 2:
            self.current[1].onSelect(self.session)
        else:
            return
        for x in self.onSelectionChanged:
            x()

    def postWidgetCreate(self, instance):
        instance.selectionChanged.get().append(self.selectionChanged)
        instance.setContent(self.l)
        self.instance.setWrapAround(True)

    def preWidgetRemove(self, instance):
        if isinstance(self.current,tuple) and len(self.current) >= 2:
            self.current[1].onDeselect(self.session)
        instance.selectionChanged.get().remove(self.selectionChanged)
        instance.setContent(None)

    def setList(self, l):
        self.timer.stop()
        self.__list = l
        self.l.setList(self.__list)

        if l is not None:
            for x in l:
                assert len(x) < 2 or isinstance(x[1], ConfigElement), "entry in ConfigList " + str(x[1]) + " must be a ConfigElement"

    def getList(self):
        return self.__list

    # entries are (description, ConfigElement) tuples
    list = property(getList, setList)

    def timeout(self):
        # numeric-input timeout: tell the element the input is complete
        self.handleKey(KEY_TIMEOUT)

    def isChanged(self):
        # True when any entry differs from its saved value
        is_changed = False
        for x in self.list:
            is_changed |= x[1].isChanged()
        return is_changed

    def pageUp(self):
        if self.instance is not None:
            self.instance.moveSelection(self.instance.pageUp)

    def pageDown(self):
        if self.instance is not None:
            self.instance.moveSelection(self.instance.pageDown)
class ConfigListScreen:
    """Mixin that equips a Screen with a ConfigList plus all key bindings.

    Forwards remote-control keys to the embedded "config" list, manages
    the optional virtual keyboard for text entries, and offers the usual
    save/cancel helpers.
    """

    def __init__(self, list, session = None, on_change = None):
        self["config_actions"] = NumberActionMap(["SetupActions", "InputAsciiActions", "KeyboardInputActions"],
        {
            "gotAsciiCode": self.keyGotAscii,
            "ok": self.keyOK,
            "left": self.keyLeft,
            "right": self.keyRight,
            "home": self.keyHome,
            "end": self.keyEnd,
            "deleteForward": self.keyDelete,
            "deleteBackward": self.keyBackspace,
            "toggleOverwrite": self.keyToggleOW,
            "pageUp": self.keyPageUp,
            "pageDown": self.keyPageDown,
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal
        }, -1) # to prevent left/right overriding the listbox
        self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"],
        {
            "showVirtualKeyboard": self.KeyText,
        }, -2)
        # enabled on demand by handleInputHelpers for text entries
        self["VirtualKB"].setEnabled(False)
        self["config"] = ConfigList(list, session = session)
        # optional callback fired after every value-changing key press
        if on_change is not None:
            self.__changed = on_change
        else:
            self.__changed = lambda: None
        if not self.handleInputHelpers in self["config"].onSelectionChanged:
            self["config"].onSelectionChanged.append(self.handleInputHelpers)

    def handleInputHelpers(self):
        # Show/hide the virtual keyboard hint and position the help window
        # depending on whether the selected entry is a text/password field.
        if self["config"].getCurrent() is not None:
            if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
                if self.has_key("VKeyIcon"):
                    self["VirtualKB"].setEnabled(True)
                    self["VKeyIcon"].boolean = True
                if self.has_key("HelpWindow"):
                    if self["config"].getCurrent()[1].help_window.instance is not None:
                        helpwindowpos = self["HelpWindow"].getPosition()
                        from enigma import ePoint
                        self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
            else:
                if self.has_key("VKeyIcon"):
                    self["VirtualKB"].setEnabled(False)
                    self["VKeyIcon"].boolean = False
        else:
            if self.has_key("VKeyIcon"):
                self["VirtualKB"].setEnabled(False)
                self["VKeyIcon"].boolean = False

    def KeyText(self):
        # Open the virtual keyboard pre-filled with the current value.
        from Screens.VirtualKeyBoard import VirtualKeyBoard
        self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title = self["config"].getCurrent()[0], text = self["config"].getCurrent()[1].getValue())

    def VirtualKeyBoardCallback(self, callback = None):
        # Apply the text typed on the virtual keyboard (if any).
        if callback is not None and len(callback):
            self["config"].getCurrent()[1].setValue(callback)
            self["config"].invalidate(self["config"].getCurrent())

    def keyOK(self):
        self["config"].handleKey(KEY_OK)

    def keyLeft(self):
        self["config"].handleKey(KEY_LEFT)
        self.__changed()

    def keyRight(self):
        self["config"].handleKey(KEY_RIGHT)
        self.__changed()

    def keyHome(self):
        self["config"].handleKey(KEY_HOME)
        self.__changed()

    def keyEnd(self):
        self["config"].handleKey(KEY_END)
        self.__changed()

    def keyDelete(self):
        self["config"].handleKey(KEY_DELETE)
        self.__changed()

    def keyBackspace(self):
        self["config"].handleKey(KEY_BACKSPACE)
        self.__changed()

    def keyToggleOW(self):
        self["config"].handleKey(KEY_TOGGLEOW)
        self.__changed()

    def keyGotAscii(self):
        self["config"].handleKey(KEY_ASCII)
        self.__changed()

    def keyNumberGlobal(self, number):
        self["config"].handleKey(KEY_0 + number)
        self.__changed()

    def keyPageDown(self):
        self["config"].pageDown()

    def keyPageUp(self):
        self["config"].pageUp()

    def saveAll(self):
        # Persist every entry's current value.
        for x in self["config"].list:
            x[1].save()

    # keySave and keyCancel are just provided in case you need them.
    # you have to call them by yourself.
    def keySave(self):
        self.saveAll()
        self.close()

    def cancelConfirm(self, result):
        # MessageBox callback: discard all changes only when confirmed.
        if not result:
            return
        for x in self["config"].list:
            x[1].cancel()
        self.close()

    def closeMenuList(self, recursive = False):
        # Ask for confirmation before discarding unsaved changes.
        if self["config"].isChanged():
            self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
        else:
            self.close(recursive)

    def keyCancel(self):
        self.closeMenuList()

    def closeRecursive(self):
        self.closeMenuList(True)
| gpl-2.0 |
LLluma/ea_web | addons/web_keyboard_shortcuts/__openerp__.py | 4 | 1964 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2012 Elico Corp. All Rights Reserved.
# Author: Yannick Gouin <yannick.gouin@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Keyboard shortcuts',
'version': '1.1',
'category': 'Tools',
'description': """
This module add some keyboard shortcuts similar to the ones in the GTK-client.
On a form, mode edit:
Ctrl + S : Save the current object
On a form, mode view:
Ctrl + Delete : Delete the current object
Ctrl + N : New object
Ctrl + D : Duplicate the current object
Ctrl + Page Up : First object
Ctrl + Arrow Up : Previous object
Ctrl + Arrow Down : Next object
Ctrl + Page Down : Last object
""",
"author": "Elico Corp",
"website": "http://www.openerp.net.cn",
'depends': ['web'],
'init_xml': [],
'update_xml': [],
"js" : [
"static/src/js/web_keyboard_shortcuts.js",
],
'installable': True,
'active': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/Django/tests/modeltests/user_commands/tests.py | 48 | 1689 | import sys
from django.core import management
from django.core.management.base import CommandError
from django.test import TestCase
from django.utils import translation
from django.utils.six import StringIO
class CommandTests(TestCase):
    """Regression tests for custom management command handling."""

    def test_command(self):
        # call_command writes the command's output to the given stream
        out = StringIO()
        management.call_command('dance', stdout=out)
        self.assertEqual(out.getvalue(),
            "I don't feel like dancing Rock'n'Roll.\n")

    def test_command_style(self):
        # extra keyword options are forwarded to the command
        out = StringIO()
        management.call_command('dance', style='Jive', stdout=out)
        self.assertEqual(out.getvalue(),
            "I don't feel like dancing Jive.\n")

    def test_language_preserved(self):
        # running a command must not permanently change the active locale
        out = StringIO()
        with translation.override('fr'):
            management.call_command('dance', stdout=out)
        self.assertEqual(translation.get_language(), 'fr')

    def test_explode(self):
        """ Test that an unknown command raises CommandError """
        self.assertRaises(CommandError, management.call_command, ('explode',))

    def test_system_exit(self):
        """ Exception raised in a command should raise CommandError with
            call_command, but SystemExit when run from command line
        """
        with self.assertRaises(CommandError):
            management.call_command('dance', example="raise")
        # from the command line the same failure exits the process;
        # capture stderr to check the error is still reported
        old_stderr = sys.stderr
        sys.stderr = err = StringIO()
        try:
            with self.assertRaises(SystemExit):
                management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
        finally:
            sys.stderr = old_stderr
        self.assertIn("CommandError", err.getvalue())
| agpl-3.0 |
Soya93/Extract-Refactoring | python/lib/Lib/site-packages/django/contrib/localflavor/us/forms.py | 229 | 4495 | """
USA-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select, CharField
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
# 10-digit NANP phone number with optional leading "1" and -/. separators
# (parentheses and whitespace are stripped before matching)
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
# SSN as area-group-serial (AAA-GG-SSSS) with -, space, or no separator
ssn_re = re.compile(r"^(?P<area>\d{3})[-\ ]?(?P<group>\d{2})[-\ ]?(?P<serial>\d{4})$")
class USZipCodeField(RegexField):
    """Validates a U.S. ZIP code: 5-digit or ZIP+4 (XXXXX or XXXXX-XXXX)."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXXX or XXXXX-XXXX.'),
    }

    def __init__(self, *args, **kwargs):
        super(USZipCodeField, self).__init__(r'^\d{5}(?:-\d{4})?$',
            max_length=None, min_length=None, *args, **kwargs)
class USPhoneNumberField(CharField):
    """Validates a 10-digit U.S. phone number; normalizes to XXX-XXX-XXXX."""
    default_error_messages = {
        'invalid': _('Phone numbers must be in XXX-XXX-XXXX format.'),
    }

    def clean(self, value):
        super(USPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # strip parentheses and whitespace before matching
        value = re.sub('(\(|\)|\s+)', '', smart_unicode(value))
        m = phone_digits_re.search(value)
        if m:
            # normalized dashed form
            return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
        raise ValidationError(self.error_messages['invalid'])
class USSocialSecurityNumberField(Field):
    """
    A United States Social Security number.

    Checks the following rules to determine whether the number is valid:

        * Conforms to the XXX-XX-XXXX format.
        * No group consists entirely of zeroes.
        * The leading group is not "666" (block "666" will never be allocated).
        * The number is not in the promotional block 987-65-4320 through
          987-65-4329, which are permanently invalid.
        * The number is not one known to be invalid due to otherwise widespread
          promotional use or distribution (e.g., the Woolworth's number or the
          1962 promotional number).
    """
    default_error_messages = {
        'invalid': _('Enter a valid U.S. Social Security number in XXX-XX-XXXX format.'),
    }

    def clean(self, value):
        super(USSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = re.match(ssn_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        area, group, serial = match.groupdict()['area'], match.groupdict()['group'], match.groupdict()['serial']

        # First pass: no blocks of all zeroes.
        if area == '000' or \
           group == '00' or \
           serial == '0000':
            raise ValidationError(self.error_messages['invalid'])

        # Second pass: promotional and otherwise permanently invalid numbers.
        if area == '666' or \
           (area == '987' and group == '65' and 4320 <= int(serial) <= 4329) or \
           value == '078-05-1120' or \
           value == '219-09-9999':
            raise ValidationError(self.error_messages['invalid'])

        # normalized dashed form
        return u'%s-%s-%s' % (area, group, serial)
class USStateField(Field):
    """
    A form field that validates its input is a U.S. state name or abbreviation.
    It normalizes the input to the standard two-leter postal service
    abbreviation for the given state.
    """
    default_error_messages = {
        'invalid': _('Enter a U.S. state or territory.'),
    }

    def clean(self, value):
        from us_states import STATES_NORMALIZED
        super(USStateField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            value = value.strip().lower()
        except AttributeError:
            # non-string input: fall through to the generic error below
            pass
        else:
            try:
                return STATES_NORMALIZED[value.strip().lower()].decode('ascii')
            except KeyError:
                # unknown state name/abbreviation
                pass
        raise ValidationError(self.error_messages['invalid'])
class USStateSelect(Select):
    """
    A ``Select`` widget whose choices are the U.S. states/territories.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choices table is only loaded when the
        # widget is instantiated.
        from us_states import STATE_CHOICES
        choices = STATE_CHOICES
        super(USStateSelect, self).__init__(attrs, choices=choices)
class USPSSelect(Select):
    """
    A ``Select`` widget whose choices are the US Postal Service state
    codes.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choices table is only loaded when the
        # widget is instantiated.
        from us_states import USPS_CHOICES
        choices = USPS_CHOICES
        super(USPSSelect, self).__init__(attrs, choices=choices)
| apache-2.0 |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/boto/ec2/securitygroup.py | 150 | 14687 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Security Group
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.exception import BotoClientError
class SecurityGroup(TaggedEC2Object):
    def __init__(self, connection=None, owner_id=None,
                 name=None, description=None, id=None):
        """
        Represents an EC2 Security Group.

        :ivar id: The group id (``sg-...``).
        :ivar owner_id: AWS account id of the group's owner.
        :ivar name: The group name.
        :ivar description: The group description.
        :ivar vpc_id: The VPC id; filled in during XML parsing for VPC
            groups, otherwise None.
        :ivar rules: Ingress rules (an :class:`IPPermissionsList`).
        :ivar rules_egress: Egress rules (populated for VPC groups only).
        """
        super(SecurityGroup, self).__init__(connection)
        self.id = id
        self.owner_id = owner_id
        self.name = name
        self.description = description
        self.vpc_id = None
        self.rules = IPPermissionsList()
        self.rules_egress = IPPermissionsList()
    def __repr__(self):
        return 'SecurityGroup:%s' % self.name
    def startElement(self, name, attrs, connection):
        """SAX parsing hook: delegate rule containers to their lists."""
        # Let the tag-handling base class claim the element first.
        retval = super(SecurityGroup, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'ipPermissions':
            return self.rules
        elif name == 'ipPermissionsEgress':
            return self.rules_egress
        else:
            return None
    def endElement(self, name, value, connection):
        """SAX parsing hook: map leaf XML elements onto attributes."""
        if name == 'ownerId':
            self.owner_id = value
        elif name == 'groupId':
            self.id = value
        elif name == 'groupName':
            self.name = value
        elif name == 'vpcId':
            self.vpc_id = value
        elif name == 'groupDescription':
            self.description = value
        elif name == 'ipRanges':
            # Container element only; its <item> children are handled
            # by the IPPermissions objects.
            pass
        elif name == 'return':
            # Boolean result wrapper returned by mutation API calls.
            if value == 'false':
                self.status = False
            elif value == 'true':
                self.status = True
            else:
                raise Exception(
                    'Unexpected value of status %s for group %s' % (
                        value,
                        self.name
                    )
                )
        else:
            # Unknown elements become plain attributes.
            setattr(self, name, value)
    def delete(self, dry_run=False):
        """
        Delete this security group. VPC groups must be deleted by id;
        EC2-Classic groups are deleted by name.
        """
        if self.vpc_id:
            return self.connection.delete_security_group(
                group_id=self.id,
                dry_run=dry_run
            )
        else:
            return self.connection.delete_security_group(
                self.name,
                dry_run=dry_run
            )
    def add_rule(self, ip_protocol, from_port, to_port,
                 src_group_name, src_group_owner_id, cidr_ip,
                 src_group_group_id, dry_run=False):
        """
        Add a rule to the SecurityGroup object. Note that this method
        only changes the local version of the object. No information
        is sent to EC2.
        """
        rule = IPPermissions(self)
        rule.ip_protocol = ip_protocol
        rule.from_port = from_port
        rule.to_port = to_port
        self.rules.append(rule)
        rule.add_grant(
            src_group_name,
            src_group_owner_id,
            cidr_ip,
            src_group_group_id,
            dry_run=dry_run
        )
    def remove_rule(self, ip_protocol, from_port, to_port,
                    src_group_name, src_group_owner_id, cidr_ip,
                    src_group_group_id, dry_run=False):
        """
        Remove a rule to the SecurityGroup object. Note that this method
        only changes the local version of the object. No information
        is sent to EC2.
        """
        if not self.rules:
            raise ValueError("The security group has no rules")
        target_rule = None
        for rule in self.rules:
            if rule.ip_protocol == ip_protocol:
                if rule.from_port == from_port:
                    if rule.to_port == to_port:
                        target_rule = rule
                        target_grant = None
                        # NOTE(review): when the caller passes None for the
                        # src_group_* arguments, this matches grants whose
                        # name/group_id are also None (None == None) —
                        # presumably intended for CIDR-only rules; confirm.
                        for grant in rule.grants:
                            if grant.name == src_group_name or grant.group_id == src_group_group_id:
                                if grant.owner_id == src_group_owner_id:
                                    if grant.cidr_ip == cidr_ip:
                                        target_grant = grant
                        if target_grant:
                            rule.grants.remove(target_grant)
        # Drop the whole rule once its last grant is gone.
        if len(rule.grants) == 0:
            self.rules.remove(target_rule)
    def authorize(self, ip_protocol=None, from_port=None, to_port=None,
                  cidr_ip=None, src_group=None, dry_run=False):
        """
        Add a new rule to this security group.
        You need to pass in either ``src_group``
        OR ip_protocol, from_port, to_port,
        and cidr_ip. In other words, either you are authorizing another
        group or you are authorizing some ip-based rule.
        :type ip_protocol: string
        :param ip_protocol: Either tcp | udp | icmp
        :type from_port: int
        :param from_port: The beginning port number you are enabling
        :type to_port: int
        :param to_port: The ending port number you are enabling
        :type cidr_ip: string or list of strings
        :param cidr_ip: The CIDR block you are providing access to.
            See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
        :type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
                         :class:`boto.ec2.securitygroup.GroupOrCIDR`
        :param src_group: The Security Group you are granting access to.
        :rtype: bool
        :return: True if successful.
        """
        # EC2-Classic groups are addressed by name, VPC groups by id.
        group_name = None
        if not self.vpc_id:
            group_name = self.name
        group_id = None
        if self.vpc_id:
            group_id = self.id
        src_group_name = None
        src_group_owner_id = None
        src_group_group_id = None
        if src_group:
            # Group-based authorization supersedes any CIDR argument.
            cidr_ip = None
            src_group_owner_id = src_group.owner_id
            if not self.vpc_id:
                src_group_name = src_group.name
            else:
                # GroupOrCIDR exposes .group_id; SecurityGroup uses .id.
                if hasattr(src_group, 'group_id'):
                    src_group_group_id = src_group.group_id
                else:
                    src_group_group_id = src_group.id
        status = self.connection.authorize_security_group(group_name,
                                                          src_group_name,
                                                          src_group_owner_id,
                                                          ip_protocol,
                                                          from_port,
                                                          to_port,
                                                          cidr_ip,
                                                          group_id,
                                                          src_group_group_id,
                                                          dry_run=dry_run)
        if status:
            # Mirror the change locally, one rule per CIDR block.
            if not isinstance(cidr_ip, list):
                cidr_ip = [cidr_ip]
            for single_cidr_ip in cidr_ip:
                self.add_rule(ip_protocol, from_port, to_port, src_group_name,
                              src_group_owner_id, single_cidr_ip,
                              src_group_group_id, dry_run=dry_run)
        return status
    def revoke(self, ip_protocol=None, from_port=None, to_port=None,
               cidr_ip=None, src_group=None, dry_run=False):
        """
        Revoke a rule from this security group. Accepts the same
        arguments as :meth:`authorize`; on success the matching local
        rule is removed as well.
        """
        group_name = None
        if not self.vpc_id:
            group_name = self.name
        group_id = None
        if self.vpc_id:
            group_id = self.id
        src_group_name = None
        src_group_owner_id = None
        src_group_group_id = None
        if src_group:
            cidr_ip = None
            src_group_owner_id = src_group.owner_id
            if not self.vpc_id:
                src_group_name = src_group.name
            else:
                if hasattr(src_group, 'group_id'):
                    src_group_group_id = src_group.group_id
                else:
                    src_group_group_id = src_group.id
        status = self.connection.revoke_security_group(group_name,
                                                       src_group_name,
                                                       src_group_owner_id,
                                                       ip_protocol,
                                                       from_port,
                                                       to_port,
                                                       cidr_ip,
                                                       group_id,
                                                       src_group_group_id,
                                                       dry_run=dry_run)
        if status:
            self.remove_rule(ip_protocol, from_port, to_port, src_group_name,
                             src_group_owner_id, cidr_ip, src_group_group_id,
                             dry_run=dry_run)
        return status
    def copy_to_region(self, region, name=None, dry_run=False):
        """
        Create a copy of this security group in another region.
        Note that the new security group will be a separate entity
        and will not stay in sync automatically after the copy
        operation.
        :type region: :class:`boto.ec2.regioninfo.RegionInfo`
        :param region: The region to which this security group will be copied.
        :type name: string
        :param name: The name of the copy. If not supplied, the copy
            will have the same name as this security group.
        :rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
        :return: The new security group.
        """
        # NOTE(review): this compares a region *name* (str) to
        # ``self.region`` — looks like it should be ``self.region.name``;
        # as written the same-region guard may never trigger. Confirm.
        if region.name == self.region:
            raise BotoClientError('Unable to copy to the same Region')
        conn_params = self.connection.get_params()
        rconn = region.connect(**conn_params)
        sg = rconn.create_security_group(
            name or self.name,
            self.description,
            dry_run=dry_run
        )
        source_groups = []
        for rule in self.rules:
            for grant in rule.grants:
                # A grant is either group-based (name/group_id) or a
                # plain CIDR; replicate each form accordingly.
                grant_nom = grant.name or grant.group_id
                if grant_nom:
                    if grant_nom not in source_groups:
                        source_groups.append(grant_nom)
                        sg.authorize(None, None, None, None, grant,
                                     dry_run=dry_run)
                else:
                    sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
                                 grant.cidr_ip, dry_run=dry_run)
        return sg
    def instances(self, dry_run=False):
        """
        Find all of the current instances that are running within this
        security group.
        :rtype: list of :class:`boto.ec2.instance.Instance`
        :return: A list of Instance objects
        """
        rs = []
        if self.vpc_id:
            # VPC groups are attached per network interface.
            rs.extend(self.connection.get_all_reservations(
                filters={'instance.group-id': self.id},
                dry_run=dry_run
            ))
        else:
            rs.extend(self.connection.get_all_reservations(
                filters={'group-id': self.id},
                dry_run=dry_run
            ))
        # Flatten reservations into a single list of instances.
        instances = [i for r in rs for i in r.instances]
        return instances
class IPPermissionsList(list):
    """
    A list of :class:`IPPermissions` rules, populated from SAX parse
    events.
    """
    def startElement(self, name, attrs, connection):
        # Each <item> element opens a new rule; return the fresh object
        # so subsequent parse events are routed into it.
        if name != 'item':
            return None
        permission = IPPermissions(self)
        self.append(permission)
        return permission
    def endElement(self, name, value, connection):
        # The list itself has no scalar sub-elements.
        pass
class IPPermissions(object):
    """
    One security-group rule: a protocol plus port range, together with
    the grants (groups or CIDR blocks) it applies to.
    """
    def __init__(self, parent=None):
        self.parent = parent
        self.ip_protocol = None
        self.from_port = None
        self.to_port = None
        self.grants = []
    def __repr__(self):
        return 'IPPermissions:%s(%s-%s)' % (
            self.ip_protocol, self.from_port, self.to_port)
    def startElement(self, name, attrs, connection):
        # Each <item> element inside the rule is a new grant.
        if name != 'item':
            return None
        grant = GroupOrCIDR(self)
        self.grants.append(grant)
        return grant
    def endElement(self, name, value, connection):
        # Known XML elements map to snake_case attributes; anything
        # else is stored verbatim under its element name.
        attr_map = {'ipProtocol': 'ip_protocol',
                    'fromPort': 'from_port',
                    'toPort': 'to_port'}
        setattr(self, attr_map.get(name, name), value)
    def add_grant(self, name=None, owner_id=None, cidr_ip=None, group_id=None,
                  dry_run=False):
        """Append a new grant to this rule (local change only)."""
        grant = GroupOrCIDR(self)
        grant.owner_id = owner_id
        grant.group_id = group_id
        grant.name = name
        grant.cidr_ip = cidr_ip
        self.grants.append(grant)
        return grant
class GroupOrCIDR(object):
    """
    The source/target of a security group grant: either another
    security group (identified by owner id, group id and/or name) or a
    CIDR address range. Normally exactly one of the two forms is
    populated by the XML parse.
    """
    def __init__(self, parent=None):
        self.owner_id = None
        self.group_id = None
        self.name = None
        self.cidr_ip = None
    def __repr__(self):
        if self.cidr_ip:
            return '%s' % self.cidr_ip
        else:
            return '%s-%s' % (self.name or self.group_id, self.owner_id)
    def startElement(self, name, attrs, connection):
        # Grants contain no nested container elements.
        return None
    def endElement(self, name, value, connection):
        """SAX parsing hook: map leaf XML elements onto attributes."""
        # Bug fix: the original used ``if name == 'cidrIp':`` after the
        # elif chain, so every non-cidrIp element also fell through to
        # the ``setattr`` fallback, creating duplicate attributes such
        # as ``userId`` alongside ``owner_id``. A single elif chain
        # maps each known element exactly once.
        if name == 'userId':
            self.owner_id = value
        elif name == 'groupId':
            self.group_id = value
        elif name == 'groupName':
            self.name = value
        elif name == 'cidrIp':
            self.cidr_ip = value
        else:
            # Unknown elements become plain attributes.
            setattr(self, name, value)
| bsd-3-clause |
DSLituiev/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to their weights.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Draw the classifier's decision surface and the weighted samples.

    Relies on the module-level ``X`` and ``y`` arrays for the scatter
    plot; only the decision surface comes from ``classifier``.
    """
    # Evaluate the decision function on a dense grid covering the data.
    grid_x, grid_y = np.meshgrid(np.linspace(-4, 5, 500),
                                 np.linspace(-4, 5, 500))
    scores = classifier.decision_function(
        np.c_[grid_x.ravel(), grid_y.ravel()])
    scores = scores.reshape(grid_x.shape)
    # Shade the surface, then overlay the samples with marker sizes
    # proportional to their weights.
    axis.contourf(grid_x, grid_y, scores, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points: 10 shifted to (1, 1) labelled +1, 10 at the
# origin labelled -1.
np.random.seed(0)  # fixed seed so the figure is reproducible
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
# Draw both decision surfaces side by side for comparison.
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
| bsd-3-clause |
seken/nink | pyglet/event.py | 5 | 16578 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Event dispatch framework.
All objects that produce events in pyglet implement `EventDispatcher`,
providing a consistent interface for registering and manipulating event
handlers. A commonly used event dispatcher is `pyglet.window.Window`.
Event types
===========
For each event dispatcher there is a set of events that it dispatches; these
correspond with the type of event handlers you can attach. Event types are
identified by their name, for example, ''on_resize''. If you are creating a
new class which implements `EventDispatcher`, you must call
`EventDispatcher.register_event_type` for each event type.
Attaching event handlers
========================
An event handler is simply a function or method. You can attach an event
handler by setting the appropriate function on the instance::
def on_resize(width, height):
# ...
dispatcher.on_resize = on_resize
There is also a convenience decorator that reduces typing::
@dispatcher.event
def on_resize(width, height):
# ...
You may prefer to subclass and override the event handlers instead::
class MyDispatcher(DispatcherClass):
def on_resize(self, width, height):
# ...
Event handler stack
===================
When attaching an event handler to a dispatcher using the above methods, it
replaces any existing handler (causing the original handler to no longer be
called). Each dispatcher maintains a stack of event handlers, allowing you to
insert an event handler "above" the existing one rather than replacing it.
There are two main use cases for "pushing" event handlers:
* Temporarily intercepting the events coming from the dispatcher by pushing a
custom set of handlers onto the dispatcher, then later "popping" them all
off at once.
* Creating "chains" of event handlers, where the event propagates from the
top-most (most recently added) handler to the bottom, until a handler
takes care of it.
Use `EventDispatcher.push_handlers` to create a new level in the stack and
attach handlers to it. You can push several handlers at once::
dispatcher.push_handlers(on_resize, on_key_press)
If your function handlers have different names to the events they handle, use
keyword arguments::
dispatcher.push_handlers(on_resize=my_resize,
on_key_press=my_key_press)
After an event handler has processed an event, it is passed on to the
next-lowest event handler, unless the handler returns `EVENT_HANDLED`, which
prevents further propagation.
To remove all handlers on the top stack level, use
`EventDispatcher.pop_handlers`.
Note that any handlers pushed onto the stack have precedence over the
handlers set directly on the instance (for example, using the methods
described in the previous section), regardless of when they were set.
For example, handler ``foo`` is called before handler ``bar`` in the following
example::
dispatcher.push_handlers(on_resize=foo)
dispatcher.on_resize = bar
Dispatching events
==================
pyglet uses a single-threaded model for all application code. Event
handlers are only ever invoked as a result of calling
`EventDispatcher.dispatch_events`.
It is up to the specific event dispatcher to queue relevant events until they
can be dispatched, at which point the handlers are called in the order the
events were originally generated.
This implies that your application runs with a main loop that continuously
updates the application state and checks for new events::
while True:
dispatcher.dispatch_events()
# ... additional per-frame processing
Not all event dispatchers require the call to ``dispatch_events``; check with
the particular class documentation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import inspect
EVENT_HANDLED = True
EVENT_UNHANDLED = None
class EventException(Exception):
    '''Raised when an event handler could not be attached to a
    dispatcher (for example, an unregistered event name).
    '''
    pass
class EventDispatcher(object):
    '''Generic event dispatcher interface.
    See the module docstring for usage.
    '''
    # Placeholder empty stack; real stack is created only if needed
    _event_stack = ()
    @classmethod
    def register_event_type(cls, name):
        '''Register an event type with the dispatcher.
        Registering event types allows the dispatcher to validate event
        handler names as they are attached, and to search attached objects for
        suitable handlers.
        :Parameters:
            `name` : str
                Name of the event to register.
        '''
        # NOTE(review): hasattr also finds an inherited ``event_types``,
        # so a subclass that never registers its own type appends to its
        # parent's list — presumably intentional sharing; confirm.
        if not hasattr(cls, 'event_types'):
            cls.event_types = []
        cls.event_types.append(name)
        return name
    def push_handlers(self, *args, **kwargs):
        '''Push a level onto the top of the handler stack, then attach zero or
        more event handlers.
        If keyword arguments are given, they name the event type to attach.
        Otherwise, a callable's `__name__` attribute will be used. Any other
        object may also be specified, in which case it will be searched for
        callables with event names.
        '''
        # Create event stack if necessary (the class-level placeholder
        # is a tuple; each instance lazily gets its own list).
        if type(self._event_stack) is tuple:
            self._event_stack = []
        # Place dict full of new handlers at beginning of stack
        self._event_stack.insert(0, {})
        self.set_handlers(*args, **kwargs)
    def _get_handlers(self, args, kwargs):
        '''Implement handler matching on arguments for set_handlers and
        remove_handlers.
        Yields ``(event_name, handler)`` pairs.
        '''
        for object in args:
            if inspect.isroutine(object):
                # Single magically named function
                name = object.__name__
                if name not in self.event_types:
                    raise EventException('Unknown event "%s"' % name)
                yield name, object
            else:
                # Single instance with magically named methods
                for name in dir(object):
                    if name in self.event_types:
                        yield name, getattr(object, name)
        for name, handler in kwargs.items():
            # Function for handling given event (no magic)
            if name not in self.event_types:
                raise EventException('Unknown event "%s"' % name)
            yield name, handler
    def set_handlers(self, *args, **kwargs):
        '''Attach one or more event handlers to the top level of the handler
        stack.
        See `push_handlers` for the accepted argument types.
        '''
        # Create event stack if necessary
        if type(self._event_stack) is tuple:
            self._event_stack = [{}]
        for name, handler in self._get_handlers(args, kwargs):
            self.set_handler(name, handler)
    def set_handler(self, name, handler):
        '''Attach a single event handler.
        :Parameters:
            `name` : str
                Name of the event type to attach to.
            `handler` : callable
                Event handler to attach.
        '''
        # Create event stack if necessary
        if type(self._event_stack) is tuple:
            self._event_stack = [{}]
        self._event_stack[0][name] = handler
    def pop_handlers(self):
        '''Pop the top level of event handlers off the stack.
        '''
        # NOTE(review): ``assert x and 'msg'`` never shows the message;
        # the intended form is ``assert self._event_stack, 'No handlers
        # pushed'``. Behaviour (raising on an empty stack) is the same.
        assert self._event_stack and 'No handlers pushed'
        del self._event_stack[0]
    def remove_handlers(self, *args, **kwargs):
        '''Remove event handlers from the event stack.
        See `push_handlers` for the accepted argument types. All handlers
        are removed from the first stack frame that contains any of the given
        handlers. No error is raised if any handler does not appear in that
        frame, or if no stack frame contains any of the given handlers.
        If the stack frame is empty after removing the handlers, it is
        removed from the stack. Note that this interferes with the expected
        symmetry of `push_handlers` and `pop_handlers`.
        '''
        handlers = list(self._get_handlers(args, kwargs))
        # Find the first stack frame containing any of the handlers
        # (returns None implicitly when nothing matches).
        def find_frame():
            for frame in self._event_stack:
                for name, handler in handlers:
                    try:
                        if frame[name] == handler:
                            return frame
                    except KeyError:
                        pass
        frame = find_frame()
        # No frame matched; no error.
        if not frame:
            return
        # Remove each handler from the frame.
        for name, handler in handlers:
            try:
                if frame[name] == handler:
                    del frame[name]
            except KeyError:
                pass
        # Remove the frame if it's empty.
        if not frame:
            self._event_stack.remove(frame)
    def remove_handler(self, name, handler):
        '''Remove a single event handler.
        The given event handler is removed from the first handler stack frame
        it appears in. The handler must be the exact same callable as passed
        to `set_handler`, `set_handlers` or `push_handlers`; and the name
        must match the event type it is bound to.
        No error is raised if the event handler is not set.
        :Parameters:
            `name` : str
                Name of the event type to remove.
            `handler` : callable
                Event handler to remove.
        '''
        for frame in self._event_stack:
            try:
                if frame[name] is handler:
                    del frame[name]
                    break
            except KeyError:
                pass
    def dispatch_event(self, event_type, *args):
        '''Dispatch a single event to the attached handlers.
        The event is propagated to all handlers from the top of the stack
        until one returns `EVENT_HANDLED`. This method should be used only by
        `EventDispatcher` implementors; applications should call
        the ``dispatch_events`` method.
        Since pyglet 1.2, the method returns `EVENT_HANDLED` if an event
        handler returned `EVENT_HANDLED` or `EVENT_UNHANDLED` if all events
        returned `EVENT_UNHANDLED`. If no matching event handlers are in the
        stack, ``False`` is returned.
        :Parameters:
            `event_type` : str
                Name of the event.
            `args` : sequence
                Arguments to pass to the event handler.
        :rtype: bool or None
        :return: (Since pyglet 1.2) `EVENT_HANDLED` if an event handler
            returned `EVENT_HANDLED`; `EVENT_UNHANDLED` if one or more event
            handlers were invoked but returned only `EVENT_UNHANDLED`;
            otherwise ``False``. In pyglet 1.1 and earlier, the return value
            is always ``None``.
        '''
        assert event_type in self.event_types
        invoked = False
        # Search handler stack for matching event handlers. Iterate a
        # copy so handlers may push/pop frames while dispatching.
        for frame in list(self._event_stack):
            handler = frame.get(event_type, None)
            if handler:
                try:
                    invoked = True
                    if handler(*args):
                        return EVENT_HANDLED
                except TypeError:
                    # NOTE(review): this also catches a TypeError raised
                    # *inside* the handler body, not only signature
                    # mismatches; _raise_dispatch_exception re-raises the
                    # original error when the argument counts do match.
                    self._raise_dispatch_exception(event_type, args, handler)
        # Check instance for an event handler
        if hasattr(self, event_type):
            try:
                invoked = True
                if getattr(self, event_type)(*args):
                    return EVENT_HANDLED
            except TypeError:
                self._raise_dispatch_exception(
                    event_type, args, getattr(self, event_type))
        if invoked:
            return EVENT_UNHANDLED
        return False
    def _raise_dispatch_exception(self, event_type, args, handler):
        # A common problem in applications is having the wrong number of
        # arguments in an event handler. This is caught as a TypeError in
        # dispatch_event but the error message is obfuscated.
        #
        # Here we check if there is indeed a mismatch in argument count,
        # and construct a more useful exception message if so. If this method
        # doesn't find a problem with the number of arguments, the error
        # is re-raised as if we weren't here.
        n_args = len(args)
        # Inspect the handler
        handler_args, handler_varargs, _, handler_defaults = \
            inspect.getargspec(handler)
        n_handler_args = len(handler_args)
        # Remove "self" arg from handler if it's a bound method
        # (``im_self`` is the Python 2 bound-method attribute).
        if inspect.ismethod(handler) and handler.im_self:
            n_handler_args -= 1
        # Allow *args varargs to overspecify arguments
        if handler_varargs:
            n_handler_args = max(n_handler_args, n_args)
        # Allow default values to overspecify arguments
        if (n_handler_args > n_args and
            handler_defaults and
            n_handler_args - len(handler_defaults) <= n_args):
            n_handler_args = n_args
        if n_handler_args != n_args:
            if inspect.isfunction(handler) or inspect.ismethod(handler):
                # ``func_name``/``func_code`` are Python 2 attributes.
                descr = '%s at %s:%d' % (
                    handler.func_name,
                    handler.func_code.co_filename,
                    handler.func_code.co_firstlineno)
            else:
                descr = repr(handler)
            raise TypeError(
                '%s event was dispatched with %d arguments, but '
                'handler %s has an incompatible function signature' %
                (event_type, len(args), descr))
        else:
            # Argument counts match: re-raise the original TypeError.
            raise
    def event(self, *args):
        '''Function decorator for an event handler.
        Usage::
            win = window.Window()
            @win.event
            def on_resize(self, width, height):
                # ...
        or::
            @win.event('on_resize')
            def foo(self, width, height):
                # ...
        '''
        if len(args) == 0:                      # @window.event()
            def decorator(func):
                name = func.__name__
                self.set_handler(name, func)
                return func
            return decorator
        elif inspect.isroutine(args[0]):        # @window.event
            func = args[0]
            name = func.__name__
            self.set_handler(name, func)
            return args[0]
        elif type(args[0]) in (str, unicode):   # @window.event('on_resize')
            name = args[0]
            def decorator(func):
                self.set_handler(name, func)
                return func
            return decorator
| gpl-3.0 |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/boto/ec2/instanceinfo.py | 152 | 1893 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class InstanceInfo(object):
    """
    Represents an EC2 Instance status response from CloudWatch:
    a lightweight (id, state) record built from SAX parse events.
    """
    def __init__(self, connection=None, id=None, state=None):
        """
        :ivar str id: The instance's EC2 ID.
        :ivar str state: Specifies the current status of the instance.
        """
        self.connection = connection
        self.id = id
        self.state = state
    def __repr__(self):
        return 'InstanceInfo:%s' % self.id
    def startElement(self, name, attrs, connection):
        # No nested container elements to delegate to.
        return None
    def endElement(self, name, value, connection):
        # Both capitalisations of the id element appear in responses.
        if name in ('instanceId', 'InstanceId'):
            self.id = value
        elif name == 'state':
            self.state = value
        else:
            # Unknown elements become plain attributes.
            setattr(self, name, value)
Lh4cKg/brython | www/src/Lib/encodings/iso8859_6.py | 37 | 11140 | """ Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-6 codec backed by the module's charmap tables."""
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; each chunk is charmap-encoded independently."""
    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; each chunk is charmap-decoded independently."""
    def decode(self, input, final=False):
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines this codec's encode with codecs.StreamWriter's buffering;
    # no extra behaviour needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines this codec's decode with codecs.StreamReader's buffering;
    # no extra behaviour needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the 'iso8859-6' codec."""
    # A single stateless Codec instance serves both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-6',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# 256-entry table: the index is the byte value, the entry is the decoded
# Unicode character.  Positions that ISO 8859-6 leaves undefined hold
# U+FFFE, which codecs.charmap_decode reports through the error handler.
decoding_table = (
    '\x00'      # 0x00 -> NULL
    '\x01'      # 0x01 -> START OF HEADING
    '\x02'      # 0x02 -> START OF TEXT
    '\x03'      # 0x03 -> END OF TEXT
    '\x04'      # 0x04 -> END OF TRANSMISSION
    '\x05'      # 0x05 -> ENQUIRY
    '\x06'      # 0x06 -> ACKNOWLEDGE
    '\x07'      # 0x07 -> BELL
    '\x08'      # 0x08 -> BACKSPACE
    '\t'        # 0x09 -> HORIZONTAL TABULATION
    '\n'        # 0x0A -> LINE FEED
    '\x0b'      # 0x0B -> VERTICAL TABULATION
    '\x0c'      # 0x0C -> FORM FEED
    '\r'        # 0x0D -> CARRIAGE RETURN
    '\x0e'      # 0x0E -> SHIFT OUT
    '\x0f'      # 0x0F -> SHIFT IN
    '\x10'      # 0x10 -> DATA LINK ESCAPE
    '\x11'      # 0x11 -> DEVICE CONTROL ONE
    '\x12'      # 0x12 -> DEVICE CONTROL TWO
    '\x13'      # 0x13 -> DEVICE CONTROL THREE
    '\x14'      # 0x14 -> DEVICE CONTROL FOUR
    '\x15'      # 0x15 -> NEGATIVE ACKNOWLEDGE
    '\x16'      # 0x16 -> SYNCHRONOUS IDLE
    '\x17'      # 0x17 -> END OF TRANSMISSION BLOCK
    '\x18'      # 0x18 -> CANCEL
    '\x19'      # 0x19 -> END OF MEDIUM
    '\x1a'      # 0x1A -> SUBSTITUTE
    '\x1b'      # 0x1B -> ESCAPE
    '\x1c'      # 0x1C -> FILE SEPARATOR
    '\x1d'      # 0x1D -> GROUP SEPARATOR
    '\x1e'      # 0x1E -> RECORD SEPARATOR
    '\x1f'      # 0x1F -> UNIT SEPARATOR
    ' '         # 0x20 -> SPACE
    '!'         # 0x21 -> EXCLAMATION MARK
    '"'         # 0x22 -> QUOTATION MARK
    '#'         # 0x23 -> NUMBER SIGN
    '$'         # 0x24 -> DOLLAR SIGN
    '%'         # 0x25 -> PERCENT SIGN
    '&'         # 0x26 -> AMPERSAND
    "'"         # 0x27 -> APOSTROPHE
    '('         # 0x28 -> LEFT PARENTHESIS
    ')'         # 0x29 -> RIGHT PARENTHESIS
    '*'         # 0x2A -> ASTERISK
    '+'         # 0x2B -> PLUS SIGN
    ','         # 0x2C -> COMMA
    '-'         # 0x2D -> HYPHEN-MINUS
    '.'         # 0x2E -> FULL STOP
    '/'         # 0x2F -> SOLIDUS
    '0'         # 0x30 -> DIGIT ZERO
    '1'         # 0x31 -> DIGIT ONE
    '2'         # 0x32 -> DIGIT TWO
    '3'         # 0x33 -> DIGIT THREE
    '4'         # 0x34 -> DIGIT FOUR
    '5'         # 0x35 -> DIGIT FIVE
    '6'         # 0x36 -> DIGIT SIX
    '7'         # 0x37 -> DIGIT SEVEN
    '8'         # 0x38 -> DIGIT EIGHT
    '9'         # 0x39 -> DIGIT NINE
    ':'         # 0x3A -> COLON
    ';'         # 0x3B -> SEMICOLON
    '<'         # 0x3C -> LESS-THAN SIGN
    '='         # 0x3D -> EQUALS SIGN
    '>'         # 0x3E -> GREATER-THAN SIGN
    '?'         # 0x3F -> QUESTION MARK
    '@'         # 0x40 -> COMMERCIAL AT
    'A'         # 0x41 -> LATIN CAPITAL LETTER A
    'B'         # 0x42 -> LATIN CAPITAL LETTER B
    'C'         # 0x43 -> LATIN CAPITAL LETTER C
    'D'         # 0x44 -> LATIN CAPITAL LETTER D
    'E'         # 0x45 -> LATIN CAPITAL LETTER E
    'F'         # 0x46 -> LATIN CAPITAL LETTER F
    'G'         # 0x47 -> LATIN CAPITAL LETTER G
    'H'         # 0x48 -> LATIN CAPITAL LETTER H
    'I'         # 0x49 -> LATIN CAPITAL LETTER I
    'J'         # 0x4A -> LATIN CAPITAL LETTER J
    'K'         # 0x4B -> LATIN CAPITAL LETTER K
    'L'         # 0x4C -> LATIN CAPITAL LETTER L
    'M'         # 0x4D -> LATIN CAPITAL LETTER M
    'N'         # 0x4E -> LATIN CAPITAL LETTER N
    'O'         # 0x4F -> LATIN CAPITAL LETTER O
    'P'         # 0x50 -> LATIN CAPITAL LETTER P
    'Q'         # 0x51 -> LATIN CAPITAL LETTER Q
    'R'         # 0x52 -> LATIN CAPITAL LETTER R
    'S'         # 0x53 -> LATIN CAPITAL LETTER S
    'T'         # 0x54 -> LATIN CAPITAL LETTER T
    'U'         # 0x55 -> LATIN CAPITAL LETTER U
    'V'         # 0x56 -> LATIN CAPITAL LETTER V
    'W'         # 0x57 -> LATIN CAPITAL LETTER W
    'X'         # 0x58 -> LATIN CAPITAL LETTER X
    'Y'         # 0x59 -> LATIN CAPITAL LETTER Y
    'Z'         # 0x5A -> LATIN CAPITAL LETTER Z
    '['         # 0x5B -> LEFT SQUARE BRACKET
    '\\'        # 0x5C -> REVERSE SOLIDUS
    ']'         # 0x5D -> RIGHT SQUARE BRACKET
    '^'         # 0x5E -> CIRCUMFLEX ACCENT
    '_'         # 0x5F -> LOW LINE
    '`'         # 0x60 -> GRAVE ACCENT
    'a'         # 0x61 -> LATIN SMALL LETTER A
    'b'         # 0x62 -> LATIN SMALL LETTER B
    'c'         # 0x63 -> LATIN SMALL LETTER C
    'd'         # 0x64 -> LATIN SMALL LETTER D
    'e'         # 0x65 -> LATIN SMALL LETTER E
    'f'         # 0x66 -> LATIN SMALL LETTER F
    'g'         # 0x67 -> LATIN SMALL LETTER G
    'h'         # 0x68 -> LATIN SMALL LETTER H
    'i'         # 0x69 -> LATIN SMALL LETTER I
    'j'         # 0x6A -> LATIN SMALL LETTER J
    'k'         # 0x6B -> LATIN SMALL LETTER K
    'l'         # 0x6C -> LATIN SMALL LETTER L
    'm'         # 0x6D -> LATIN SMALL LETTER M
    'n'         # 0x6E -> LATIN SMALL LETTER N
    'o'         # 0x6F -> LATIN SMALL LETTER O
    'p'         # 0x70 -> LATIN SMALL LETTER P
    'q'         # 0x71 -> LATIN SMALL LETTER Q
    'r'         # 0x72 -> LATIN SMALL LETTER R
    's'         # 0x73 -> LATIN SMALL LETTER S
    't'         # 0x74 -> LATIN SMALL LETTER T
    'u'         # 0x75 -> LATIN SMALL LETTER U
    'v'         # 0x76 -> LATIN SMALL LETTER V
    'w'         # 0x77 -> LATIN SMALL LETTER W
    'x'         # 0x78 -> LATIN SMALL LETTER X
    'y'         # 0x79 -> LATIN SMALL LETTER Y
    'z'         # 0x7A -> LATIN SMALL LETTER Z
    '{'         # 0x7B -> LEFT CURLY BRACKET
    '|'         # 0x7C -> VERTICAL LINE
    '}'         # 0x7D -> RIGHT CURLY BRACKET
    '~'         # 0x7E -> TILDE
    '\x7f'      # 0x7F -> DELETE
    '\x80'      # 0x80 -> <control>
    '\x81'      # 0x81 -> <control>
    '\x82'      # 0x82 -> <control>
    '\x83'      # 0x83 -> <control>
    '\x84'      # 0x84 -> <control>
    '\x85'      # 0x85 -> <control>
    '\x86'      # 0x86 -> <control>
    '\x87'      # 0x87 -> <control>
    '\x88'      # 0x88 -> <control>
    '\x89'      # 0x89 -> <control>
    '\x8a'      # 0x8A -> <control>
    '\x8b'      # 0x8B -> <control>
    '\x8c'      # 0x8C -> <control>
    '\x8d'      # 0x8D -> <control>
    '\x8e'      # 0x8E -> <control>
    '\x8f'      # 0x8F -> <control>
    '\x90'      # 0x90 -> <control>
    '\x91'      # 0x91 -> <control>
    '\x92'      # 0x92 -> <control>
    '\x93'      # 0x93 -> <control>
    '\x94'      # 0x94 -> <control>
    '\x95'      # 0x95 -> <control>
    '\x96'      # 0x96 -> <control>
    '\x97'      # 0x97 -> <control>
    '\x98'      # 0x98 -> <control>
    '\x99'      # 0x99 -> <control>
    '\x9a'      # 0x9A -> <control>
    '\x9b'      # 0x9B -> <control>
    '\x9c'      # 0x9C -> <control>
    '\x9d'      # 0x9D -> <control>
    '\x9e'      # 0x9E -> <control>
    '\x9f'      # 0x9F -> <control>
    '\xa0'      # 0xA0 -> NO-BREAK SPACE
    '\ufffe'    # 0xA1 -> undefined
    '\ufffe'    # 0xA2 -> undefined
    '\ufffe'    # 0xA3 -> undefined
    '\xa4'      # 0xA4 -> CURRENCY SIGN
    '\ufffe'    # 0xA5 -> undefined
    '\ufffe'    # 0xA6 -> undefined
    '\ufffe'    # 0xA7 -> undefined
    '\ufffe'    # 0xA8 -> undefined
    '\ufffe'    # 0xA9 -> undefined
    '\ufffe'    # 0xAA -> undefined
    '\ufffe'    # 0xAB -> undefined
    '\u060c'    # 0xAC -> ARABIC COMMA
    '\xad'      # 0xAD -> SOFT HYPHEN
    '\ufffe'    # 0xAE -> undefined
    '\ufffe'    # 0xAF -> undefined
    '\ufffe'    # 0xB0 -> undefined
    '\ufffe'    # 0xB1 -> undefined
    '\ufffe'    # 0xB2 -> undefined
    '\ufffe'    # 0xB3 -> undefined
    '\ufffe'    # 0xB4 -> undefined
    '\ufffe'    # 0xB5 -> undefined
    '\ufffe'    # 0xB6 -> undefined
    '\ufffe'    # 0xB7 -> undefined
    '\ufffe'    # 0xB8 -> undefined
    '\ufffe'    # 0xB9 -> undefined
    '\ufffe'    # 0xBA -> undefined
    '\u061b'    # 0xBB -> ARABIC SEMICOLON
    '\ufffe'    # 0xBC -> undefined
    '\ufffe'    # 0xBD -> undefined
    '\ufffe'    # 0xBE -> undefined
    '\u061f'    # 0xBF -> ARABIC QUESTION MARK
    '\ufffe'    # 0xC0 -> undefined
    '\u0621'    # 0xC1 -> ARABIC LETTER HAMZA
    '\u0622'    # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
    '\u0623'    # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
    '\u0624'    # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
    '\u0625'    # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
    '\u0626'    # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
    '\u0627'    # 0xC7 -> ARABIC LETTER ALEF
    '\u0628'    # 0xC8 -> ARABIC LETTER BEH
    '\u0629'    # 0xC9 -> ARABIC LETTER TEH MARBUTA
    '\u062a'    # 0xCA -> ARABIC LETTER TEH
    '\u062b'    # 0xCB -> ARABIC LETTER THEH
    '\u062c'    # 0xCC -> ARABIC LETTER JEEM
    '\u062d'    # 0xCD -> ARABIC LETTER HAH
    '\u062e'    # 0xCE -> ARABIC LETTER KHAH
    '\u062f'    # 0xCF -> ARABIC LETTER DAL
    '\u0630'    # 0xD0 -> ARABIC LETTER THAL
    '\u0631'    # 0xD1 -> ARABIC LETTER REH
    '\u0632'    # 0xD2 -> ARABIC LETTER ZAIN
    '\u0633'    # 0xD3 -> ARABIC LETTER SEEN
    '\u0634'    # 0xD4 -> ARABIC LETTER SHEEN
    '\u0635'    # 0xD5 -> ARABIC LETTER SAD
    '\u0636'    # 0xD6 -> ARABIC LETTER DAD
    '\u0637'    # 0xD7 -> ARABIC LETTER TAH
    '\u0638'    # 0xD8 -> ARABIC LETTER ZAH
    '\u0639'    # 0xD9 -> ARABIC LETTER AIN
    '\u063a'    # 0xDA -> ARABIC LETTER GHAIN
    '\ufffe'    # 0xDB -> undefined
    '\ufffe'    # 0xDC -> undefined
    '\ufffe'    # 0xDD -> undefined
    '\ufffe'    # 0xDE -> undefined
    '\ufffe'    # 0xDF -> undefined
    '\u0640'    # 0xE0 -> ARABIC TATWEEL
    '\u0641'    # 0xE1 -> ARABIC LETTER FEH
    '\u0642'    # 0xE2 -> ARABIC LETTER QAF
    '\u0643'    # 0xE3 -> ARABIC LETTER KAF
    '\u0644'    # 0xE4 -> ARABIC LETTER LAM
    '\u0645'    # 0xE5 -> ARABIC LETTER MEEM
    '\u0646'    # 0xE6 -> ARABIC LETTER NOON
    '\u0647'    # 0xE7 -> ARABIC LETTER HEH
    '\u0648'    # 0xE8 -> ARABIC LETTER WAW
    '\u0649'    # 0xE9 -> ARABIC LETTER ALEF MAKSURA
    '\u064a'    # 0xEA -> ARABIC LETTER YEH
    '\u064b'    # 0xEB -> ARABIC FATHATAN
    '\u064c'    # 0xEC -> ARABIC DAMMATAN
    '\u064d'    # 0xED -> ARABIC KASRATAN
    '\u064e'    # 0xEE -> ARABIC FATHA
    '\u064f'    # 0xEF -> ARABIC DAMMA
    '\u0650'    # 0xF0 -> ARABIC KASRA
    '\u0651'    # 0xF1 -> ARABIC SHADDA
    '\u0652'    # 0xF2 -> ARABIC SUKUN
    '\ufffe'    # 0xF3 -> undefined
    '\ufffe'    # 0xF4 -> undefined
    '\ufffe'    # 0xF5 -> undefined
    '\ufffe'    # 0xF6 -> undefined
    '\ufffe'    # 0xF7 -> undefined
    '\ufffe'    # 0xF8 -> undefined
    '\ufffe'    # 0xF9 -> undefined
    '\ufffe'    # 0xFA -> undefined
    '\ufffe'    # 0xFB -> undefined
    '\ufffe'    # 0xFC -> undefined
    '\ufffe'    # 0xFD -> undefined
    '\ufffe'    # 0xFE -> undefined
    '\ufffe'    # 0xFF -> undefined
)
### Encoding table
# Inverse mapping built from decoding_table by codecs.charmap_build;
# positions decoding to U+FFFE are treated as unmapped on encode.
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause |
mancoast/CPythonPyc_test | crash/272_test_socket.py | 13 | 56141 | #!/usr/bin/env python
import unittest
from test import test_support
import errno
import socket
import select
import time
import traceback
import Queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
def try_address(host, port=0, family=socket.AF_INET):
    """Return True when a stream socket can be bound to host:port,
    False when the bind (or name resolution) fails."""
    try:
        probe = socket.socket(family, socket.SOCK_STREAM)
        probe.bind((host, port))
    except (socket.error, socket.gaierror):
        return False
    probe.close()
    return True
# Host used by the test fixtures (normally 127.0.0.1).
# NOTE: HOST and MSG are deliberately re-assigned further down in this
# module (MSG there as a native str rather than a bytes literal).
HOST = test_support.HOST
MSG = b'Michael Gilfix was here\n'
# True when the build has IPv6 support and ::1 can actually be bound.
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
# Threading support is optional: tests that require it are decorated with
# unittest.skipUnless(thread, ...) and are skipped when unavailable.
try:
    import thread
    import threading
except ImportError:
    thread = None
    threading = None
# NOTE: re-assigns the constants defined earlier in this module; this
# MSG is a native str (the earlier one is a bytes literal).
HOST = test_support.HOST
MSG = 'Michael Gilfix was here\n'
class SocketTCPTest(unittest.TestCase):
    """Fixture: a listening TCP server socket on an ephemeral port.

    The chosen port is stored in self.port for clients to connect to.
    """

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # test_support.bind_port picks a free ephemeral port and binds it.
        self.port = test_support.bind_port(self.serv)
        self.serv.listen(1)

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Fixture: a bound UDP server socket on an ephemeral port."""

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = test_support.bind_port(self.serv)

    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function.  (Name mangling makes these
        # _ThreadableTest__setUp etc., avoiding clashes with the sibling
        # TestCase base class.)
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        # Events used to rendezvous the server (main) thread with the
        # client thread; 'queue' carries client-side failures back.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        self.queue = Queue.Queue(1)

        # Do some munging to start the client test.
        # Derive the paired '_test*' method name from this test's id().
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        self.__setUp()
        if not self.server_ready.is_set():
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        self.done.wait()

        # Re-raise (as a test failure) any exception queued by the client.
        if not self.queue.empty():
            msg = self.queue.get()
            self.fail(msg)

    def clientRun(self, test_func):
        # Runs in the client thread: wait for the server, run the paired
        # '_test*' method, and forward any exception to the main thread.
        self.server_ready.wait()
        self.client_ready.set()
        self.clientSetUp()
        with test_support.check_py3k_warnings():
            if not callable(test_func):
                raise TypeError("test_func must be a callable function.")
        try:
            test_func()
        except Exception, strerror:
            self.queue.put(strerror)
        self.clientTearDown()

    def clientSetUp(self):
        # Subclasses must create the client-side socket here.
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client thread with its own TCP socket."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread.
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client thread with its own UDP socket."""

    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread.
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Fixture providing an already-established TCP connection.

    After setUp, the server thread holds the accepted socket in
    self.cli_conn and the client thread holds its end in self.serv_conn
    (each name describes the peer that socket talks to).
    """

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture built on socket.socketpair(): serv/cli are pre-connected."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv, self.cli = socket.socketpair()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def clientSetUp(self):
        # The client end already exists (created in setUp).
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
    """Socket module-level tests that need no running server thread."""

    def test_weakref(self):
        # A weakref proxy must become dead once the socket is collected.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        p = proxy(s)
        self.assertEqual(p.fileno(), s.fileno())
        s.close()
        s = None
        try:
            p.fileno()
        except ReferenceError:
            pass
        else:
            self.fail('Socket proxy still exists')

    def testSocketError(self):
        # Testing socket module exceptions
        def raise_error(*args, **kwargs):
            raise socket.error
        def raise_herror(*args, **kwargs):
            raise socket.herror
        def raise_gaierror(*args, **kwargs):
            raise socket.gaierror
        # herror and gaierror are subclasses of socket.error.
        self.assertRaises(socket.error, raise_error,
                          "Error raising socket exception.")
        self.assertRaises(socket.error, raise_herror,
                          "Error raising socket exception.")
        self.assertRaises(socket.error, raise_gaierror,
                          "Error raising socket exception.")

    def testSendtoErrors(self):
        # Testing that sendto() doesn't mask failures. See #10169.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind(('', 0))
        sockname = s.getsockname()
        # 2 args
        with self.assertRaises(UnicodeEncodeError):
            s.sendto(u'\u2620', sockname)
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, sockname)
        self.assertIn('not complex', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto('foo', None)
        self.assertIn('not NoneType', str(cm.exception))
        # 3 args
        with self.assertRaises(UnicodeEncodeError):
            s.sendto(u'\u2620', 0, sockname)
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, 0, sockname)
        self.assertIn('not complex', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto('foo', 0, None)
        self.assertIn('not NoneType', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto('foo', 'bar', sockname)
        self.assertIn('an integer is required', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto('foo', None, None)
        self.assertIn('an integer is required', str(cm.exception))
        # wrong number of args
        with self.assertRaises(TypeError) as cm:
            s.sendto('foo')
        self.assertIn('(1 given)', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto('foo', 0, sockname, 4)
        self.assertIn('(4 given)', str(cm.exception))

    def testCrucialConstants(self):
        # Testing for mission critical constants (AttributeError on miss).
        socket.AF_INET
        socket.SOCK_STREAM
        socket.SOCK_DGRAM
        socket.SOCK_RAW
        socket.SOCK_RDM
        socket.SOCK_SEQPACKET
        socket.SOL_SOCKET
        socket.SO_REUSEADDR

    def testHostnameRes(self):
        # Testing hostname resolution mechanisms
        hostname = socket.gethostname()
        try:
            ip = socket.gethostbyname(hostname)
        except socket.error:
            # Probably name lookup wasn't set up right; skip this test
            return
        self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
        try:
            hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
        except socket.error:
            # Probably a similar problem as above; skip this test
            return
        all_host_names = [hostname, hname] + aliases
        fqhn = socket.getfqdn(ip)
        if not fqhn in all_host_names:
            self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))

    def testRefCountGetNameInfo(self):
        # Testing reference count for getnameinfo
        if hasattr(sys, "getrefcount"):
            try:
                # On some versions, this loses a reference
                orig = sys.getrefcount(__name__)
                socket.getnameinfo(__name__,0)
            except TypeError:
                self.assertEqual(sys.getrefcount(__name__), orig,
                                 "socket.getnameinfo loses a reference")

    def testInterpreterCrash(self):
        # Making sure getnameinfo doesn't crash the interpreter
        try:
            # On some versions, this crashes the interpreter.
            socket.getnameinfo(('x', 0, 0, 0), 0)
        except socket.error:
            pass

    def testNtoH(self):
        # This just checks that htons etc. are their own inverse,
        # when looking at the lower 16 or 32 bits.
        sizes = {socket.htonl: 32, socket.ntohl: 32,
                 socket.htons: 16, socket.ntohs: 16}
        for func, size in sizes.items():
            mask = (1L<<size) - 1
            for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
                self.assertEqual(i & mask, func(func(i&mask)) & mask)
            swapped = func(mask)
            self.assertEqual(swapped & mask, mask)
            self.assertRaises(OverflowError, func, 1L<<34)

    def testNtoHErrors(self):
        # In-range values are accepted; negative values must raise.
        good_values = [ 1, 2, 3, 1L, 2L, 3L ]
        bad_values = [ -1, -2, -3, -1L, -2L, -3L ]
        for k in good_values:
            socket.ntohl(k)
            socket.ntohs(k)
            socket.htonl(k)
            socket.htons(k)
        for k in bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htonl, k)
            self.assertRaises(OverflowError, socket.htons, k)

    def testGetServBy(self):
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (sys.platform.startswith('linux') or
            sys.platform.startswith('freebsd') or
            sys.platform.startswith('netbsd') or
            sys.platform == 'darwin'):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except socket.error:
                pass
        else:
            raise socket.error
        # Try same call with optional protocol omitted
        port2 = socket.getservbyname(service)
        eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except socket.error:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)

    def testDefaultTimeout(self):
        # Testing default timeout
        # The default timeout should initially be None
        self.assertEqual(socket.getdefaulttimeout(), None)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), None)
        s.close()
        # Set the default timeout to 10, and see if it propagates
        socket.setdefaulttimeout(10)
        self.assertEqual(socket.getdefaulttimeout(), 10)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), 10)
        s.close()
        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        s = socket.socket()
        self.assertEqual(s.gettimeout(), None)
        s.close()
        # Check that setting it to an invalid value raises ValueError
        self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
        # Check that setting it to an invalid type raises TypeError
        self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")

    def testIPv4_inet_aton_fourbytes(self):
        if not hasattr(socket, 'inet_aton'):
            return  # No inet_aton, nothing to check
        # Test that issue1008086 and issue767150 are fixed.
        # It must return 4 bytes.
        self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0'))
        self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255'))

    def testIPv4toString(self):
        # inet_aton and inet_pton(AF_INET, ...) must agree.
        if not hasattr(socket, 'inet_pton'):
            return  # No inet_pton() on this platform
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        self.assertEqual('\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual('\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual('\xaa\xaa\xaa\xaa', f('170.170.170.170'))
        self.assertEqual('\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual('\xff\xff\xff\xff', f('255.255.255.255'))
        self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255'))

    def testIPv6toString(self):
        if not hasattr(socket, 'inet_pton'):
            return  # No inet_pton() on this platform
        try:
            from socket import inet_pton, AF_INET6, has_ipv6
            if not has_ipv6:
                return
        except ImportError:
            return
        f = lambda a: inet_pton(AF_INET6, a)
        self.assertEqual('\x00' * 16, f('::'))
        self.assertEqual('\x00' * 16, f('0::0'))
        self.assertEqual('\x00\x01' + '\x00' * 14, f('1::'))
        self.assertEqual(
            '\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
            f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
        )

    def testStringToIPv4(self):
        # inet_ntoa and inet_ntop(AF_INET, ...) must agree.
        if not hasattr(socket, 'inet_ntop'):
            return  # No inet_ntop() on this platform
        from socket import inet_ntoa as f, inet_ntop, AF_INET
        g = lambda a: inet_ntop(AF_INET, a)
        self.assertEqual('1.0.1.0', f('\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', f('\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', f('\xff\xff\xff\xff'))
        self.assertEqual('1.2.3.4', f('\x01\x02\x03\x04'))
        self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff'))

    def testStringToIPv6(self):
        if not hasattr(socket, 'inet_ntop'):
            return  # No inet_ntop() on this platform
        try:
            from socket import inet_ntop, AF_INET6, has_ipv6
            if not has_ipv6:
                return
        except ImportError:
            return
        f = lambda a: inet_ntop(AF_INET6, a)
        self.assertEqual('::', f('\x00' * 16))
        self.assertEqual('::1', f('\x00' * 15 + '\x01'))
        self.assertEqual(
            'aef:b01:506:1001:ffff:9997:55:170',
            f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
        )

    # XXX The following don't test module-level functionality...

    def _get_unused_port(self, bind_address='0.0.0.0'):
        """Use a temporary socket to elicit an unused ephemeral port.

        Args:
            bind_address: Hostname or IP address to search for a port on.

        Returns: A most likely to be unused port.
        """
        tempsock = socket.socket()
        tempsock.bind((bind_address, 0))
        host, port = tempsock.getsockname()
        tempsock.close()
        return port

    def testSockName(self):
        # Testing getsockname()
        port = self._get_unused_port()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        sock.bind(("0.0.0.0", port))
        name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it reasonable to get the host's addr in addition to 0.0.0.0.
        # At least for eCos. This is required for the S/390 to pass.
        try:
            my_ip_addr = socket.gethostbyname(socket.gethostname())
        except socket.error:
            # Probably name lookup wasn't set up right; skip this test
            return
        self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
        self.assertEqual(name[1], port)

    def testGetSockOpt(self):
        # Testing getsockopt()
        # We know a socket should start without reuse==0
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertFalse(reuse != 0, "initial mode is reuse")

    def testSetSockOpt(self):
        # Testing setsockopt()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertFalse(reuse == 0, "failed to set reuse mode")

    def testSendAfterClose(self):
        # testing send() after close() with timeout
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        sock.close()
        self.assertRaises(socket.error, sock.send, "spam")

    def testNewAttributes(self):
        # testing .family, .type and .protocol
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.assertEqual(sock.family, socket.AF_INET)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
        sock.close()

    def test_getsockaddrarg(self):
        # Out-of-range port numbers must raise OverflowError from bind().
        host = '0.0.0.0'
        port = self._get_unused_port(bind_address=host)
        big_port = port + 65536
        neg_port = port - 65536
        sock = socket.socket()
        try:
            self.assertRaises(OverflowError, sock.bind, (host, big_port))
            self.assertRaises(OverflowError, sock.bind, (host, neg_port))
            sock.bind((host, port))
        finally:
            sock.close()

    @unittest.skipUnless(os.name == "nt", "Windows specific")
    def test_sock_ioctl(self):
        self.assertTrue(hasattr(socket.socket, 'ioctl'))
        self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
        self.assertTrue(hasattr(socket, 'RCVALL_ON'))
        self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
        self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
        s = socket.socket()
        self.addCleanup(s.close)
        self.assertRaises(ValueError, s.ioctl, -1, None)
        s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))

    def testGetaddrinfo(self):
        # Exercise the host/port/family/socktype/proto/flags parameters.
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if SUPPORTS_IPV6:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        socket.getaddrinfo(HOST, "http")
        socket.getaddrinfo(HOST, 80)
        socket.getaddrinfo(HOST, None)
        # test family and socktype filters
        infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
        for family, _, _, _, _ in infos:
            self.assertEqual(family, socket.AF_INET)
        infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        for _, socktype, _, _, _ in infos:
            self.assertEqual(socktype, socket.SOCK_STREAM)
        # test proto and flags arguments
        socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        # a server willing to support both IPv4 and IPv6 will
        # usually do this
        socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)

    def check_sendall_interrupted(self, with_timeout):
        # Verify sendall() behavior when interrupted by SIGALRM, with and
        # without a socket timeout set.
        # socketpair() is not stricly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * (1024**2))
            if with_timeout:
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(socket.timeout, c.sendall, b"x" * (1024**2))
        finally:
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()

    def test_sendall_interrupted(self):
        self.check_sendall_interrupted(False)

    def test_sendall_interrupted_with_timeout(self):
        self.check_sendall_interrupted(True)

    def testListenBacklog0(self):
        # A backlog of 0 must be accepted by listen().
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.bind((HOST, 0))
        # backlog = 0
        srv.listen(0)
        srv.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
    """Basic send/recv tests over an established TCP connection.

    Each test* method (server side) is paired with a _test* method that
    runs concurrently in the client thread.
    """

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = ''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, 'f' * 2048)

    def _testSendAll(self):
        big_chunk = 'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        if not hasattr(socket, "fromfd"):
            return  # On Windows, this doesn't exist
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
    """Basic sendto/recv/recvfrom tests over UDP."""

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
    """Verify a peer close is observed as readable EOF ('' from recv)."""

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()

        sd = self.cli
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        self.assertEqual(sd.recv(1), '')

    def _testClose(self):
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
    """Send/recv in both directions across a socketpair."""

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def testRecv(self):
        msg = self.serv.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        msg = self.cli.recv(1024)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Behaviour of TCP sockets switched to non-blocking mode."""

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def testSetBlocking(self):
        # Testing whether set blocking works: a non-blocking accept() with no
        # pending connection must return (with an error) almost immediately.
        self.serv.setblocking(0)
        start = time.time()
        try:
            self.serv.accept()
        except socket.error:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")

    def _testSetBlocking(self):
        pass

    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)
        try:
            conn, addr = self.serv.accept()
        except socket.error:
            # Expected: no connection is pending yet.
            pass
        else:
            self.fail("Error trying to do non-blocking accept.")
        # Once select() reports the listening socket readable, accept()
        # must succeed without blocking.
        read, write, err = select.select([self.serv], [], [])
        if self.serv in read:
            conn, addr = self.serv.accept()
            conn.close()
        else:
            self.fail("Error trying to do accept after select.")

    def _testAccept(self):
        # Delay so the server's first (expected-to-fail) accept() runs first.
        time.sleep(0.1)
        self.cli.connect((HOST, self.port))

    def testConnect(self):
        # Testing non-blocking connect
        conn, addr = self.serv.accept()
        conn.close()

    def _testConnect(self):
        self.cli.settimeout(10)
        self.cli.connect((HOST, self.port))

    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        conn.setblocking(0)
        try:
            msg = conn.recv(len(MSG))
        except socket.error:
            # Expected: no data has arrived yet.
            pass
        else:
            self.fail("Error trying to do non-blocking recv.")
        read, write, err = select.select([conn], [], [])
        if conn in read:
            msg = conn.recv(len(MSG))
            conn.close()
            self.assertEqual(msg, MSG)
        else:
            self.fail("Error during select call to non-blocking socket.")

    def _testRecv(self):
        self.cli.connect((HOST, self.port))
        # Delay so the server's first (expected-to-fail) recv() runs first.
        time.sleep(0.1)
        self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
    """Tests of the file-like object returned by socket.makefile().

    Subclasses override ``bufsize`` to exercise the different buffering
    modes of the same test battery.
    """

    bufsize = -1 # Use default buffer size

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def setUp(self):
        SocketConnectedTest.setUp(self)
        # Server side reads through a file object wrapping the connection.
        self.serv_file = self.cli_conn.makefile('rb', self.bufsize)

    def tearDown(self):
        self.serv_file.close()
        self.assertTrue(self.serv_file.closed)
        self.serv_file = None
        SocketConnectedTest.tearDown(self)

    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        # Client side writes through a file object.
        self.cli_file = self.serv_conn.makefile('wb')

    def clientTearDown(self):
        self.cli_file.close()
        self.assertTrue(self.cli_file.closed)
        self.cli_file = None
        SocketConnectedTest.clientTearDown(self)

    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.serv_file.read(len(MSG)-3)
        second_seg = self.serv_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, MSG)

    def _testSmallRead(self):
        self.cli_file.write(MSG)
        self.cli_file.flush()

    def testFullRead(self):
        # read until EOF
        msg = self.serv_file.read()
        self.assertEqual(msg, MSG)

    def _testFullRead(self):
        self.cli_file.write(MSG)
        # Closing the writing end produces EOF for the reader.
        self.cli_file.close()

    def testUnbufferedRead(self):
        # Performing unbuffered file read test (one byte at a time)
        buf = ''
        while 1:
            char = self.serv_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, MSG)

    def _testUnbufferedRead(self):
        self.cli_file.write(MSG)
        self.cli_file.flush()

    def testReadline(self):
        # Performing file readline test
        line = self.serv_file.readline()
        self.assertEqual(line, MSG)

    def _testReadline(self):
        self.cli_file.write(MSG)
        self.cli_file.flush()

    def testReadlineAfterRead(self):
        # readline() must continue exactly where read() left off.
        a_baloo_is = self.serv_file.read(len("A baloo is"))
        self.assertEqual("A baloo is", a_baloo_is)
        _a_bear = self.serv_file.read(len(" a bear"))
        self.assertEqual(" a bear", _a_bear)
        line = self.serv_file.readline()
        self.assertEqual("\n", line)
        line = self.serv_file.readline()
        self.assertEqual("A BALOO IS A BEAR.\n", line)
        line = self.serv_file.readline()
        self.assertEqual(MSG, line)

    def _testReadlineAfterRead(self):
        self.cli_file.write("A baloo is a bear\n")
        self.cli_file.write("A BALOO IS A BEAR.\n")
        self.cli_file.write(MSG)
        self.cli_file.flush()

    def testReadlineAfterReadNoNewline(self):
        # readline() at EOF with no trailing newline returns the remainder.
        end_of_ = self.serv_file.read(len("End Of "))
        self.assertEqual("End Of ", end_of_)
        line = self.serv_file.readline()
        self.assertEqual("Line", line)

    def _testReadlineAfterReadNoNewline(self):
        self.cli_file.write("End Of Line")

    def testClosedAttr(self):
        self.assertTrue(not self.serv_file.closed)

    def _testClosedAttr(self):
        self.assertTrue(not self.cli_file.closed)
class FileObjectInterruptedTestCase(unittest.TestCase):
    """Test that the file object correctly handles EINTR internally."""

    class MockSocket(object):
        # Stand-in for a real socket: each recv() call pops and invokes the
        # next callable from recv_funcs, so a test can script an EINTR at an
        # exact position in the byte stream.
        def __init__(self, recv_funcs=()):
            # A generator that returns callables that we'll call for each
            # call to recv().
            self._recv_step = iter(recv_funcs)

        def recv(self, size):
            return self._recv_step.next()()

    @staticmethod
    def _raise_eintr():
        raise socket.error(errno.EINTR)

    def _test_readline(self, size=-1, **kwargs):
        # EINTR in the middle of a line must be retried transparently.
        mock_sock = self.MockSocket(recv_funcs=[
                lambda : "This is the first line\nAnd the sec",
                self._raise_eintr,
                lambda : "ond line is here\n",
                lambda : "",
            ])
        fo = socket._fileobject(mock_sock, **kwargs)
        self.assertEqual(fo.readline(size), "This is the first line\n")
        self.assertEqual(fo.readline(size), "And the second line is here\n")

    def _test_read(self, size=-1, **kwargs):
        # Same script as _test_readline, consumed via read().
        mock_sock = self.MockSocket(recv_funcs=[
                lambda : "This is the first line\nAnd the sec",
                self._raise_eintr,
                lambda : "ond line is here\n",
                lambda : "",
            ])
        fo = socket._fileobject(mock_sock, **kwargs)
        self.assertEqual(fo.read(size), "This is the first line\n"
                          "And the second line is here\n")

    def test_default(self):
        self._test_readline()
        self._test_readline(size=100)
        self._test_read()
        self._test_read(size=100)

    def test_with_1k_buffer(self):
        self._test_readline(bufsize=1024)
        self._test_readline(size=100, bufsize=1024)
        self._test_read(bufsize=1024)
        self._test_read(size=100, bufsize=1024)

    def _test_readline_no_buffer(self, size=-1):
        mock_sock = self.MockSocket(recv_funcs=[
                lambda : "aa",
                lambda : "\n",
                lambda : "BB",
                self._raise_eintr,
                lambda : "bb",
                lambda : "",
            ])
        fo = socket._fileobject(mock_sock, bufsize=0)
        self.assertEqual(fo.readline(size), "aa\n")
        self.assertEqual(fo.readline(size), "BBbb")

    def test_no_buffer(self):
        self._test_readline_no_buffer()
        self._test_readline_no_buffer(size=4)
        self._test_read(bufsize=0)
        self._test_read(size=100, bufsize=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that httplib relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0 # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.serv_file.readline() # first line
        self.assertEqual(line, "A. " + MSG) # first line
        self.serv_file = self.cli_conn.makefile('rb', 0)
        line = self.serv_file.readline() # second line
        self.assertEqual(line, "B. " + MSG) # second line

    def _testUnbufferedReadline(self):
        self.cli_file.write("A. " + MSG)
        self.cli_file.write("B. " + MSG)
        self.cli_file.flush()
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat FileObjectClassTestCase with line buffering."""

    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat FileObjectClassTestCase with a tiny buffer."""

    bufsize = 2 # Exercise the buffering code
class NetworkConnectionTest(object):
    """Prove network connection.

    Mixin that replaces the plain connect() client setup with
    socket.create_connection(), so an existing test battery exercises
    the create_connection() code path.
    """

    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.

    MRO note: NetworkConnectionTest comes first so its clientSetUp()
    (using create_connection) overrides BasicTCPTest's.
    """
class NetworkConnectionNoServer(unittest.TestCase):
    """Error behaviour of connect()/create_connection() with no listener."""

    class MockSocket(socket.socket):
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            # Always restore the real socket class, even when the test fails.
            socket.socket = old_socket

    def test_connect(self):
        port = test_support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(socket.error) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = test_support.find_unused_port()
        with self.assertRaises(socket.error) as cm:
            socket.create_connection((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Attributes of sockets returned by socket.create_connection().

    The server side only needs to accept-and-close (``_justAccept``); all
    real assertions run in the client-thread ``_test*`` methods.
    """

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = test_support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        # 2 == socket.AF_INET
        self.assertEqual(self.cli.family, 2)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            # Restore the global default even if create_connection failed.
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        # Same as above but passing the timeout positionally.
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    """Timeout behaviour of sockets created with create_connection()."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server waits 3 seconds before answering; the client either has no
        # timeout (Inside) or a 1-second timeout (Outside).
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send("done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, "done!")

    def _testOutsideTimeout(self):
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        # The 1-second timeout expires before the server's 3-second sleep ends.
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class Urllib2FileobjectTest(unittest.TestCase):

    # urllib2.HTTPHandler has "borrowed" socket._fileobject, and requires that
    # it close the socket if the close c'tor argument is true

    def testClose(self):
        class MockSocket:
            closed = False
            def flush(self): pass
            def close(self): self.closed = True

        # must not close unless we request it: the original use of _fileobject
        # by module socket requires that the underlying socket not be closed until
        # the _socketobject that created the _fileobject is closed
        s = MockSocket()
        f = socket._fileobject(s)
        f.close()
        self.assertTrue(not s.closed)

        # With close=True the wrapper owns the socket and must close it.
        s = MockSocket()
        f = socket._fileobject(s, close=True)
        f.close()
        self.assertTrue(s.closed)
class TCPTimeoutTest(SocketTCPTest):
    """Timeout behaviour of a TCP listening socket."""

    def testTCPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # A zero timeout means non-blocking: accept() with no pending
        # connection must raise socket.error, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except socket.error:
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        if not hasattr(signal, "alarm"):
            return                  # can only test on *nix
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
            try:
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of a UDP socket.

    Fix: this class previously derived from SocketTCPTest, so the "UDP"
    timeout tests actually exercised a TCP listening socket.  Deriving
    from SocketUDPTest gives the recv() calls below a real datagram
    socket, matching the test names and failure messages.
    """

    def testUDPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # A zero timeout means non-blocking: recv() with nothing queued must
        # raise socket.error, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except socket.error:
            ok = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity-check the socket exception hierarchy."""

    def testExceptionTree(self):
        # Every socket exception must ultimately derive from socket.error,
        # which itself must be a regular Exception subclass.
        hierarchy = [
            (socket.error, Exception),
            (socket.herror, socket.error),
            (socket.gaierror, socket.error),
            (socket.timeout, socket.error),
        ]
        for derived, base in hierarchy:
            self.assertTrue(issubclass(derived, base))
class TestLinuxAbstractNamespace(unittest.TestCase):
    """AF_UNIX sockets bound in the Linux abstract namespace (leading NUL)."""

    UNIX_PATH_MAX = 108  # kernel limit on sun_path, including the NUL marker

    def testLinuxAbstractNamespace(self):
        address = "\x00python-test-hello\x00\xff"
        s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s1.bind(address)
        s1.listen(1)
        s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s2.connect(s1.getsockname())
        s1.accept()
        # Abstract addresses round-trip unchanged through the kernel.
        self.assertEqual(s1.getsockname(), address)
        self.assertEqual(s2.getpeername(), address)

    def testMaxName(self):
        # The longest representable abstract name must bind successfully.
        address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.bind(address)
        self.assertEqual(s.getsockname(), address)

    def testNameOverflow(self):
        # One byte longer than UNIX_PATH_MAX must be rejected.
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.assertRaises(socket.error, s.bind, address)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        # recv_into() an array buffer; only the first len(MSG) bytes matter.
        buf = array.array('c', ' '*1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf.tostring()[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        # Sending via a buffer() object raises a py3k warning; suppress it.
        with test_support.check_py3k_warnings():
            buf = buffer(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array('c', ' '*1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf.tostring()[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        with test_support.check_py3k_warnings():
            buf = buffer(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoMemoryview = _testRecvFromIntoArray
# Name-sequence parameters shared by the TIPC tests below.
TIPC_STYPE = 2000   # service type published by the server
TIPC_LOWER = 200    # lower bound of the published name sequence
TIPC_UPPER = 210    # upper bound of the published name sequence
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if test_support.verbose:
print "TIPC module is not loaded, please 'sudo modprobe tipc'"
return False
class TIPCTest (unittest.TestCase):
    """Connectionless (SOCK_RDM) send/receive over TIPC."""

    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Publish a name sequence the client can address by name.
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        # Send to the middle of the published range.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)

        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
class TIPCThreadableTest (unittest.TestCase, ThreadableTest):
    """Connection-oriented (SOCK_STREAM) transfer over TIPC."""

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen(5)
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
def test_main():
    """Build the test list (conditional on platform features) and run it."""
    tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
             TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
             UDPTimeoutTest ]

    tests.extend([
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        FileObjectInterruptedTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        Urllib2FileobjectTest,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
    ])
    # socketpair() exists only on some platforms.
    if hasattr(socket, "socketpair"):
        tests.append(BasicSocketPairTest)
    # The abstract socket namespace is Linux-specific.
    if sys.platform == 'linux2':
        tests.append(TestLinuxAbstractNamespace)
    # TIPC tests require the kernel module to be loaded.
    if isTipcAvailable():
        tests.append(TIPCTest)
        tests.append(TIPCThreadableTest)

    thread_info = test_support.threading_setup()
    test_support.run_unittest(*tests)
    test_support.threading_cleanup(*thread_info)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
m2candre/ansible-modules-extras | cloud/amazon/ec2_ami_copy.py | 63 | 6501 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_copy
short_description: copies AMI between AWS regions, return new image id
description:
- Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
version_added: "2.0"
options:
source_region:
description:
- the source region that AMI should be copied from
required: true
region:
description:
- the destination region that AMI should be copied to
required: true
aliases: ['aws_region', 'ec2_region', 'dest_region']
source_image_id:
description:
- the id of the image in source region that should be copied
required: true
name:
description:
- The name of the new image to copy
required: false
default: null
description:
description:
- An optional human-readable string describing the contents and purpose of the new AMI.
required: false
default: null
wait:
description:
- wait for the copied AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
required: false
default: 1200
tags:
description:
- a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
author: Amir Moulavi <amir.moulavi@gmail.com>
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic AMI Copy
- local_action:
module: ec2_ami_copy
source_region: eu-west-1
dest_region: us-east-1
source_image_id: ami-xxxxxxx
name: SuperService-new-AMI
description: latest patch
tags: '{"Name":"SuperService-new-AMI", "type":"SuperService"}'
wait: yes
register: image_id
'''
import sys
import time

try:
    import boto
    import boto.ec2
    from boto.vpc import VPCConnection
    HAS_BOTO = True
except ImportError:
    # Do NOT call module.fail_json() here: ``module`` does not exist yet at
    # import time, so the previous module-level fail_json() call raised a
    # NameError instead of reporting the missing dependency.  main() checks
    # HAS_BOTO once the AnsibleModule has been constructed.
    HAS_BOTO = False
def copy_image(module, ec2):
    """
    Copies an AMI

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    """

    source_region = module.params.get('source_region')
    source_image_id = module.params.get('source_image_id')
    name = module.params.get('name')
    description = module.params.get('description')
    tags = module.params.get('tags')
    wait_timeout = int(module.params.get('wait_timeout'))
    wait = module.params.get('wait')

    try:
        params = {'source_region': source_region,
                  'source_image_id': source_image_id,
                  'name': name,
                  'description': description
        }
        image_id = ec2.copy_image(**params).image_id
    except boto.exception.BotoServerError, e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # Two-phase wait: first until the API recognizes the new image id at all,
    # then until the copy reaches the 'available' state.
    img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait)

    img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait)

    register_tags_if_any(module, ec2, tags, image_id)

    module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True)
# register tags to the copied AMI in dest_region
def register_tags_if_any(module, ec2, tags, image_id):
    """Apply *tags* (a dict, possibly empty/None) to the copied AMI."""
    if not tags:
        return
    try:
        ec2.create_tags([image_id], tags)
    except Exception as e:
        module.fail_json(msg=str(e))
# wait here until the image is copied (i.e. the state becomes available
def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait):
    """Poll get_image() until the copy reaches 'available' or time runs out.

    When *wait* is false the current image object is returned immediately.
    """
    deadline = time.time() + wait_timeout
    while wait and deadline > time.time() and (img is None or img.state != 'available'):
        img = ec2.get_image(image_id)
        time.sleep(3)
    if wait and deadline <= time.time():
        # waiting took too long
        module.fail_json(msg="timed out waiting for image to be copied")
    return img
# wait until the image is recognized.
def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait):
    """Poll get_image() until EC2 starts recognizing the new image id.

    Right after copy_image() the API may not know the id yet and raises
    InvalidAMIID.NotFound; while *wait* is true we retry roughly once per
    second up to wait_timeout attempts, otherwise any error fails the module.
    """
    for i in range(wait_timeout):
        try:
            return ec2.get_image(image_id)
        except boto.exception.EC2ResponseError, e:
            # This exception we expect initially right after registering the copy with EC2 API
            if 'InvalidAMIID.NotFound' in e.error_code and wait:
                time.sleep(1)
            else:
                # On any other exception we should fail
                module.fail_json(
                    msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str(
                        e))
    else:
        # Loop exhausted without ever getting a successful lookup.
        module.fail_json(msg="timed out waiting for image to be recognized")
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
source_region=dict(required=True),
source_image_id=dict(required=True),
name=dict(),
description=dict(default=""),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=1200),
tags=dict(type='dict')))
module = AnsibleModule(argument_spec=argument_spec)
try:
ec2 = ec2_connect(module)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
try:
region, ec2_url, boto_params = get_aws_connection_info(module)
vpc = connect_to_aws(boto.vpc, region, **boto_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
if not region:
module.fail_json(msg="region must be specified")
copy_image(module, ec2)
# import module snippets
# Ansible inlines these wildcard imports into the module at runtime; they
# provide AnsibleModule, ec2_argument_spec, ec2_connect, etc.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# Ansible modules execute main() unconditionally (no __main__ guard by
# convention for this module generation).
main()
| gpl-3.0 |
aaiyer/bugseverywhere | libbe/command/due.py | 4 | 4003 | # Copyright (C) 2009-2012 Chris Ball <cjb@laptop.org>
# W. Trevor King <wking@tremily.us>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import libbe
import libbe.command
import libbe.command.util
import libbe.util.utility

# Prefix marking a due-date entry inside a bug's extra_strings list.
DUE_TAG = 'DUE:'
class Due (libbe.command.Command):
    """Set bug due dates

    >>> import sys
    >>> import libbe.bugdir
    >>> bd = libbe.bugdir.SimpleBugDir(memory=False)
    >>> io = libbe.command.StringInputOutput()
    >>> io.stdout = sys.stdout
    >>> ui = libbe.command.UserInterface(io=io)
    >>> ui.storage_callbacks.set_storage(bd.storage)
    >>> cmd = Due(ui=ui)

    >>> ret = ui.run(cmd, args=['/a'])
    No due date assigned.
    >>> ret = ui.run(cmd, args=['/a', 'Thu, 01 Jan 1970 00:00:00 +0000'])
    >>> ret = ui.run(cmd, args=['/a'])
    Thu, 01 Jan 1970 00:00:00 +0000
    >>> ret = ui.run(cmd, args=['/a', 'none'])
    >>> ret = ui.run(cmd, args=['/a'])
    No due date assigned.
    >>> ui.cleanup()
    >>> bd.cleanup()
    """
    name = 'due'

    def __init__(self, *args, **kwargs):
        libbe.command.Command.__init__(self, *args, **kwargs)
        self.args.extend([
                libbe.command.Argument(
                    name='bug-id', metavar='BUG-ID',
                    completion_callback=libbe.command.util.complete_bug_id),
                libbe.command.Argument(
                    name='due', metavar='DUE', optional=True),
                ])

    def _run(self, **params):
        # Resolve the user-supplied bug id to a concrete bug object.
        bugdirs = self._get_bugdirs()
        bugdir,bug,comment = (
            libbe.command.util.bugdir_bug_comment_from_user_id(
                bugdirs, params['bug-id']))
        if params['due'] == None:
            # No DUE argument: print the current due date (if any).
            due_time = get_due(bug)
            if due_time is None:
                print >> self.stdout, 'No due date assigned.'
            else:
                print >> self.stdout, libbe.util.utility.time_to_str(due_time)
        else:
            # The literal string 'none' clears the due date.
            if params['due'] == 'none':
                remove_due(bug)
            else:
                due_time = libbe.util.utility.str_to_time(params['due'])
                set_due(bug, due_time)

    def _long_help(self):
        return """
If no DATE is specified, the bug's current due date is printed.  If
DATE is specified, it will be assigned to the bug.
"""
# internal helper functions

def _generate_due_string(time):
    """Serialize *time* into the extra-string format 'DUE:<timestamp>'."""
    return DUE_TAG + libbe.util.utility.time_to_str(time)

def _parse_due_string(string):
    """Inverse of _generate_due_string(): recover the time value."""
    assert string.startswith(DUE_TAG)
    return libbe.util.utility.str_to_time(string[len(DUE_TAG):])
# functions exposed to other modules

def get_due(bug):
    """Return the bug's due time, or None when no due date is stored.

    Raises an Exception when the bug carries more than one DUE entry.
    """
    matched = [_parse_due_string(s) for s in bug.extra_strings
               if s.startswith(DUE_TAG)]
    if not matched:
        return None
    if len(matched) > 1:
        raise Exception('Several due dates for %s?:\n %s'
                        % (bug.uuid, '\n '.join(matched)))
    return matched[0]
def remove_due(bug):
    """Strip every DUE entry from the bug's extra strings."""
    estrs = bug.extra_strings
    for stale in [s for s in estrs if s.startswith(DUE_TAG)]:
        estrs.remove(stale)
    bug.extra_strings = estrs # reassign to notice change
def set_due(bug, time):
    # Replace any existing due date before appending the new entry, so a bug
    # never carries more than one DUE string.
    remove_due(bug)
    estrs = bug.extra_strings
    estrs.append(_generate_due_string(time))
    bug.extra_strings = estrs # reassign to notice change
| gpl-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/macpath.py | 17 | 6255 | """Pathname and path-related operations for the Macintosh."""
import os
import warnings
from stat import *
import genericpath
from genericpath import *

# Public API.  Fix: "ismount" is a documented public function defined below
# but was missing from this list, so ``from macpath import *`` did not
# export it (unlike the other is*() predicates).
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames"]

# strings representing various path-related bits and pieces
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
# Normalize the case of a pathname.  Dummy in Posix, but <s>.lower() here.

def normcase(path):
    """Lowercase *path*; classic Mac filesystems are case-insensitive."""
    return path.lower()
def isabs(s):
    """Return true if a path is absolute.
    On the Mac, relative paths begin with a colon,
    but as a special case, paths with no colons at all are also relative.
    Anything else is absolute (the string up to the first colon is the
    volume name)."""
    # Absolute iff the path contains a colon and does not start with one.
    return s[:1] != ':' and ':' in s
def join(s, *p):
    """Join pathname components, inserting ':' separators where needed.

    An absolute component (contains a colon, does not start with one)
    resets the result, mirroring os.path.join() semantics.
    """
    result = s
    for part in p:
        # An empty accumulator or an absolute component restarts the path.
        if not result or (':' in part and not part.startswith(':')):
            result = part
            continue
        tail = part[1:] if part.startswith(':') else part
        if ':' not in result:
            result = ':' + result
        if not result.endswith(':'):
            result = result + ':'
        result = result + tail
    return result
def split(s):
    """Split a pathname into two parts: the directory leading up to the final
    bit, and the basename (the filename, without colons, in that directory).
    The result (s, t) is such that join(s, t) yields the original argument."""

    if ':' not in s:
        return '', s
    # Cut at the last colon; everything after it is the basename.
    last = s.rfind(':')
    head, tail = s[:last], s[last + 1:]
    # A single-component head gets its trailing colon back so that
    # join(head, tail) reproduces the input.
    if head and ':' not in head:
        head = head + ':'
    return head, tail
def splitext(p):
    # Delegate to the shared implementation, passing this module's
    # separators (':' component separator, no altsep, '.' extension sep).
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse the generic docstring so help() matches posixpath/ntpath.
splitext.__doc__ = genericpath._splitext.__doc__
def splitdrive(p):
    """Split a pathname into a drive specification and the rest of the
    path.  Useful on DOS/Windows/NT; on the Mac the drive part is always
    empty (the volume name is deliberately not treated as a drive, since
    it lacks the syntactic and semantic oddities of DOS drive letters,
    such as a separate current directory per drive)."""
    return '', p
# Short interfaces to split()
def dirname(s):
    """Return the directory portion of pathname *s*."""
    head, _tail = split(s)
    return head
def basename(s):
    """Return the final component of pathname *s*."""
    _head, tail = split(s)
    return tail
def ismount(s):
    """Return whether *s* names a mount point, i.e. a bare volume
    reference such as 'Volume:' (absolute, with an empty basename)."""
    if not isabs(s):
        return False
    head, tail = split(s)
    return tail == ''
def islink(s):
    """Return true if the pathname refers to a symbolic link (alias)."""
    try:
        import Carbon.File
        return Carbon.File.ResolveAliasFile(s, 0)[2]
    except Exception:
        # Carbon is unavailable outside classic MacPython, and
        # ResolveAliasFile raises for nonexistent paths; in either case
        # the path is not a resolvable alias.  (Was a bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.)
        return False
# Is `stat`/`lstat` a meaningful difference on the Mac?  Using lstat is
# safe in any case.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links."""
    try:
        os.lstat(path)
    except os.error:
        return False
    else:
        return True
def expandvars(path):
    """No-op: environment variables are never expanded on the Mac.

    Present only for interface compatibility with posixpath/ntpath.
    """
    return path
def expanduser(path):
    """No-op: there is no '~' expansion on the Mac.

    Present only for interface compatibility with posixpath/ntpath.
    """
    return path
# Raised by normpath() when '::' appears directly after a volume name.
class norm_error(Exception):
    """Path cannot be normalized"""
def normpath(s):
    """Normalize a pathname.  Will return the same result for
    equivalent paths.

    Collapses 'name::' (parent-of-child) pairs and strips a redundant
    trailing colon.  Raises norm_error for '::' directly after a volume
    name, which cannot be resolved.
    """
    if ":" not in s:
        # colon-free paths are relative; make that explicit
        return ":" + s

    comps = s.split(":")
    i = 1
    while i < len(comps)-1:
        if comps[i] == "" and comps[i-1] != "":
            if i > 1:
                # 'parent:child::' -> drop child and the empty component
                del comps[i-1:i+1]
                i = i - 1
            else:
                # Fixed: the Python-2-only `raise Cls, msg` statement is
                # replaced by the call form, valid in both Python 2 and 3.
                raise norm_error('Cannot use :: immediately after volume name')
        else:
            i = i + 1

    s = ":".join(comps)

    # remove trailing ":" except for ":" and "Volume:"
    if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
        s = s[:-1]
    return s
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..').  func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting.  No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func.  It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics.  Passing None for arg is common."""
    # NOTE(review): warnpy3k exists only on Python 2; together with isdir
    # (pulled in via `from genericpath import *`) this makes the function
    # Python-2-only.  os.walk supersedes it.
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # unreadable directory: silently skip, matching posixpath.walk
        return
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        # recurse into real subdirectories only, never through aliases
        if isdir(name) and not islink(name):
            walk(name, func, arg)
def abspath(path):
    """Return an absolute path."""
    if not isabs(path):
        # NOTE(review): Python-2-only branch -- `unicode` and os.getcwdu()
        # do not exist on Python 3.
        if isinstance(path, unicode):
            cwd = os.getcwdu()
        else:
            cwd = os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
    """Return the canonical path, resolving Mac aliases component by
    component via Carbon; a plain absolute path where Carbon is absent."""
    path = abspath(path)
    try:
        import Carbon.File
    except ImportError:
        # no Carbon (non-classic-Mac build): nothing further to resolve
        return path
    if not path:
        return path
    components = path.split(':')
    path = components[0] + ':'
    # re-join one component at a time, resolving aliases as we go
    for c in components[1:]:
        path = join(path, c)
        try:
            path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
        except Carbon.File.Error:
            # component is not an alias (or cannot be resolved): keep as-is
            pass
    return path
supports_unicode_filenames = True
| gpl-2.0 |
raccoongang/edx-platform | lms/djangoapps/verify_student/services.py | 5 | 1435 | """
Implementation of "reverification" service to communicate with Reverification XBlock
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from opaque_keys.edx.keys import CourseKey
from student.models import CourseEnrollment, User
from .models import SoftwareSecurePhotoVerification
log = logging.getLogger(__name__)
class VerificationService(object):
    """
    Learner photo-verification service exposed to XBlocks.
    """

    def get_status(self, user_id):
        """
        Return the user's current photo verification status.

        Args:
            user_id: the user's id

        Returns: one of the following strings
            'none' - no such verification exists
            'expired' - verification has expired
            'approved' - verification has been approved
            'pending' - verification process is still ongoing
            'must_reverify' - verification has been denied and user must resubmit photos
        """
        # TODO: provide a photo verification abstraction so that this
        # isn't hard-coded to use Software Secure.
        learner = User.objects.get(id=user_id)
        return SoftwareSecurePhotoVerification.user_status(learner)

    def reverify_url(self):
        """
        Return the URL at which a user can re-verify themselves.
        """
        return reverse('verify_student_reverify')
| agpl-3.0 |
unreal666/outwiker | src/outwiker/gui/preferences/mainwindowpanel.py | 2 | 3199 | # -*- coding: utf-8 -*-
import os
import wx
from . import configelements
from outwiker.core.system import getImagesDir
from outwiker.gui.guiconfig import MainWindowConfig
from outwiker.gui.controls.formatctrl import FormatCtrl
from outwiker.gui.preferences.baseprefpanel import BasePrefPanel
class MainWindowPanel(BasePrefPanel):
    """Preferences page for main-window options: title format and
    status bar visibility."""

    def __init__(self, parent, application):
        super().__init__(parent)
        self.mainWindowConfig = MainWindowConfig(application.config)
        self._createGUI()
        self.LoadState()
        self.SetupScrolling()

    def _createGUI(self):
        # Single growable column: title-format row on top, checkbox below.
        main_sizer = wx.FlexGridSizer(cols=1)
        main_sizer.AddGrowableCol(0)
        self._createTitleFormatGUI(main_sizer)
        self._createStatusbarGUI(main_sizer)
        self.SetSizer(main_sizer)

    def _createStatusbarGUI(self, main_sizer):
        # Checkbox that toggles the visibility of the status bar.
        self.statusbarVisibleCheckBox = wx.CheckBox(
            self,
            label=_('Show status panel')
        )
        main_sizer.Add(self.statusbarVisibleCheckBox,
                       flag=wx.ALIGN_LEFT | wx.ALL,
                       border=2)

    def _createTitleFormatGUI(self, main_sizer):
        """
        Create the interface elements related to the main window
        title format.
        """
        # Placeholders the user can insert into the title format string.
        hints = [
            (u"{file}", _(u"Wiki file name")),
            (u"{page}", _(u"Page title")),
            (u"{subpath}", _(u"Relative path to current page")),
        ]
        self.titleFormatLabel = wx.StaticText(self,
                                              -1,
                                              _("Main window title format"))
        hintBitmap = wx.Bitmap(os.path.join(getImagesDir(), u"wand.png"))
        self.titleFormatText = FormatCtrl(
            self,
            self.mainWindowConfig.titleFormat.value,
            hints,
            hintBitmap)
        self.titleFormatSizer = wx.FlexGridSizer(1, 2, 0, 0)
        self.titleFormatSizer.Add(self.titleFormatLabel,
                                  0,
                                  wx.ALL | wx.ALIGN_CENTER_VERTICAL,
                                  2)
        self.titleFormatSizer.Add(self.titleFormatText,
                                  0,
                                  wx.ALL | wx.EXPAND,
                                  2)
        self.titleFormatSizer.AddGrowableCol(1)
        main_sizer.Add(self.titleFormatSizer, 1, wx.EXPAND, 0)

    def LoadState(self):
        """
        Load the page state from the config.
        """
        # Main window title format
        self.titleFormat = configelements.StringElement(
            self.mainWindowConfig.titleFormat,
            self.titleFormatText
        )
        self.statusbarVisible = configelements.BooleanElement(
            self.mainWindowConfig.statusbar_visible,
            self.statusbarVisibleCheckBox
        )

    def Save(self):
        """
        Save the page state to the config.
        """
        self.titleFormat.save()
        self.statusbarVisible.save()
| gpl-3.0 |
colloquium/spacewalk | client/rhel/rhn-client-tools/src/bin/rhnreg_ks.py | 1 | 7963 | #!/usr/bin/python
#
# Registration client for the Red Hat Network for useage with kickstart
# Copyright (c) 1999-2006 Red Hat, Inc. Distributed under GPL.
#
# Authors:
# Adrian Likins <alikins@redhat.com>
# James Bowes <jbowes@redhat.com>
#
# see the output of "--help" for the valid options.
#
# The contact info is in the form or a "key: value" one per line.
# valid keys are:
# reg_num, title, first_name, last_name, company,
# position, address1, address2, city, state, zip,
# country, phone, fax, contact_email, contact_mail,
# contact_phone, contact_fax, contact_special,
# contact_newsletter
#
#
#
import sys
import os
import gettext
_ = gettext.gettext
sys.path.append("/usr/share/rhn/")
from up2date_client import rhnreg
from up2date_client import hardware
from up2date_client import rpmUtils
from up2date_client import up2dateErrors
from up2date_client import rhncli
class RegisterKsCli(rhncli.RhnCli):
    """Non-interactive RHN registration client intended for kickstart.

    Parses command line options, registers the system (via activation
    key or username/password), then uploads hardware, package and
    virtualization profiles and finally kicks off rhnsd/rhn_check.
    """

    def __init__(self):
        super(RegisterKsCli, self).__init__()
        # NOTE(review): the trailing comma after each add_option() call is a
        # harmless expression-statement artifact, preserved as-is.
        self.optparser.add_option("--profilename", action="store",
            help=_("Specify a profilename")),
        self.optparser.add_option("--username", action="store",
            help=_("Specify a username")),
        self.optparser.add_option("--password", action="store",
            help=_("Specify a password")),
        self.optparser.add_option("--systemorgid", action="store",
            help=_("Specify an organizational id for this system")),
        self.optparser.add_option("--serverUrl", action="store",
            help=_("Specify a url to use as a server")),
        self.optparser.add_option("--sslCACert", action="store",
            help=_("Specify a file to use as the ssl CA cert")),
        self.optparser.add_option("--activationkey", action="store",
            help=_("Specify an activation key")),
        self.optparser.add_option("--use-eus-channel", action="store_true",
            help=_("Subscribe this system to the EUS channel tied to the system's redhat-release")),
        self.optparser.add_option("--contactinfo", action="store_true",
            default=False, help=_("[Deprecated] Read contact info from stdin")),
        self.optparser.add_option("--nohardware", action="store_true",
            default=False, help=_("Do not probe or upload any hardware info")),
        self.optparser.add_option("--nopackages", action="store_true",
            default=False, help=_("Do not profile or upload any package info")),
        self.optparser.add_option("--novirtinfo", action="store_true",
            default=False, help=_("Do not upload any virtualization info")),
        self.optparser.add_option("--norhnsd", action="store_true",
            default=False, help=_("Do not start rhnsd after completion")),
        self.optparser.add_option("--force", action="store_true", default=False,
            help=_("Register the system even if it is already registered")),

    def main(self):
        """Run the whole registration sequence; exits non-zero on error."""
        if self.options.serverUrl:
            rhnreg.cfg.set("serverURL", self.options.serverUrl)
        if self.options.sslCACert:
            rhnreg.cfg.set("sslCACert", self.options.sslCACert)
        # Credentials are mandatory: either an activation key or both a
        # username and a password.
        if not (self.options.activationkey or
                (self.options.username and self.options.password)):
            print _("A username and password are required "\
                "to register a system.")
            sys.exit(-1)
        if rhnreg.registered() and not self.options.force:
            print _("This system is already registered. Use --force to override")
            sys.exit(-1)
        rhnreg.getCaps()
        if not self.options.nopackages:
            # Only send per-package arch info if the server understands it.
            getArch = 0
            if rhnreg.cfg['supportsExtendedPackageProfile']:
                getArch = 1
            packageList = rpmUtils.getInstalledPackageList(getArch=getArch)
        else:
            packageList = []
        hardwareList = hardware.Hardware()
        if self.options.profilename:
            profilename = self.options.profilename
        else:
            profilename = RegisterKsCli.__generateProfileName(hardwareList)
        other = {}
        if self.options.systemorgid:
            other['org_id'] = self.options.systemorgid
        # Try to get the virt uuid and put it in "other".
        (virt_uuid, virt_type) = rhnreg.get_virt_info()
        if not virt_uuid is None:
            other['virt_uuid'] = virt_uuid
            other['virt_type'] = virt_type

        # If specified, send up the EUS channel label for subscription.
        if self.options.use_eus_channel:
            if self.options.activationkey:
                print _("Usage of --use-eus-channel option with --activationkey is not supported. Please use username and password instead.")
                sys.exit(-1)
            if not rhnreg.server_supports_eus():
                print _("The server you are registering against does not support EUS.")
                sys.exit(-1)
            channels = rhnreg.getAvailableChannels(self.options.username,
                                                   self.options.password)
            other['channel'] = channels['default_channel']

        try:
            if self.options.activationkey:
                systemId = rhnreg.registerSystem(token = self.options.activationkey,
                                profileName = profilename,
                                other = other)
            else:
                systemId = rhnreg.registerSystem(self.options.username,
                                self.options.password, profilename, other = other)
        except (up2dateErrors.AuthenticationTicketError,
                up2dateErrors.RhnUuidUniquenessError,
                up2dateErrors.CommunicationError,
                up2dateErrors.AuthenticationOrAccountCreationError), e:
            print "%s" % e.errmsg
            sys.exit(1)
        # collect hardware info, including hostname
        if not self.options.nohardware:
            rhnreg.sendHardware(systemId, hardwareList)
        if not self.options.nopackages:
            rhnreg.sendPackages(systemId, packageList)
        if self.options.contactinfo:
            print _("Warning: --contactinfo option has been deprecated. Please login to the server web user Interface and update your contactinfo. ")
        # write out the new id
        if isinstance(systemId, unicode):
            rhnreg.writeSystemId(unicode.encode(systemId, 'utf-8'))
        else:
            rhnreg.writeSystemId(systemId)
        # assume successful communication with server
        # remember to save the config options
        rhnreg.cfg.save()
        # Send virtualization information to the server. We must do this
        # *after* writing out the system id.
        if not self.options.novirtinfo:
            rhnreg.sendVirtInfo(systemId)
        # do this after writing out system id, bug #147513
        if not self.options.norhnsd:
            rhnreg.startRhnsd()
        RegisterKsCli.__runRhnCheck()

    @staticmethod
    def __generateProfileName(hardwareList):
        # Prefer the hostname, fall back to the IP address; bail out if
        # neither could be determined.
        hostname = None
        ipaddr = None
        profileName = None
        for hw in hardwareList:
            if hw['class'] == 'NETINFO':
                hostname = hw.get('hostname')
                ipaddr = hw.get('ipaddr')
        if hostname:
            profileName = hostname
        else:
            if ipaddr:
                profileName = ipaddr
        if not profileName:
            print _("A profilename was not specified, "\
                "and hostname and IP address could not be determined "\
                "to use as a profilename, please specify one.")
            sys.exit(-1)
        return profileName

    @staticmethod
    def __runRhnCheck():
        # Trigger an immediate check-in with the server.
        os.system("/usr/sbin/rhn_check")
# Script entry point: build the CLI object and run the registration.
if __name__ == "__main__":
    cli = RegisterKsCli()
    cli.run()
| gpl-2.0 |
darktears/chromium-crosswalk | tools/grit/grit/tool/postprocess_unittest.py | 61 | 1851 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit test that checks postprocessing of files.
Tests postprocessing by having the postprocessor
modify the grd data tree, changing the message name attributes.
'''
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import grit.tool.postprocess_interface
from grit.tool import rc2grd
class PostProcessingUnittest(unittest.TestCase):
  """Checks that a post_process hook can rewrite message names in the
  grd tree produced by rc2grd."""

  def testPostProcessing(self):
    rctext = '''STRINGTABLE
BEGIN
  DUMMY_STRING_1         "String 1"
  // Some random description
  DUMMY_STRING_2        "This text was added during preprocessing"
END
    '''
    tool = rc2grd.Rc2Grd()
    class DummyOpts(object):
      verbose = False
      extra_verbose = False
    tool.o = DummyOpts()
    tool.post_process = 'grit.tool.postprocess_unittest.DummyPostProcessor'
    # Fixed: the path literal previously contained '\r' (carriage return)
    # because it was not a raw string.
    result = tool.Process(rctext, r'.\resource.rc')
    # Fixed: failUnless is a deprecated alias of assertTrue.
    self.assertTrue(
        result.children[2].children[2].children[0].attrs['name'] == 'SMART_STRING_1')
    self.assertTrue(
        result.children[2].children[2].children[1].attrs['name'] == 'SMART_STRING_2')
class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor):
  '''
  Post processing replaces all message name attributes containing "DUMMY" to
  "SMART".
  '''

  def Process(self, rctext, rcpath, grdnode):
    pattern = re.compile(r'(DUMMY)(.*)')
    # messages live under release -> messages in the grd tree
    message_parent = grdnode.children[2].children[2]
    for message in message_parent.children:
      match = pattern.search(message.attrs['name'])
      if match:
        message.attrs['name'] = 'SMART' + match.group(2)
    return grdnode
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
DataBiosphere/data-explorer | api/data_explorer/test/test_elasticsearch_util.py | 1 | 1045 | from data_explorer.util import elasticsearch_util
def test_convert_to_index_name():
    """Dataset names are lowercased and space-separated into underscores."""
    result = elasticsearch_util.convert_to_index_name("Project Baseline")
    assert result == "project_baseline"
def test_range_to_number():
    """range_to_number returns the numeric lower bound of a range string."""
    cases = [
        ('0.1-0.2', 0.1),
        ('1', 1),
        ('10-20', 10),
        ('1.0M-1.9M', 1000000),
        ('10M-20M', 10000000),
        ('-20--10', -20),
        ('-10-0', -10),
    ]
    for range_str, expected in cases:
        assert elasticsearch_util.range_to_number(range_str) == expected
def test_number_to_range():
    """number_to_range renders an interval start/width as a range label."""
    cases = [
        (.1, .1, '0.1-0.2'),
        (1, 1, '1'),
        (10, 10, '10-19'),
        (1000000, 1000000, '1.0M-1.9M'),
        (10000000, 10000000, '10M-19M'),
        (10000000000, 10000000000, '10B-19B'),
    ]
    for interval_start, interval, expected in cases:
        assert elasticsearch_util.number_to_range(
            interval_start, interval) == expected
| bsd-3-clause |
ArteliaTelemac/PostTelemac | PostTelemac/meshlayerparsers/libs_telemac/parsers/parserKenue.py | 2 | 15448 | """@author Sebastien E. Bourban
"""
"""@note ... this work is based on a collaborative effort between
.________. ,--.
| | . ( (
|,-. / HR Wallingford EDF - LNHE / \_ \_/ .--.
/ \ / Howbery Park, 6, quai Watier \ ) /_ )
,. `' Wallingford, Oxfordshire 78401 Cedex `-'_ __ `--
/ \ / OX10 8BA, United Kingdom Chatou, France __/ \ \ `.
/ `-'| www.hrwallingford.com innovation.edf.com | ) ) )
!________! `--' `--
"""
"""@brief
Tools for handling Kenue native files in python.
Kenue and its related software (Blue Kenue, Greem Kenue, )
are property of the NRC Canadian Hydrualics Centre
"""
"""@details
Contains getI2S, getI3S and putI2S, putI3S, which read/write
python variables into ASCII I2S and I3S files
"""
"""@history 26/12/2011 -- Sebastien E. Bourban:
First trial at parsing I2S and I3S
"""
"""@history 09/01/2012 -- Sebastien E. Bourban:
Addition of XY and XYZ parsing
"""
"""@history 13/01/2012 -- Sebastien E. Bourban:
Creates InS class with associated methods including:
+ removeDuplicates (remove duplicated points based on proximity)
+ makeClockwise (make closed loops clockwise)
+ makeAntiClockwise (make closed loops anti-clockwise)
+ smoothSubdivise (add points and weigthed average move)
+ smoothSubsampleDistance (remove points based on proximity)
+ smoothSubsampleAngle (remove points based on flatness)
"""
"""@history 29/02/2012 -- Sebastien E. Bourban:
The type of a contour can now be checked automatically so that
mixed set can be stored into one I2S/I3S file. To use this
feature, type must be absent from the putInS call.
"""
# _____ ___________________________________________________
# ____/ Imports /__________________________________________________/
#
# ~~> dependencies towards standard python
import sys
import re
import numpy as np
from os import path
# ~~> dependencies towards other pytel/modules
from ..utilstelemac.files import getFileContent, putFileContent
from ..utilstelemac.progressbar import ProgressBar
from ..utilstelemac.geometry import isClose
# _____ __________________________________________
# ____/ Global Variables /_________________________________________/
#
# Header lines in Kenue files start with '#' or ':'.
ken_header = re.compile(r"[#:]")
# ':FileType xxx' instruction (three-letter type such as i2s/i3s/xyz).
asc_FileType = re.compile(r":FileType\s(?P<type>\b\w\w\w\b)") # \s(?P<after>.*?)\Z')
# ':AttributeName <n> <name>' instruction.
asc_AttributeName = re.compile(r":AttributeName\s(?P<number>\b(|[^a-zA-Z(,])(?:(\d+)(\b|[^a-zA-Z,)])))(?P<after>.*?)\Z")
# One leading integer followed by the rest of the line.
var_1int = re.compile(
    r'(?P<before>[^\'"]*?)\b(?P<number>[+-]?(|[^a-zA-Z(,])(?:(\d+)(\b|[^a-zA-Z,)])))(?P<after>[^.].*?)\Z'
)
# One, two and three leading floating-point numbers respectively.
var_1dbl = re.compile(
    r"(?P<number>[+-]?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))[\s,;]*(?P<after>.*?)\Z"
)
var_2dbl = re.compile(
    r"(?P<number1>[+-]?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))[\s,;]*(?P<number2>[+-]?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))(?P<after>.*?)\Z"
)
var_3dbl = re.compile(
    r"(?P<number1>[+-]?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))[\s,;]*(?P<number2>[+-]?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))[\s,;]*(?P<number3>[+-]?(|[^a-zA-Z(,])(?:(\d+(|\.)\d*[dDeE](\+|\-)?\d+|\d+\.\d+)(\b|[^a-zA-Z,)])))(?P<after>.*?)\Z"
)
# One double-quoted string.
var_1str = re.compile(r'(?P<string>)(".*?")[\s,;]*(?P<after>.*?)\Z')
# _____ ___________________________________________
# ____/ General Toolbox /__________________________________________/
#
# NOTE(review): the two .replace() calls are meant to collapse runs of
# blanks into single spaces, but their string arguments appear identical
# in this view (possibly whitespace-collapsed by extraction) -- as shown
# they would be no-ops; confirm the original multi-space literals.
def cleanSpaces(istr): # same as in parserFortran
    return istr.strip().replace(" ", " ").replace(" ", " ")
# _____ _______________________________
# ____/ Principal Class for I2S/I3S /______________________________/
#
"""
self.poly is a numpy object, while self.type is not.
"""
class InS:
def __init__(self, fileName):
# file parsing is based on the name of the extension
_, tail = path.splitext(fileName)
# ~~> Case of a Kenue type i2s/i3s file
if tail in [".i2s", ".i3s"]:
self.parseContent(fileName)
else:
print "\nThe polygon file extension is required to be either i2s or i3s"
sys.exit(1)
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Parse Content ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def parseContent(self, fileName):
# TODO: Read the whole header
# ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
core = getFileContent(fileName)
# ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
icore = 0
self.atrbut = {}
self.natrbut = 0
self.oatrbut = []
while re.match(ken_header, core[icore].strip()):
# ~~> instruction FileType
proc = re.match(asc_FileType, core[icore].strip())
if proc:
self.fileType = proc.group("type").lower()
# ~~> instruction AttributeName
proc = re.match(asc_AttributeName, core[icore].strip())
if proc:
self.natrbut += 1
if self.natrbut == int(proc.group("number")):
self.oatrbut.append(proc.group("after").strip())
self.atrbut.update({self.oatrbut[-1]: []})
else:
print "... Could not read the order of your Attributes:", core[icore]
sys.exit(1)
# ... more instruction coming ...
icore += 1
self.head = core[0:icore]
# /!\ icore starts the body
# ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This is also fairly fast, so you might not need a progress bar
self.poly = []
self.vals = []
self.type = []
self.npoin = 0
while icore < len(core):
if core[icore].strip() == "":
icore += 1
continue
# ~~> polygon head
proc = re.match(var_1int, core[icore].strip())
if not proc:
print "\nCould not parse the following polyline header: " + core[icore].strip()
sys.exit(1)
nrec = int(proc.group("number"))
a = proc.group("after").strip().split()
if len(a) != self.natrbut:
if self.natrbut != 0:
print "... Could not find the correct number of attribute:", core[
icore
].strip(), ", ", self.natrbut, " expected"
sys.exit(1)
else:
self.natrbut = len(a)
self.oatrbut = range(1, len(a) + 1)
self.atrbut = dict([(i + 1, [a[i]]) for i in range(len(a))])
else:
for i in range(len(self.oatrbut)):
self.atrbut[self.oatrbut[i]].append(a[i])
xyi = []
val = []
icore += 1
for irec in range(nrec):
nbres = core[icore + irec].strip().split()
proc = re.match(var_1dbl, nbres[0])
if not proc:
proc = re.match(var_1int, nbres[0])
if not proc:
print "\nCould not parse the following polyline record: " + core[icore + irec].strip()
sys.exit(1)
nbres[0] = float(proc.group("number"))
procd = re.match(var_1dbl, nbres[1])
proci = re.match(var_1int, nbres[1])
if procd:
nbres[1] = float(procd.group("number"))
elif proci:
nbres[1] = float(procd.group("number"))
xyi.append(nbres[:2])
val.append(nbres[2:])
if xyi != []:
cls = 0
if isClose(xyi[0], xyi[len(xyi) - 1], size=10):
xyi.pop(len(xyi) - 1)
val.pop(len(val) - 1)
cls = 1
self.poly.append(np.asarray(xyi, dtype=np.float))
self.vals.append(np.asarray(val, dtype=np.float))
self.type.append(cls)
self.npoin += len(xyi)
icore += nrec
self.npoly = len(self.poly)
# ~~ Parse attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# for o in self.oatrbut:
# proc = re.match(var_1int,self.atrbut[o][0])
# if not proc:
# proc = re.match(var_1dbl,self.atrbut[o][0])
# if proc: self.atrbut[o] = np.array( self.atrbut[o], dtype=np.float )
# else: self.atrbut[o] = np.array( self.atrbut[o], dtype=np.int )
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Write-up Content ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Note:
# + file parsing is based on the name of the extension
#
def putContent(self, fileName, head=[]):
# ~~> file extension processing
_, tail = path.splitext(fileName)
if tail[1:] != self.fileType:
if head != []:
head = ["\n".join(head).replace(":FileType " + self.fileType, ":FileType " + tail[1:])]
self.fileType = tail[1:]
# ~~> write head
if head != []:
core = head
else:
core = [
":FileType " + self.fileType + " ASCII EnSim 1.0",
":Application BlueKenue",
":Version 3.2.24",
":WrittenBy sebourban",
":CreationDate Thu, Dec 08, 2011 02:47 PM",
":Name " + path.basename(fileName),
#':AttributeName 1 level',
#':AttributeType 1 float',
#':AttributeUnits 1 m',
":EndHeader",
]
# ~~> look for closed lines
if self.type == []:
for ip in self.poly:
if isClose(ip[0][:2], ip[len(ip) - 1][:2]):
self.type.append(1)
else:
self.type.append(0)
# ~~> fill-up empty attributes
if self.atrbut == {}:
self.atrbut = {1: ["ArbitraryName1"]}
for _ in self.poly:
self.atrbut[1].append(0)
self.oatrbut = [1]
# ~~> fill-up attribute names
if head == []:
for i, o in zip(range(len(self.oatrbut)), self.oatrbut):
core.insert(-1, ":AttributeName " + repr(i + 1) + " " + repr(o))
# ~~ Write body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for i, ip, iv, it in zip(range(len(self.poly)), self.poly, self.vals, self.type):
il = len(ip)
if il != 0 and not isClose(ip[0], ip[len(ip) - 1]):
il += it
line = repr(il)
for o in self.oatrbut:
line = line + " " + str(self.atrbut[o][i])
core.append(line)
if self.fileType == "i2s":
for xyi in ip:
core.append(repr(xyi[0]) + " " + repr(xyi[1]))
if il != len(ip):
core.append(repr(ip[0][0]) + " " + repr(ip[0][1]))
elif self.fileType == "i3s":
if np.shape(iv)[1] == 0:
for xyi in ip:
core.append(repr(xyi[0]) + " " + repr(xyi[1]) + " 0.0")
if il != len(ip):
core.append(repr(ip[0][0]) + " " + repr(ip[0][1]) + " 0.0")
else:
for xyi, val in zip(ip, iv):
core.append(repr(xyi[0]) + " " + repr(xyi[1]) + " " + " ".join([repr(v) for v in val]))
if il != len(ip):
core.append(repr(ip[0][0]) + " " + repr(ip[0][1]) + " " + " ".join([repr(v) for v in iv[0]]))
# ~~ Put all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
putFileContent(fileName, core)
# _____ ___________________________________________
# ____/ Toolbox for XYZ /__________________________________________/
#
def getXYn(file):
    """Parse a Kenue XY/XYZ ASCII point file.

    Returns (head, fileType, xyz): the raw header lines, 'xy' or 'xyz',
    and the list of point coordinates as lists of floats.
    """
    # TODO: Read the whole header, for the time being head is copied
    #    over
    # TODO: Read multiple variables depending on type and on a list
    # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    core = getFileContent(file)
    # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    icore = 0
    fileType = None
    while re.match(ken_header, core[icore]):
        # ~~> instruction FileType
        proc = re.match(asc_FileType, core[icore])
        if proc:
            fileType = proc.group("type").lower()
        # ... more instruction coming ...
        icore += 1
    head = core[0:icore]
    # no :FileType instruction: deduce xy/xyz from the first record
    if fileType == None:
        proc = re.match(var_3dbl, core[icore] + " ")
        if not proc:
            proc = re.match(var_2dbl, core[icore] + " ")
            if not proc:
                print "\nCould not parse the first record: " + core[icore]
                sys.exit(1)
            else:
                fileType = "xy"
        else:
            fileType = "xyz"
    # /!\ icore starts the body
    # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # This is also fairly fast, so you might not need a progress bar
    xyz = [] # ; pbar = ProgressBar(maxval=len(core)).start()
    while icore < len(core):
        if fileType == "xy":
            proc = re.match(var_2dbl, core[icore] + " ")
            if not proc:
                print "\nCould not parse the following xyz record: " + core[icore]
                sys.exit(1)
            xyz.append([float(proc.group("number1")), float(proc.group("number2"))])
        elif fileType == "xyz":
            proc = re.match(var_3dbl, core[icore] + " ")
            if not proc:
                print "\nCould not parse the following xyz record: " + core[icore]
                sys.exit(1)
            xyz.append([float(proc.group("number1")), float(proc.group("number2")), float(proc.group("number3"))])
        icore += 1
    # pbar.finish()
    return head, fileType, xyz
def putXYn(file, head, fileType, xyz):
    """Write a Kenue xy/xyz point set to *file*.

    *head* is emitted verbatim, followed by one line per point in *xyz*
    ('x y' for fileType 'xy', 'x y z' for 'xyz').
    """
    # ~~ Write head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    core = head
    # if head != []: core = head
    # <else: core = [':FileType '+fileType+' ASCII EnSim 1.0',
    #   ':Application BlueKenue', ':Version 3.2.24',
    #   ':WrittenBy sebourban', ':CreationDate Thu, Dec 08, 2011 02:47 PM',
    #   ':Name ' + path.basename(file),
    #   ':AttributeUnits 1 m',
    #   ':EndHeader' ]
    # ~~ Write body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Fixed: the original iterated `for _ in xyz` but always indexed
    # xyz[0]/xyz[1], writing the first point len(xyz) times.
    if fileType == "xy":
        for pt in xyz:
            core.append(str(pt[0]) + " " + str(pt[1]))
    elif fileType == "xyz":
        for pt in xyz:
            core.append(str(pt[0]) + " " + str(pt[1]) + " " + str(pt[2]))
    # ~~ Put all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    putFileContent(file, core)
    return
| gpl-3.0 |
pombreda/seascope | src/web/backend/plugins/idutils/IdutilsProject.py | 2 | 4362 | #!/usr/bin/python
# Copyright (c) 2010 Anil Kumar
# All rights reserved.
#
# License: BSD
import sys, os, string, re
from ..PluginBase import PluginFeatureBase, ProjectBase, ConfigBase, QueryBase
from ..PluginBase import PluginProcess
from ..CtagsCache import CtagsThread
class IdutilsFeature(PluginFeatureBase):
	"""Describes the query features the idutils backend offers."""

	def __init__(self):
		PluginFeatureBase.__init__(self)
		# [label, icon-id] pairs for every query command the plugin exposes;
		# commented-out entries are features not supported by this backend.
		self.feat_desc = [
			['REF', ''],
			['DEF', ''],
			#['<--', '2'],
			['-->', '3'],
			#['TXT', '4'],
			['GREP', ''],
			['FIL', ''],
			['INC', '8'],
			['QDEF', ''],
			['CTREE', '12'],
			['CLGRAPH', '13'],
			['CLGRAPHD','14'],
			['FFGRAPH', '14'],
			['UPD', '25'],
		]
		# call-tree query variants: [command, arrow label, description]
		self.ctree_query_args = [
			['-->', '--> F', 'Calling tree' ],
			#['<--', 'F -->', 'Called tree' ],
			['REF', '==> F', 'Advanced calling tree' ],
		]

	def query_dlg_cb(self, req, cmd_str, in_opt):
		# Translate the query dialog's choices into (command, pattern,
		# option-list); substring searches are padded with glob wildcards
		# for file-name queries and '.*' for regex-based queries.
		opt = []
		if cmd_str != 'TXT' and req != '' and in_opt['substring']:
			opt.append('substring')
			if cmd_str == 'FIL':
				req = '*' + req + '*'
			else:
				req = '.*' + req + '.*'
		if in_opt['ignorecase']:
			opt.append('ignorecase')
		res = (cmd_str, req, opt)
		return res
class ConfigIdutils(ConfigBase):
	"""Project configuration specialised for the idutils backend."""

	def __init__(self):
		ConfigBase.__init__(self, 'idutils')
class ProjectIdutils(ProjectBase):
	"""Project implementation backed by an idutils ID database."""

	def __init__(self):
		ProjectBase.__init__(self)

	@staticmethod
	def _prj_new_or_open(conf):
		# Wire feature table, configuration and query engine together.
		prj = ProjectIdutils()
		prj.feat = IdutilsFeature()
		prj.conf = conf
		prj.qry = QueryIdutils(prj.conf, prj.feat)
		return (prj)

	@staticmethod
	def prj_new(proj_args):
		# Creating a project amounts to opening its directory.
		d = proj_args[0]
		prj = ProjectIdutils.prj_open(d)
		return prj

	@staticmethod
	def prj_open(proj_path):
		conf = ConfigIdutils()
		conf.proj_open(proj_path)
		prj = ProjectIdutils._prj_new_or_open(conf)
		return (prj)
class IdProcess(PluginProcess):
	"""Runs the idutils command line tools and parses their output."""

	def __init__(self, wdir, rq):
		PluginProcess.__init__(self, wdir, rq)
		self.name = 'idutils process'

	def parse_result(self, text, sig):
		# GREP output is already in the generic format handled by the base.
		if self.cmd_str in ['GREP']:
			return PluginProcess.parse_result(self, text, sig)
		#from datetime import datetime
		#t1 = datetime.now()
		text = re.split('\r?\n', text)
		#t2 = datetime.now()
		#print 'parse-split', t2 - t1
		if self.cmd_str == 'FIL':
			# fnid prints one project-relative file name per line
			res = [ ['', os.path.join(self.wdir, line), '', '' ] for line in text if line != '' ]
			return res
		res = []
		for line in text:
			if line == '':
				continue
			# lid grep-format output: file:lineno:text
			line = line.split(':', 2)
			line = ['<unknown>', os.path.join(self.wdir, line[0]), line[1], line[2]]
			res.append(line)
		#t3 = datetime.now()
		#print 'parse-loop', t3 - t2
		# Resolve the '<unknown>' symbol column using ctags information.
		res = CtagsThread(sig).apply_fix(self.cmd_str, res, ['<unknown>'])
		return res
# Name/path of the idutils `lid` binary; overridable via the environment.
idutils_lid_cmd = os.getenv('SEASCOPE_IDUTILS_LID_CMD', 'lid')
class QueryIdutils(QueryBase):
	"""Builds and runs idutils queries (lid/fnid) for the active project."""

	def __init__(self, conf, feat):
		QueryBase.__init__(self)
		self.conf = conf
		self.feat = feat

	def query(self, rquery):
		if (not self.conf):
			#or not self.conf.is_ready()):
			print "pm_query not is_ready"
			return None
		cmd_str = rquery['cmd']
		# plain GREP is handled generically by the base class
		if cmd_str in ['GREP']:
			return QueryBase.query(self, rquery)
		req = rquery['req']
		opt = rquery['opt']
		if opt == None:
			opt = []
		# default: symbol lookup through `lid` in grep output format
		pargs = [idutils_lid_cmd, '-R', 'grep']
		if cmd_str == 'FIL':
			# file-name lookup uses `fnid` instead
			pargs = ['fnid', '-S', 'newline']
		#elif cmd_str == 'TXT':
			#pargs += ['-l']
		elif cmd_str in ['-->', '<--']:
			pargs += ['-l']
		if 'substring' in opt:
			# glob padding for file names, regex padding otherwise
			if cmd_str == 'FIL':
				req = '*' + req + '*'
			else:
				req = '.*' + req + '.*'
			#pargs += ' -s'
		if 'ignorecase' in opt:
			if cmd_str == 'FIL':
				# fnid has no case-insensitive switch
				pass
			else:
				pargs += ['-i']
		pargs += [ '--', req ]
		qsig = IdProcess(self.conf.c_dir, [cmd_str, req]).run_query_process(pargs, req, rquery)
		return qsig

	def rebuild(self):
		if (not self.conf.is_ready()):
			print "pm_query not is_ready"
			return None
		# allow the mkid command line to be overridden via the environment
		pargs = os.getenv('SEASCOPE_IDUTILS_MKID_CMD', '').strip().split()
		if not len(pargs):
			pargs = [ 'mkid', '-s' ]
		qsig = IdProcess(self.conf.c_dir, None).run_rebuild_process(pargs)
		return qsig

	def query_fl(self):
		# list every file recorded in the ID database, if one exists
		if not os.path.exists(os.path.join(self.conf.c_dir, 'ID')):
			return []
		pargs = [ 'fnid', '-S', 'newline', '-f', 'ID' ]
		qsig = IdProcess(self.conf.c_dir, None).run_query_fl(pargs)
		return qsig

	def id_is_open(self):
		return self.conf != None

	def id_is_ready(self):
		return self.conf.is_ready()
| bsd-3-clause |
aclifton/cpeg853-gem5 | configs/dram/lat_mem_rd.py | 4 | 10356 | # Copyright (c) 2015-2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
import gzip
import optparse
import os
import m5
from m5.objects import *
from m5.util import addToPath
from m5.internal.stats import periodicStatDump
addToPath('../')
from common import MemConfig
addToPath('../../util')
import protolib
# this script is helpful to observe the memory latency for various
# levels in a cache hierarchy, and various cache and memory
# configurations, in essence replicating the lmbench lat_mem_rd thrash
# behaviour
# import the packet proto definitions, and if they are not found,
# attempt to generate them automatically
try:
import packet_pb2
except:
print "Did not find packet proto definitions, attempting to generate"
from subprocess import call
error = call(['protoc', '--python_out=configs/dram',
'--proto_path=src/proto', 'src/proto/packet.proto'])
if not error:
print "Generated packet proto definitions"
try:
import google.protobuf
except:
print "Please install the Python protobuf module"
exit(-1)
import packet_pb2
else:
print "Failed to import packet proto definitions"
exit(-1)
parser = optparse.OptionParser()
parser.add_option("--mem-type", type="choice", default="DDR3_1600_x64",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-size", action="store", type="string",
default="16MB",
help="Specify the memory size")
parser.add_option("--reuse-trace", action="store_true",
help="Prevent generation of traces and reuse existing")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# start by creating the system itself, using a multi-layer 2.0 GHz
# crossbar, delivering 64 bytes / 3 cycles (one header cycle) which
# amounts to 42.7 GByte/s per layer and thus per port
system = System(membus = SystemXBar(width = 32))
system.clk_domain = SrcClockDomain(clock = '2.0GHz',
voltage_domain =
VoltageDomain(voltage = '1V'))
mem_range = AddrRange(options.mem_size)
system.mem_ranges = [mem_range]
# do not worry about reserving space for the backing store
system.mmap_using_noreserve = True
# currently not exposed as command-line options, set here for now
options.mem_channels = 1
options.mem_ranks = 1
options.external_memory_system = 0
options.tlm_memory = 0
options.elastic_trace_en = 0
MemConfig.config_mem(options, system)
# there is no point slowing things down by saving any data
for ctrl in system.mem_ctrls:
ctrl.null = True
# the following assumes that we are using the native DRAM
# controller, check to be sure
if isinstance(ctrl, m5.objects.DRAMCtrl):
# make the DRAM refresh interval sufficiently infinite to avoid
# latency spikes
ctrl.tREFI = '100s'
# use the same concept as the utilisation sweep, and print the config
# so that we can later read it in
cfg_file_name = os.path.join(m5.options.outdir, "lat_mem_rd.cfg")
cfg_file = open(cfg_file_name, 'w')
# set an appropriate burst length in bytes
burst_size = 64
system.cache_line_size = burst_size
# lazy version to check if an integer is a power of two
def is_pow2(num):
    """Return True when num is a (positive) power of two.

    Uses the classic bit trick: a power of two has exactly one bit set,
    so clearing its lowest set bit yields zero.
    """
    if num == 0:
        return False
    return (num & (num - 1)) == 0
# assume we start every range at 0
max_range = int(mem_range.end)
# start at a size of 4 kByte, and go up till we hit the max, increase
# the step every time we hit a power of two
min_range = 4096
ranges = [min_range]
step = 1024
while ranges[-1] < max_range:
new_range = ranges[-1] + step
if is_pow2(new_range):
step *= 2
ranges.append(new_range)
# how many times to repeat the measurement for each data point
iterations = 2
# 150 ns in ticks, this is choosen to be high enough that transactions
# do not pile up in the system, adjust if needed
itt = 150 * 1000
# for every data point, we create a trace containing a random address
# sequence, so that we can play back the same sequence for warming and
# the actual measurement
def create_trace(filename, max_addr, burst_size, itt):
    """Write a gzipped protobuf trace of reads covering [0, max_addr).

    Every burst_size-aligned address is touched exactly once, in random
    order, with one read issued every itt ticks. The same trace is later
    replayed for both warming and measurement so the address sequence is
    identical in each phase.
    """
    try:
        proto_out = gzip.open(filename, 'wb')
    except IOError:
        print "Failed to open ", filename, " for writing"
        exit(-1)

    # write the magic number in 4-byte Little Endian, similar to what
    # is done in src/proto/protoio.cc
    proto_out.write("gem5")

    # add the packet header
    header = packet_pb2.PacketHeader()
    header.obj_id = "lat_mem_rd for range 0:" + str(max_addr)

    # assume the default tick rate (1 ps)
    header.tick_freq = 1000000000000

    protolib.encodeMessage(proto_out, header)

    # create a list of every single address to touch
    addrs = range(0, max_addr, burst_size)

    # randomise so a prefetcher cannot follow a linear stream
    import random
    random.shuffle(addrs)

    tick = 0

    # create a packet we can re-use for all the addresses
    packet = packet_pb2.Packet()
    # ReadReq is 1 in src/mem/packet.hh Command enum
    packet.cmd = 1
    packet.size = int(burst_size)

    for addr in addrs:
        packet.tick = long(tick)
        packet.addr = long(addr)
        protolib.encodeMessage(proto_out, packet)
        tick = tick + itt

    proto_out.close()
# this will take a while, so keep the user informed
print "Generating traces, please wait..."
nxt_range = 0
nxt_state = 0
period = long(itt * (max_range / burst_size))
# now we create the states for each range
for r in ranges:
filename = os.path.join(m5.options.outdir,
'lat_mem_rd%d.trc.gz' % nxt_range)
if not options.reuse_trace:
# create the actual random trace for this range
create_trace(filename, r, burst_size, itt)
# the warming state
cfg_file.write("STATE %d %d TRACE %s 0\n" %
(nxt_state, period, filename))
nxt_state = nxt_state + 1
# the measuring states
for i in range(iterations):
cfg_file.write("STATE %d %d TRACE %s 0\n" %
(nxt_state, period, filename))
nxt_state = nxt_state + 1
nxt_range = nxt_range + 1
cfg_file.write("INIT 0\n")
# go through the states one by one
for state in range(1, nxt_state):
cfg_file.write("TRANSITION %d %d 1\n" % (state - 1, state))
cfg_file.write("TRANSITION %d %d 1\n" % (nxt_state - 1, nxt_state - 1))
cfg_file.close()
# create a traffic generator, and point it to the file we just created
system.tgen = TrafficGen(config_file = cfg_file_name,
progress_check = '10s')
# add a communication monitor
system.monitor = CommMonitor()
# connect the traffic generator to the system
system.tgen.port = system.monitor.slave
# create the actual cache hierarchy, for now just go with something
# basic to explore some of the options
from common.Caches import *
# a starting point for an L3 cache
class L3Cache(Cache):
    """Baseline L3 cache parameters (latencies are in cycles of the clock
    domain declared above)."""
    assoc = 16
    hit_latency = 40
    response_latency = 40
    mshrs = 32           # outstanding misses supported
    tgts_per_mshr = 12   # targets (requests) coalesced per miss
    write_buffers = 16
# note that everything is in the same clock domain, 2.0 GHz as
# specified above
system.l1cache = L1_DCache(size = '64kB')
system.monitor.master = system.l1cache.cpu_side
system.l2cache = L2Cache(size = '512kB', writeback_clean = True)
system.l2cache.xbar = L2XBar()
system.l1cache.mem_side = system.l2cache.xbar.slave
system.l2cache.cpu_side = system.l2cache.xbar.master
# make the L3 mostly exclusive, and correspondingly ensure that the L2
# writes back also clean lines to the L3
system.l3cache = L3Cache(size = '4MB', clusivity = 'mostly_excl')
system.l3cache.xbar = L2XBar()
system.l2cache.mem_side = system.l3cache.xbar.slave
system.l3cache.cpu_side = system.l3cache.xbar.master
system.l3cache.mem_side = system.membus.slave
# connect the system port even if it is not used in this example
system.system_port = system.membus.slave
# every period, dump and reset all stats
periodicStatDump(period)
# run Forrest, run!
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
m5.instantiate()
m5.simulate(nxt_state * period)
# print all we need to make sense of the stats output
print "lat_mem_rd with %d iterations, ranges:" % iterations
for r in ranges:
print r
| bsd-3-clause |
ajaxsys/dict-admin | docutils/parsers/rst/languages/lt.py | 2 | 3600 | # -*- coding: utf8 -*-
# $Id: lt.py 6459 2010-10-29 22:07:34Z milde $
# Author: Dalius Dobravolskas <dalius.do...@gmail.com>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Lithuanian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'dėmesio': 'attention',
u'atsargiai': 'caution',
u'pavojinga': 'danger',
u'klaida': 'error',
u'užuomina': 'hint',
u'svarbu': 'important',
u'pastaba': 'note',
u'patarimas': 'tip',
u'įspėjimas': 'warning',
u'perspėjimas': 'admonition',
u'šoninė-juosta': 'sidebar',
u'tema': 'topic',
u'linijinis-blokas': 'line-block',
u'išanalizuotas-literalas': 'parsed-literal',
u'rubrika': 'rubric',
u'epigrafas': 'epigraph',
u'pagridiniai-momentai': 'highlights',
u'atitraukta-citata': 'pull-quote',
u'sudėtinis-darinys': 'compound',
u'konteineris': 'container',
#'questions': 'questions',
u'lentelė': 'table',
u'csv-lentelė': 'csv-table',
u'sąrašo-lentelė': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
u'matematika': 'math',
#'imagemap': 'imagemap',
u'paveiksliukas': 'image',
u'iliustracija': 'figure',
u'pridėti': 'include',
u'žalia': 'raw',
u'pakeisti': 'replace',
u'unikodas': 'unicode',
u'data': 'date',
u'klasė': 'class',
u'rolė': 'role',
u'numatytoji-rolė': 'default-role',
u'titulas': 'title',
u'turinys': 'contents',
u'seknum': 'sectnum',
u'sekcijos-numeravimas': 'sectnum',
u'antraštė': 'header',
u'poraštė': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'nutaikytos-pastaba': 'target-notes',
u'restructuredtext-testinė-direktyva': 'restructuredtext-test-directive'}
"""Lithuanian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
'santrumpa': 'abbreviation',
'sa': 'abbreviation',
'akronimas': 'acronym',
'ak': 'acronym',
'indeksas': 'index',
'i': 'index',
u'apatinis-indeksas': 'subscript',
'sub': 'subscript',
u'viršutinis-indeksas': 'superscript',
'sup': 'superscript',
u'antrašės-nuoroda': 'title-reference',
u'antraštė': 'title-reference',
'a': 'title-reference',
'pep-nuoroda': 'pep-reference',
'pep': 'pep-reference',
'rfc-nuoroda': 'rfc-reference',
'rfc': 'rfc-reference',
u'paryškinimas': 'emphasis',
u'sustiprintas': 'strong',
u'literalas': 'literal',
u'matematika': 'math',
u'vardinė-nuoroda': 'named-reference',
u'anoniminė-nuoroda': 'anonymous-reference',
u'išnašos-nuoroda': 'footnote-reference',
u'citatos-nuoroda': 'citation-reference',
u'pakeitimo-nuoroda': 'substitution-reference',
u'taikinys': 'target',
u'uri-nuoroda': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
'žalia': 'raw',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
| bsd-3-clause |
autosportlabs/kivy | examples/3Drendering/main.py | 12 | 2396 | '''
3D Rotating Monkey Head
========================
This example demonstrates using OpenGL to display a rotating monkey head. This
includes loading a Blender OBJ file, shaders written in OpenGL's Shading
Language (GLSL), and using scheduled callbacks.
The monkey.obj file is an OBJ file output from the Blender free 3D creation
software. The file is text, listing vertices and faces and is loaded
using a class in the file objloader.py. The file simple.glsl is
a simple vertex and fragment shader written in GLSL.
'''
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.resources import resource_find
from kivy.graphics.transformation import Matrix
from kivy.graphics.opengl import *
from kivy.graphics import *
from objloader import ObjFile
class Renderer(Widget):
    """Widget that renders a rotating, lit 3D mesh loaded from a Blender OBJ file."""

    def __init__(self, **kwargs):
        # The canvas is a RenderContext so we can drive a custom GLSL shader;
        # compute_normal_mat makes Kivy supply the normal matrix for lighting.
        self.canvas = RenderContext(compute_normal_mat=True)
        self.canvas.shader.source = resource_find('simple.glsl')
        self.scene = ObjFile(resource_find("monkey.obj"))
        super(Renderer, self).__init__(**kwargs)
        with self.canvas:
            # Bracket the scene with callbacks that toggle raw GL state,
            # since depth testing is not part of Kivy's default 2D pipeline.
            self.cb = Callback(self.setup_gl_context)
            PushMatrix()
            self.setup_scene()
            PopMatrix()
            self.cb = Callback(self.reset_gl_context)
        # Redraw at ~60 fps.
        Clock.schedule_interval(self.update_glsl, 1 / 60.)

    def setup_gl_context(self, *args):
        glEnable(GL_DEPTH_TEST)

    def reset_gl_context(self, *args):
        glDisable(GL_DEPTH_TEST)

    def update_glsl(self, *largs):
        # Rebuild the projection every frame so window resizes are honoured.
        asp = self.width / float(self.height)
        proj = Matrix().view_clip(-asp, asp, -1, 1, 1, 100, 1)
        self.canvas['projection_mat'] = proj
        self.canvas['diffuse_light'] = (1.0, 1.0, 0.8)
        self.canvas['ambient_light'] = (0.1, 0.1, 0.1)
        self.rot.angle += 1  # one degree per frame about the y axis

    def setup_scene(self):
        Color(1, 1, 1, 1)
        PushMatrix()
        Translate(0, 0, -3)  # move the model into view along -z
        self.rot = Rotate(1, 0, 1, 0)
        # Take the first (and only expected) object from the OBJ file.
        m = list(self.scene.objects.values())[0]
        UpdateNormalMatrix()
        self.mesh = Mesh(
            vertices=m.vertices,
            indices=m.indices,
            fmt=m.vertex_format,
            mode='triangles',
        )
        PopMatrix()
class RendererApp(App):
    """Application shell whose entire UI is the Renderer widget."""

    def build(self):
        return Renderer()


if __name__ == "__main__":
    RendererApp().run()
| mit |
shujaatak/UAV_MissionPlanner | Lib/runpy.py | 111 | 10698 | """runpy.py - locating and running Python code using the module namespace
Provides support for locating and running Python scripts using the Python
module namespace instead of the native filesystem.
This allows Python code to play nicely with non-filesystem based PEP 302
importers when locating support scripts as well as when importing modules.
"""
# Written by Nick Coghlan <ncoghlan at gmail.com>
# to implement PEP 338 (Executing Modules as Scripts)
import sys
import imp
from pkgutil import read_code
try:
from imp import get_loader
except ImportError:
from pkgutil import get_loader
__all__ = [
"run_module", "run_path",
]
class _TempModule(object):
    """Temporarily replace a module in sys.modules with an empty namespace"""

    def __init__(self, mod_name):
        self.mod_name = mod_name
        # Fresh, empty module object that will shadow any real one.
        self.module = imp.new_module(mod_name)
        # Holds the displaced module (if any) so __exit__ can restore it.
        self._saved_module = []

    def __enter__(self):
        mod_name = self.mod_name
        try:
            self._saved_module.append(sys.modules[mod_name])
        except KeyError:
            # No module of that name was loaded; nothing to restore later.
            pass
        sys.modules[mod_name] = self.module
        return self

    def __exit__(self, *args):
        if self._saved_module:
            sys.modules[self.mod_name] = self._saved_module[0]
        else:
            del sys.modules[self.mod_name]
        self._saved_module = []
class _ModifiedArgv0(object):
def __init__(self, value):
self.value = value
self._saved_value = self._sentinel = object()
def __enter__(self):
if self._saved_value is not self._sentinel:
raise RuntimeError("Already preserving saved value")
self._saved_value = sys.argv[0]
sys.argv[0] = self.value
def __exit__(self, *args):
self.value = self._sentinel
sys.argv[0] = self._saved_value
def _run_code(code, run_globals, init_globals=None,
              mod_name=None, mod_fname=None,
              mod_loader=None, pkg_name=None):
    """Helper to run code in nominated namespace"""
    if init_globals is not None:
        run_globals.update(init_globals)
    # Pre-populate the special module attributes the executed code expects.
    run_globals.update(__name__ = mod_name,
                       __file__ = mod_fname,
                       __loader__ = mod_loader,
                       __package__ = pkg_name)
    exec code in run_globals
    return run_globals
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper to run code in new namespace with sys modified"""
    # Execute with a temporary module installed in sys.modules and with
    # sys.argv[0] pointing at the module's file; both are restored on exit.
    with _TempModule(mod_name) as temp_module, _ModifiedArgv0(mod_fname):
        mod_globals = temp_module.module.__dict__
        _run_code(code, mod_globals, init_globals,
                  mod_name, mod_fname, mod_loader, pkg_name)
    # Copy the globals of the temporary module, as they
    # may be cleared when the temporary module goes away
    return mod_globals.copy()
# This helper is needed due to a missing component in the PEP 302
# loader protocol (specifically, "get_filename" is non-standard)
# Since we can't introduce new features in maintenance releases,
# support was added to zipimporter under the name '_get_filename'
def _get_filename(loader, mod_name):
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
# Helper to get the loader, code and filename for a module
def _get_module_details(mod_name):
    """Return (mod_name, loader, code, filename) for mod_name.

    Packages are resolved recursively to their __main__ submodule.
    Raises ImportError when the module, its code object, or a package's
    __main__ cannot be found.
    """
    loader = get_loader(mod_name)
    if loader is None:
        raise ImportError("No module named %s" % mod_name)
    if loader.is_package(mod_name):
        if mod_name == "__main__" or mod_name.endswith(".__main__"):
            # Would recurse forever otherwise.
            raise ImportError("Cannot use package as __main__ module")
        try:
            # Executing a package really means executing its __main__ module.
            pkg_main_name = mod_name + ".__main__"
            return _get_module_details(pkg_main_name)
        except ImportError, e:
            raise ImportError(("%s; %r is a package and cannot " +
                               "be directly executed") %(e, mod_name))
    code = loader.get_code(mod_name)
    if code is None:
        raise ImportError("No code object available for %s" % mod_name)
    filename = _get_filename(loader, mod_name)
    return mod_name, loader, code, filename
def _get_main_module_details():
# Helper that gives a nicer error message when attempting to
# execute a zipfile or directory by invoking __main__.py
main_name = "__main__"
try:
return _get_module_details(main_name)
except ImportError as exc:
if main_name in str(exc):
raise ImportError("can't find %r module in %r" %
(main_name, sys.path[0]))
raise
# This function is the actual implementation of the -m switch and direct
# execution of zipfiles and directories and is deliberately kept private.
# This avoids a repeat of the situation where run_module() no longer met the
# needs of mainmodule.c, but couldn't be changed because it was public
def _run_module_as_main(mod_name, alter_argv=True):
"""Runs the designated module in the __main__ namespace
Note that the executed module will have full access to the
__main__ namespace. If this is not desirable, the run_module()
function should be used to run the module code in a fresh namespace.
At the very least, these variables in __main__ will be overwritten:
__name__
__file__
__loader__
__package__
"""
try:
if alter_argv or mod_name != "__main__": # i.e. -m switch
mod_name, loader, code, fname = _get_module_details(mod_name)
else: # i.e. directory or zipfile execution
mod_name, loader, code, fname = _get_main_module_details()
except ImportError as exc:
msg = "%s: %s" % (sys.executable, str(exc))
sys.exit(msg)
pkg_name = mod_name.rpartition('.')[0]
main_globals = sys.modules["__main__"].__dict__
if alter_argv:
sys.argv[0] = fname
return _run_code(code, main_globals, None,
"__main__", fname, loader, pkg_name)
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it

    Returns the resulting top level namespace dictionary
    """
    mod_name, loader, code, fname = _get_module_details(mod_name)
    if run_name is None:
        run_name = mod_name
    pkg_name = mod_name.rpartition('.')[0]
    if alter_sys:
        # Temporarily install the module in sys.modules and patch sys.argv[0].
        return _run_module_code(code, init_globals, run_name,
                                fname, loader, pkg_name)
    else:
        # Leave the sys module alone
        return _run_code(code, {}, init_globals, run_name,
                         fname, loader, pkg_name)
# XXX (ncoghlan): Perhaps expose the C API function
# as imp.get_importer instead of reimplementing it in Python?
def _get_importer(path_name):
"""Python version of PyImport_GetImporter C API function"""
cache = sys.path_importer_cache
try:
importer = cache[path_name]
except KeyError:
# Not yet cached. Flag as using the
# standard machinery until we finish
# checking the hooks
cache[path_name] = None
for hook in sys.path_hooks:
try:
importer = hook(path_name)
break
except ImportError:
pass
else:
# The following check looks a bit odd. The trick is that
# NullImporter throws ImportError if the supplied path is a
# *valid* directory entry (and hence able to be handled
# by the standard import machinery)
try:
importer = imp.NullImporter(path_name)
except ImportError:
return None
cache[path_name] = importer
return importer
def _get_code_from_file(fname):
    """Return a code object for fname, preferring compiled bytecode."""
    # Check for a compiled file first
    with open(fname, "rb") as f:
        code = read_code(f)
    if code is None:
        # That didn't work, so try it as normal source code
        # ("rU" enables universal newlines on this Python version).
        with open(fname, "rU") as f:
            code = compile(f.read(), fname, 'exec')
    return code
def run_path(path_name, init_globals=None, run_name=None):
"""Execute code located at the specified filesystem location
Returns the resulting top level namespace dictionary
The file path may refer directly to a Python script (i.e.
one that could be directly executed with execfile) or else
it may refer to a zipfile or directory containing a top
level __main__.py script.
"""
if run_name is None:
run_name = "<run_path>"
importer = _get_importer(path_name)
if isinstance(importer, imp.NullImporter):
# Not a valid sys.path entry, so run the code directly
# execfile() doesn't help as we want to allow compiled files
code = _get_code_from_file(path_name)
return _run_module_code(code, init_globals, run_name, path_name)
else:
# Importer is defined for path, so add it to
# the start of sys.path
sys.path.insert(0, path_name)
try:
# Here's where things are a little different from the run_module
# case. There, we only had to replace the module in sys while the
# code was running and doing so was somewhat optional. Here, we
# have no choice and we have to remove it even while we read the
# code. If we don't do this, a __loader__ attribute in the
# existing __main__ module may prevent location of the new module.
main_name = "__main__"
saved_main = sys.modules[main_name]
del sys.modules[main_name]
try:
mod_name, loader, code, fname = _get_main_module_details()
finally:
sys.modules[main_name] = saved_main
pkg_name = ""
with _TempModule(run_name) as temp_module, \
_ModifiedArgv0(path_name):
mod_globals = temp_module.module.__dict__
return _run_code(code, mod_globals, init_globals,
run_name, fname, loader, pkg_name).copy()
finally:
try:
sys.path.remove(path_name)
except ValueError:
pass
if __name__ == "__main__":
# Run the module specified as the next command line argument
if len(sys.argv) < 2:
print >> sys.stderr, "No module specified for execution"
else:
del sys.argv[0] # Make the requested module sys.argv[0]
_run_module_as_main(sys.argv[0])
| gpl-2.0 |
kleetus/bitcoinxt | qa/rpc-tests/doublespendrelay.py | 4 | 5948 | #!/usr/bin/env python
#
# Test double-spend-relay and notification code
#
from test_framework import BitcoinTestFramework
from decimal import Decimal
from util import *
class DoubleSpendRelay(BitcoinTestFramework):
    """Regression test: double-spends must be relayed (once) so wallets can
    warn, but triple-spends of an already-respent input must not be."""
    #
    # Create a 4-node network; roles for the nodes are:
    # [0] : transaction creator
    # [1] : respend sender
    # [2] : relay node
    # [3] : receiver, should detect/notify of double-spends
    #
    # Node connectivity is:
    # [0,1] <--> [2] <--> [3]
    #
    def setup_network(self):
        self.is_network_split = False
        self.nodes = []
        for i in range(0,4):
            self.nodes.append(start_node(i, self.options.tmpdir))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[3], 2)
        # Mine past coinbase maturity so nodes[0] has spendable outputs.
        self.nodes[0].setgenerate(True, 110)
        sync_blocks(self.nodes)
        return self.nodes

    def run_test(self):
        fee = Decimal("0.01")
        nodes = self.nodes

        # Test 1: First spend
        # shutdown nodes[1] so it is not aware of the first spend
        # and will be willing to broadcast a respend
        stop_node(nodes[1], 1)
        # First spend: nodes[0] -> nodes[3]
        amount = Decimal("144") # We rely on this requiring 3 50-BTC inputs
        (total_in, tx1_inputs) = gather_inputs(nodes[0], amount+fee)
        change_outputs = make_change(nodes[0], total_in, amount, fee)
        outputs = dict(change_outputs)
        outputs[nodes[3].getnewaddress()] = amount
        signed = nodes[0].signrawtransaction(nodes[0].createrawtransaction(tx1_inputs, outputs))
        txid1 = nodes[0].sendrawtransaction(signed["hex"], True)
        sync_mempools([nodes[0], nodes[3]])
        txid1_info = nodes[3].gettransaction(txid1)
        # No respend has been seen yet.
        assert_equal(txid1_info["respendsobserved"], [])

        # Test 2: Is double-spend of tx1_inputs[0] relayed?
        # Restart nodes[1]
        nodes[1] = start_node(1, self.options.tmpdir)
        connect_nodes(nodes[1], 2)
        # Second spend: nodes[0] -> nodes[0]
        amount = Decimal("40")
        total_in = Decimal("50")
        inputs2 = [tx1_inputs[0]]
        change_outputs = make_change(nodes[0], total_in, amount, fee)
        outputs = dict(change_outputs)
        outputs[nodes[0].getnewaddress()] = amount
        signed = nodes[0].signrawtransaction(nodes[0].createrawtransaction(inputs2, outputs))
        txid2 = nodes[1].sendrawtransaction(signed["hex"], True)
        # Wait until txid2 is relayed to nodes[3] (but don't wait forever):
        # Note we can't use sync_mempools, because the respend isn't added to
        # the mempool.
        for i in range(1,7):
            txid1_info = nodes[3].gettransaction(txid1)
            if txid1_info["respendsobserved"] != []:
                break
            time.sleep(0.1 * i**2) # geometric back-off
        assert_equal(txid1_info["respendsobserved"], [txid2])

        # Test 3: Is triple-spend of tx1_inputs[0] not relayed?
        # Clear node1 mempool
        stop_node(nodes[1], 1)
        nodes[1] = start_node(1, self.options.tmpdir)
        connect_nodes(nodes[1], 2)
        # Third spend: nodes[0] -> nodes[0]
        outputs = dict(change_outputs)
        outputs[nodes[0].getnewaddress()] = amount
        signed = nodes[0].signrawtransaction(nodes[0].createrawtransaction(inputs2, outputs))
        txid3 = nodes[1].sendrawtransaction(signed["hex"], True)
        # Ensure txid3 not relayed to nodes[3]:
        # (fixed wait; there is no positive event to poll for here)
        time.sleep(9.1)
        txid1_info = nodes[3].gettransaction(txid1)
        assert_equal(txid1_info["respendsobserved"], [txid2])

        # Test 4: Is double-spend of tx1_inputs[1] relayed when triple-spend of tx1_inputs[0] precedes it?
        # Clear node1 mempool
        stop_node(nodes[1], 1)
        nodes[1] = start_node(1, self.options.tmpdir)
        connect_nodes(nodes[1], 2)
        # Inputs are third spend, second spend
        amount = Decimal("89")
        total_in = Decimal("100")
        inputs4 = [tx1_inputs[0],tx1_inputs[1]]
        change_outputs = make_change(nodes[0], total_in, amount, fee)
        outputs = dict(change_outputs)
        outputs[nodes[0].getnewaddress()] = amount
        signed = nodes[0].signrawtransaction(nodes[0].createrawtransaction(inputs4, outputs))
        txid4 = nodes[1].sendrawtransaction(signed["hex"], True)
        # Wait until txid4 is relayed to nodes[3] (but don't wait forever):
        for i in range(1,7):
            txid1_info = nodes[3].gettransaction(txid1)
            if txid1_info["respendsobserved"] != [txid2]:
                break
            time.sleep(0.1 * i**2) # geometric back-off
        assert_equal(sorted(txid1_info["respendsobserved"]), sorted([txid2,txid4]))

        # Test 5: Is double-spend of tx1_inputs[2] relayed when triple-spend of tx1_inputs[0] follows it?
        # Clear node1 mempool
        stop_node(nodes[1], 1)
        nodes[1] = start_node(1, self.options.tmpdir)
        connect_nodes(nodes[1], 2)
        # Inputs are second spend, third spend
        amount = Decimal("88")
        total_in = Decimal("100")
        inputs5 = [tx1_inputs[2],tx1_inputs[0]]
        change_outputs = make_change(nodes[0], total_in, amount, fee)
        outputs = dict(change_outputs)
        outputs[nodes[0].getnewaddress()] = amount
        signed = nodes[0].signrawtransaction(nodes[0].createrawtransaction(inputs5, outputs))
        txid5 = nodes[1].sendrawtransaction(signed["hex"], True)
        # Wait until txid5 is relayed to nodes[3] (but don't wait forever):
        for i in range(1,7):
            txid1_info = nodes[3].gettransaction(txid1)
            if sorted(txid1_info["respendsobserved"]) != sorted([txid2,txid4]):
                break
            time.sleep(0.1 * i**2) # geometric back-off
        assert_equal(sorted(txid1_info["respendsobserved"]), sorted([txid2,txid4,txid5]))
if __name__ == '__main__':
DoubleSpendRelay().main()
| mit |
UCL-INGI/INGInious | inginious/frontend/pages/social.py | 1 | 3533 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" Auth page """
import flask
from flask import redirect
from werkzeug.exceptions import NotFound
from inginious.frontend.pages.utils import INGIniousPage, INGIniousAuthPage
class AuthenticationPage(INGIniousPage):
    """Redirects the user to an external auth provider to start a sign-in flow."""

    def process_signin(self,auth_id):
        auth_method = self.user_manager.get_auth_method(auth_id)
        if not auth_method:
            # NOTE(review): SharePage raises werkzeug's NotFound(description=...)
            # for the same condition -- confirm which 404 style is intended and
            # unify across the three pages.
            raise self.app.notfound(message=_("Auth method doesn't exist"))
        # Per-auth-method session storage that survives the round trip
        # to the external provider.
        auth_storage = self.user_manager.session_auth_storage().setdefault(auth_id, {})
        # Return the user to the page they came from once the flow completes.
        auth_storage["redir_url"] = flask.request.referrer or '/'
        auth_storage["method"] = "signin"
        auth_link = auth_method.get_auth_link(auth_storage)
        return redirect(auth_link)

    def GET(self, auth_id):
        # Cookieless sessions: redirect to the canonical URL (presumably so the
        # session id gets embedded in the path -- confirm against the router).
        if self.user_manager.session_cookieless():
            return redirect("/auth/signin/" + auth_id)
        return self.process_signin(auth_id)

    def POST(self, auth_id):
        return self.process_signin(auth_id)
class CallbackPage(INGIniousPage):
    """Handles the external provider's redirect back after authentication."""

    def process_callback(self, auth_id):
        auth_method = self.user_manager.get_auth_method(auth_id)
        if not auth_method:
            # NOTE(review): inconsistent with SharePage, which raises
            # NotFound(description=...) -- confirm intended 404 style.
            raise self.app.notfound(message=_("Auth method doesn't exist."))
        auth_storage = self.user_manager.session_auth_storage().setdefault(auth_id, {})
        user = auth_method.callback(auth_storage)
        if user and auth_storage.get("method", "") == "signin":
            # Bind the external identity to the local account.
            if not self.user_manager.bind_user(auth_id, user):
                return redirect("/signin?binderror")
        elif user and auth_storage.get("method", "") == "share":
            # Share flow: reload the submission stored before the round trip
            # and hand it to the provider for publishing.
            submission = self.submission_manager.get_submission(auth_storage["submissionid"], True)
            if submission:
                course = self.course_factory.get_course(submission["courseid"])
                task = course.get_task(submission["taskid"])
                auth_method.share(auth_storage, course, task, submission, self.user_manager.session_language())
            else:
                raise NotFound(description=_("Submission doesn't exist."))
        else:
            # Provider returned no user, or the stored flow marker is unknown.
            return redirect("/signin?callbackerror")
        return redirect(auth_storage.get("redir_url", "/"))

    def GET(self, auth_id):
        # NOTE(review): cookieless GET redirects to the *signin* URL, not the
        # callback URL -- verify this is intentional.
        if self.user_manager.session_cookieless():
            return redirect("/auth/signin/" + auth_id)
        return self.process_callback(auth_id)

    def POST(self, auth_id):
        return self.process_callback(auth_id)
class SharePage(INGIniousAuthPage):
    """Starts a share-a-submission round trip with an external provider
    (requires an authenticated session)."""

    def process_share(self, auth_id):
        auth_method = self.user_manager.get_auth_method(auth_id)
        if not auth_method:
            raise NotFound(description=_("Auth method not found."))
        auth_storage = self.user_manager.session_auth_storage().setdefault(auth_id, {})
        auth_storage["redir_url"] = flask.request.referrer or '/'
        auth_storage["method"] = "share"
        # The submission id may arrive as a query parameter or as form data.
        auth_storage["submissionid"] = flask.request.args.get("submissionid", "") or flask.request.form.get("submissionid", "")
        auth_link = auth_method.get_auth_link(auth_storage, True)
        return redirect(auth_link)

    def GET(self, auth_id):
        if self.user_manager.session_cookieless():
            return redirect("/auth/share/" + auth_id)
        return self.process_share(auth_id)

    def POST(self, auth_id):
        return self.process_share(auth_id)
| agpl-3.0 |
azumimuo/family-xbmc-addon | script.module.urlresolver/lib/urlresolver/plugins/videa.py | 8 | 1958 | # -*- coding: UTF-8 -*-
"""
Kodi urlresolver plugin
Copyright (C) 2016 alifrezser
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class VideaResolver(UrlResolver):
    """URL resolver for videos hosted on videa.hu / videakid.hu."""
    name = "videa"
    domains = ["videa.hu", "videakid.hu"]
    pattern = '(?://|\.)((?:videa|videakid)\.hu)/(?:player/?\?v=|videok/)(?:.*-|)([0-9a-zA-Z]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Fetch the player XML and return the URL of the chosen stream."""
        page_url = self.get_url(host, media_id)
        response = self.net.http_GET(page_url, headers={'User-Agent': common.FF_USER_AGENT})
        containers = helpers.get_dom(response.content, 'video_sources')
        if containers:
            found = re.findall('name\s*=\s*[\'|"]([^\'"]+).+?streamable.+?>([^<]+)', containers[0])
            # When the low-quality entry comes last, flip the list so the
            # better streams are offered first.
            if found[-1][0].lower() == 'lq':
                found = found[::-1]
            chosen = helpers.pick_source(found)
            # Protocol-relative URLs are normalized to plain http.
            return 'http:' + chosen if chosen.startswith('//') else chosen
        raise ResolverError('Stream not found')

    def get_url(self, host, media_id):
        return self._default_get_url(host, media_id, 'http://{host}/videaplayer_get_xml.php?v={media_id}&start=0&referrer=http://{host}')
| gpl-2.0 |
dhague/vpower | tests/test_TacxBlueMotionPowerCalculatorTest.py | 1 | 1059 | import unittest
from TacxBlueMotionPowerCalculator import TacxBlueMotionPowerCalculator
class TacxBlueMotionPowerCalculatorTest(unittest.TestCase):
    """Spot-checks power_from_speed() against known speed/power pairs for the
    Tacx Blue Motion trainer, from 0 km/h up to the 88.1 km/h top end."""

    def setUp(self):
        self.calculator = TacxBlueMotionPowerCalculator()

    def test_calculation_min(self):
        # Zero wheel speed must map to zero power.
        power = self.calculator.power_from_speed(0.0)
        self.assertEqual(power, 0)

    def test_calculation_1(self):
        power = self.calculator.power_from_speed(self.get_revs(9.7))
        self.assertEqual(power, 63)

    def test_calculation_2(self):
        power = self.calculator.power_from_speed(self.get_revs(16.3))
        self.assertEqual(power, 109)

    def test_calculation_3(self):
        power = self.calculator.power_from_speed(self.get_revs(42.1))
        self.assertEqual(power, 331)

    def test_calculation_max(self):
        power = self.calculator.power_from_speed(self.get_revs(88.1))
        self.assertEqual(power, 560)

    def get_revs(self, speed_kph):
        # Convert road speed in km/h to wheel revolutions per second.
        speed_mps = speed_kph / 3.6
        return speed_mps / self.calculator.wheel_circumference
| mit |
bolshoibooze/hone | HostAgent/agentFreFrp.py | 2 | 6185 | ################################################################################
# The Frenetic Project #
# frenetic@frenetic-lang.org #
################################################################################
# Licensed to the Frenetic Project by one or more contributors. See the #
# NOTICE file distributed with this work for additional information #
# regarding copyright and ownership. The Frenetic Project licenses this #
# file to you under the following license. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided the following conditions are met: #
# - Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation or other materials provided with the distribution. #
# - The names of the copyright holds and contributors may not be used to #
# endorse or promote products derived from this work without specific #
# prior written permission. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# LICENSE file distributed with this work for specific language governing #
# permissions and limitations under the License. #
################################################################################
# /src/frenetic_frp.py #
# Frenetic FRP representation #
# $Id$ #
################################################################################
import threading
import agentFreUtil as util
def mk_add_listener(self):
    """Build the default add_listener for *self*: registers a listener under a
    fresh id and returns a zero-argument deregistration callback."""
    def add(listener):
        key = self.next()
        self.listeners[key] = listener
        def remove():
            del self.listeners[key]
        return remove
    return add
def mk_prepare(self):
    """Build the default prepare: fan the prepare() signal out to every
    registered listener of *self*."""
    def prepare():
        # .values() instead of the Python-2-only .itervalues(): identical
        # iteration on Python 2 and keeps the helper working on Python 3.
        for listener in self.listeners.values():
            listener.prepare()
    return prepare
def mk_push(self):
    """Build the default push: forward the value x to every registered
    listener of *self*."""
    def push(x):
        # .values() instead of the Python-2-only .itervalues() for 2/3 compat.
        for listener in self.listeners.values():
            listener.push(x)
    return push
def mk_finish(self):
    """Build the default finish: signal end-of-propagation to every
    registered listener of *self*."""
    def finish():
        # .values() instead of the Python-2-only .itervalues() for 2/3 compat.
        for listener in self.listeners.values():
            listener.finish()
    return finish
def mk_terminate(self):
    """Build the default terminate: propagate teardown to every registered
    listener of *self*."""
    def terminate():
        # .values() instead of the Python-2-only .itervalues() for 2/3 compat.
        for listener in self.listeners.values():
            listener.terminate()
    return terminate
def mk_next(self):
    """Build the default listener-id generator: each call bumps self.lid
    and returns the new value."""
    def advance():
        self.lid += 1
        return self.lid
    return advance
# Composition operators
def ComposeEFEF(ef1, ef2):
    """Chain two event functions: everything pushed into ef1 flows on to ef2,
    while listeners of the composite observe ef2's output."""
    ef1.add_listener(ef2)
    composite = FEventFun(ef2.add_listener, ef1.prepare, ef1.push, ef1.finish, ef1.terminate)
    return composite
def ComposeEEF(e1, ef2):
    """Compose an event with an event function, yielding a new event that
    carries ef2's outputs whenever e1 fires."""
    # `e` is rebound below; the closures capture the enclosing local, and the
    # listener is only invoked after the FEvent has been assigned.
    e = None
    def prepare():
        # Mark the composed event stale while a propagation is in flight.
        e.fresh = False
        ef2.prepare()
    def push(x):
        ef2.push(x)
    def finish():
        ef2.finish()
        e.fresh = True
    e1.add_listener(FListener(prepare, push, finish, ef2.terminate))
    e = FEvent(ef2.add_listener)
    return e
def ComposeEL(e, l):
    """Terminal composition: attach listener *l* to event *e* (returns None)."""
    e.add_listener(l)
# Events
class FEvent:
    """A push-based event stream.

    Listeners register through add_listener (id-keyed in self.listeners);
    `fresh` is True except while a propagation is in flight.
    """
    def __init__(self, add_listener=None, type=None):
        self.fresh = True
        self.lid = 0           # last issued listener id
        self.listeners = {}    # listener id -> listener
        self.next = mk_next(self)
        self.add_listener = mk_add_listener(self) if add_listener is None else add_listener
        self.type = type

    def __rshift__(self, other):
        # `e >> ef` pipes the event into an event function;
        # `e >> l` attaches a listener.  Dispatch is by class name.
        other_name = other.__class__.__name__
        if other_name == "FEventFun":
            return ComposeEEF(self, other)
        elif other_name == "FListener":
            return ComposeEL(self, other)
        else:
            raise util.IllegalArgument("Cannot compose FEvent and %s" % other_name)
# Event functions
class FEventFun:
    """An event transformer: it is pushed into like a listener and re-emits
    to its own registered listeners.

    Any callback not supplied falls back to the mk_* default, which fans the
    signal out to self.listeners.
    """
    def __init__(self, add_listener=None, prepare=None, push=None, finish=None, terminate=None, type_fun=None):
        self.listeners = {}
        self.lid = 0
        self.next = mk_next(self)
        self.add_listener = mk_add_listener(self) if add_listener is None else add_listener
        self.prepare = mk_prepare(self) if prepare is None else prepare
        self.push = mk_push(self) if push is None else push
        self.finish = mk_finish(self) if finish is None else finish
        self.terminate = mk_terminate(self) if terminate is None else terminate
        self.type_fun = (lambda x: None) if type_fun is None else type_fun

    def __rshift__(self, other):
        other_name = other.__class__.__name__
        if other_name == "FEventFun":
            return ComposeEFEF(self, other)
        else:
            # Fixed: the message previously said "FEvent", but this is
            # FEventFun's composition operator.
            raise util.IllegalArgument("Cannot compose FEventFun and %s" % other_name)
# Listeners
class FListener:
    """Bundle of the four callbacks an event source invokes on a listener:
    prepare / push / finish / terminate.  Each defaults to a no-op that
    returns an empty tuple."""

    def __init__(self, prepare=(lambda: ()), push=lambda x: (), finish=lambda: (), terminate=(lambda: ())):
        self.prepare, self.push = prepare, push
        self.finish, self.terminate = finish, terminate
# Behaviors
class FBehavior:
    """A pull-based time-varying value: calling .pull() samples the
    current value."""

    def __init__(self, pull, type=None):
        self.pull = pull
        self.type = type
# event_lock: a global lock ensuring atomic propogation of events.
event_lock = threading.Lock()

# RawEvent: generates an event stream and a "go" function to propagate a value
def RawEvent():
    """Create a source FEvent plus a go(x) function that atomically pushes x
    through the three-phase (prepare / push / finish) listener protocol."""
    e = FEvent()
    def go(x):
        event_lock.acquire()
        try:
            e.fresh = False
            # .values() instead of the Python-2-only .itervalues() for
            # Python 2/3 compatibility.
            for l in e.listeners.values():
                l.prepare()
            for l in e.listeners.values():
                l.push(x)
            for l in e.listeners.values():
                l.finish()
        finally:
            # Always release, even if a listener raises; previously an
            # exception here left the global lock held forever, deadlocking
            # every later propagation.
            event_lock.release()
        e.fresh = True
    return (e, go)
| bsd-3-clause |
strahlex/machinekit | src/hal/user_comps/hal_temp_atlas.py | 6 | 5185 | #!/usr/bin/python2
# encoding: utf-8
"""
Temperature.py
Created by Alexander Rössler on 2014-03-24.
"""
from fdm.r2temp import R2Temp
import argparse
import time
import sys
import hal
# The CRAMPS board thermistor input has one side grounded and the other side
# pulled high through a 1.00K resistor to 1.8V. Following this is a 4.7K
# resistor, some protection diodes, and filtering capacitors. The ADC voltage
# read is the filtered voltage across the thermistor.
def adc2r_cramps(pin, ref):
    """Convert a raw CRAMPS ADC reading to the thermistor resistance in ohms.

    The thermistor is pulled up through R_PU to the supply rail; the ADC reads
    the voltage across the thermistor, so R_T = V_T / I_PU where
    I_PU = (V_supply - V_T) / R_PU.

    :param pin: object with a ``rawValue`` attribute (filtered ADC count)
    :param ref: full-scale raw count used to scale rawValue to volts
    :return: thermistor resistance in ohms (0.0 for a zero reading)
    """
    V_T = pin.rawValue * 3.3 / ref  # voltage across the thermistor
    R_PU = 2000.0                   # pull-up resistance, ohms
    # No dividing by zero or negative voltages despite what the ADC says!
    # Clip the pull-up current to a small positive value.
    I_PU = max((3.3 - V_T) / R_PU, 0.000001)
    return V_T / I_PU
class Pin:
    """One ADC thermistor channel: moving-average filtering of raw samples
    plus handles to the HAL pins created for this channel.

    Fixed: the original __init__ assigned filterSamples and rawValue twice,
    and the class carried a mutable class-level ``filterSamples = []`` that
    shadowed the per-instance list.  All state is now instance-only.
    """

    def __init__(self):
        self.pin = 0              # channel number (0..7)
        self.r2temp = None        # R2Temp converter, or None for raw-only channels
        self.halValuePin = 0      # HAL pin publishing the converted temperature
        self.halRawPin = 0        # HAL pin publishing the filtered raw value
        self.filterSamples = []   # recent samples for the moving average
        self.filterSize = 10      # moving-average window length
        self.rawValue = 0.0       # current filtered value

    def addSample(self, value):
        """Push one raw ADC sample into the moving-average filter and update
        self.rawValue with the clipped window mean."""
        self.filterSamples.append(value)
        if len(self.filterSamples) > self.filterSize:
            self.filterSamples.pop(0)
        # No dividing by zero or negative values despite what the ADC says!
        # Clip to a small positive value.
        self.rawValue = max(sum(self.filterSamples) / len(self.filterSamples), 0.000001)
def getHalName(pin):
    """Return the HAL name prefix for a channel, e.g. pin 3 -> "ch-03"."""
    return "ch-{0:02d}".format(pin.pin)
def adc2Temp(pin, ref):
    """Convert a channel's filtered ADC value to a temperature rounded to
    one decimal place, using the channel's R2Temp lookup."""
    resistance = adc2r_cramps(pin, ref)
    return round(pin.r2temp.r2t(resistance) * 10.0) / 10.0
# Command-line interface for the HAL temperature component.
# NOTE(review): "Komma" is a typo in user-facing help text, but help strings
# are runtime output -- left untouched here.
parser = argparse.ArgumentParser(description='HAL component to read Temperature values over I2C')
parser.add_argument('-n', '--name', help='HAL component name', required=True)
parser.add_argument('-i', '--interval', help='I2C update interval', default=0.05)
parser.add_argument('-r', '--ref', help='Calibrate Thermistor ref voltage via ref ch', default="n")
parser.add_argument('-c', '--channels', help='Komma separated list of channels and thermistors to use e.g. 01:semitec_103GT_2,02:epcos_B57560G1104', required=True)
parser.add_argument('-f', '--filter_size', help='Size of the low pass filter to use', default=10)
parser.add_argument('-d', '--delay', help='Delay before the i2c should be updated', default=0.0)

args = parser.parse_args()

updateInterval = float(args.interval)   # seconds between polls
delayInterval = float(args.delay)       # startup delay in seconds
filterSize = int(args.filter_size)      # moving-average window per channel
error = True                            # start in error state until first good read
watchdog = True                         # toggled every cycle so HAL can detect hangs
# Create pins from the "NN:thermistor[,NN:thermistor...]" channel spec.
pins = []
if (args.channels != ""):
    channelsRaw = args.channels.split(',')
    for channel in channelsRaw:
        pinRaw = channel.split(':')
        if (len(pinRaw) != 2):
            print(("wrong input"))
            sys.exit(1)
        pin = Pin()
        pin.pin = int(pinRaw[0])
        # Only channels 0..7 exist on this board.
        if ((pin.pin > 7) or (pin.pin < 0)):
            print(("Pin not available"))
            sys.exit(1)
        # "none" means publish the raw value only, without a temperature
        # conversion table.
        if (pinRaw[1] != "none"):
            pin.r2temp = R2Temp(pinRaw[1])
        pin.filterSize = filterSize
        pins.append(pin)
# Initialize HAL: one raw output per channel, plus input/value pins for
# channels that have a thermistor table, and component-level status pins.
h = hal.component(args.name)
for pin in pins:
    pin.halRawPin = h.newpin(getHalName(pin) + ".raw", hal.HAL_FLOAT, hal.HAL_OUT)
    if (pin.r2temp is not None):
        pin.halInputPin = h.newpin(getHalName(pin) + ".input", hal.HAL_U32, hal.HAL_IN)
        pin.halValuePin = h.newpin(getHalName(pin) + ".value", hal.HAL_FLOAT, hal.HAL_OUT)
# NOTE(review): pins configured as "none" never get a halInputPin here, yet
# the main loop reads pin.halInputPin for every pin -- confirm whether
# raw-only channels are actually supported.
if (args.ref == 'y'):
    halRefPin = h.newpin("voltage-ref", hal.HAL_U32, hal.HAL_IN)
halErrorPin = h.newpin("error", hal.HAL_BIT, hal.HAL_OUT)
halNoErrorPin = h.newpin("no-error", hal.HAL_BIT, hal.HAL_OUT)
halWatchdogPin = h.newpin("watchdog", hal.HAL_BIT, hal.HAL_OUT)
h.ready()

halErrorPin.value = error
halNoErrorPin.value = not error
halWatchdogPin.value = watchdog
try:
    time.sleep(delayInterval)
    while (True):
        try:
            # Determine the reference full-scale count, optionally calibrated
            # from the voltage-ref HAL pin (0 falls back to the 3300 default).
            if (args.ref == 'y'):
                ref = (halRefPin.value & 0x00000FFF)
                if (ref == 0):
                    ref = 3300
            else:
                ref = 3300
            for pin in pins:
                # Mask to the 12-bit ADC range before filtering.
                value = float(pin.halInputPin.value & 0x00000FFF)
                pin.addSample(value)
                pin.halRawPin.value = pin.rawValue
                if (pin.r2temp is not None):
                    pin.halValuePin.value = adc2Temp(pin, ref)
            error = False
        except IOError as e:
            error = True

        halErrorPin.value = error
        halNoErrorPin.value = not error
        # Toggle the watchdog every cycle so HAL can detect a hung component.
        watchdog = not watchdog
        halWatchdogPin.value = watchdog
        time.sleep(updateInterval)
except:
    # NOTE(review): bare except deliberately catches KeyboardInterrupt (and
    # everything else) so the component always shuts down cleanly, but it
    # also hides programming errors -- consider narrowing.
    print(("exiting HAL component " + args.name))
    h.exit()
| lgpl-2.1 |
Danziger/Pluralsight-Network-Penetration-Testing-Using-Python-and-Kali-Linux | 004 - Basic Sniffer/main.py | 1 | 2136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
from ctypes import *
class IPHeader(Structure):
    """ctypes view of an IPv4 header (the first 20 bytes of a raw packet).

    Construction copies the bytes into the structure (__new__), then __init__
    derives convenience attributes: dotted-quad source/destination addresses
    and a human-readable protocol name.
    """

    _fields_ = [
        ('ihl', c_ubyte, 4),       # header length in 32-bit words
        ('version', c_ubyte, 4),   # IP version
        ('tos', c_ubyte),
        ('len', c_ushort),
        ('id', c_ushort),
        ('offset', c_ushort),
        ('ttl', c_ubyte),
        ('protocol_num', c_ubyte),
        ('sum', c_ushort),
        ('src', c_uint32),
        ('dst', c_uint32)
    ]

    def __new__(self, data=None):
        # Populate the structure fields directly from the raw packet bytes.
        return self.from_buffer_copy(data)

    def __init__(self, data=None):
        # Map source and destination IP addresses to dotted-quad strings.
        self.source_address = socket.inet_ntoa(struct.pack('@I', self.src))
        self.destination_address = socket.inet_ntoa(struct.pack('@I', self.dst))

        # Map protocol constants
        # TODO: Extend protocol support
        self.protocols = {1: 'ICMP', 6: 'TCP', 17: 'UDP'}

        # Get protocol name; unknown protocols fall back to the numeric form.
        try:
            self.protocol = self.protocols[self.protocol_num]
        except KeyError:
            # Fixed: was a bare "except:", which would also have swallowed
            # unrelated programming errors.
            self.protocol = str(self.protocol_num)
def initTCPSocket():
    """Open a raw TCP sniffing socket bound to all interfaces, configured so
    received packets include the IP header (requires admin privileges)."""
    raw_sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
    raw_sock.bind(('0.0.0.0', 0))
    raw_sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    return raw_sock
def startSniffing():
    """Loop forever printing "TCP src -> dst" for each sniffed packet;
    Ctrl-C exits the process.  (Python 2 only: uses print statements.)"""
    # TCP
    sniffer_TCP = initTCPSocket()

    print 'Sniffer is listening for incoming connections...'

    try:
        while True:
            raw_buffer_TCP = sniffer_TCP.recvfrom(65535)[0]
            # Only the first 20 bytes (the IP header) are parsed.
            IP_header_TCP = IPHeader(raw_buffer_TCP[0:20])

            if IP_header_TCP.protocol == 'TCP':
                print '%s\t%s → %s' % (IP_header_TCP.protocol, IP_header_TCP.source_address, IP_header_TCP.destination_address)

    except KeyboardInterrupt:
        print 'Exiting program...'
        exit(0)
def main():
    # Entry point: run the sniffer loop until interrupted.
    startSniffing()

if __name__ == '__main__':
    main()
msemple1111/kahoot-hack | lib/requests/packages/urllib3/connection.py | 64 | 12593 | from __future__ import absolute_import
import datetime
import logging
import os
import sys
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
from .packages.six.moves.http_client import HTTPException # noqa: F401
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
assert_fingerprint,
create_urllib3_context,
ssl_wrap_socket
)
from .util import connection
from ._collections import HTTPHeaderDict
log = logging.getLogger(__name__)

# Default ports per URL scheme, used for HTTPConnection.default_port below.
port_by_scheme = {
    'http': 80,
    'https': 443,
}

# Sanity threshold for the local clock: VerifiedHTTPSConnection.connect()
# emits a SystemTimeWarning when today() predates this, since certificate
# validation is unreliable with a badly wrong clock.
RECENT_DATE = datetime.date(2014, 1, 1)
class DummyConnection(object):
    """Used to detect a failed ConnectionCls import."""
    # Assigned to HTTPSConnection at module bottom when the ssl module is
    # unavailable, so HTTPS use fails loudly instead of silently.
    pass
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:

      - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
      - ``source_address``: Set the source address for the current connection.

        .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x

      - ``socket_options``: Set specific options on the underlying socket. If not specified, then
        defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
        Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.

        For example, if you wish to enable TCP Keep Alive in addition to the defaults,
        you might pass::

            HTTPConnection.default_socket_options + [
                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
            ]

        Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """

    default_port = port_by_scheme['http']

    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]

    #: Whether this connection verifies the host's certificate.
    is_verified = False

    def __init__(self, *args, **kw):
        if six.PY3:  # Python 3
            # ``strict`` was removed from http.client in Python 3.
            kw.pop('strict', None)

        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')

        if sys.version_info < (2, 7):  # Python 2.6
            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
            # not newer versions. We can still use it when creating a
            # connection though, so we pop it *after* we have saved it as
            # self.source_address.
            kw.pop('source_address', None)

        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)

        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        :raises ConnectTimeoutError: on connect timeout.
        :raises NewConnectionError: on any other socket-level failure.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address

        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options

        try:
            conn = connection.create_connection(
                (self.host, self.port), self.timeout, **extra_kw)

        except SocketTimeout as e:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        except SocketError as e:
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)

        return conn

    def _prepare_conn(self, conn):
        # Adopt the freshly created socket and set up CONNECT tunneling
        # when a proxy tunnel host has been configured.
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)

    def request_chunked(self, method, url, body=None, headers=None):
        """
        Alternative to the common request method, which sends the
        body with chunked encoding and not as one block
        """
        headers = HTTPHeaderDict(headers if headers is not None else {})
        skip_accept_encoding = 'accept-encoding' in headers
        skip_host = 'host' in headers
        self.putrequest(
            method,
            url,
            skip_accept_encoding=skip_accept_encoding,
            skip_host=skip_host
        )
        for header, value in headers.items():
            self.putheader(header, value)
        if 'transfer-encoding' not in headers:
            self.putheader('Transfer-Encoding', 'chunked')
        self.endheaders()

        if body is not None:
            stringish_types = six.string_types + (six.binary_type,)
            if isinstance(body, stringish_types):
                body = (body,)
            for chunk in body:
                if not chunk:
                    continue
                if not isinstance(chunk, six.binary_type):
                    chunk = chunk.encode('utf8')
                # Each chunk is framed as "<hex length>\r\n<data>\r\n".
                len_str = hex(len(chunk))[2:]
                self.send(len_str.encode('utf-8'))
                self.send(b'\r\n')
                self.send(chunk)
                self.send(b'\r\n')

        # After the if clause, to always have a closed body
        self.send(b'0\r\n\r\n')
class HTTPSConnection(HTTPConnection):
    """HTTPConnection that wraps its socket in TLS on connect().

    No certificate verification is performed here; see
    VerifiedHTTPSConnection below for the verifying variant.
    """
    default_port = port_by_scheme['https']

    ssl_version = None

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 ssl_context=None, **kw):

        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file
        self.ssl_context = ssl_context

        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        conn = self._new_conn()
        self._prepare_conn(conn)

        if self.ssl_context is None:
            # Build a default context with no certificate requirements.
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(None),
                cert_reqs=resolve_cert_reqs(None),
            )

        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            ssl_context=self.ssl_context,
        )
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ssl_version = None
    assert_fingerprint = None

    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None):
        """
        This method should only be called once, before the connection is used.
        """
        # If cert_reqs is not provided, we can try to guess. If the user gave
        # us a cert database, we assume they want to use it: otherwise, if
        # they gave us an SSL Context object we should use whatever is set for
        # it.
        if cert_reqs is None:
            if ca_certs or ca_cert_dir:
                cert_reqs = 'CERT_REQUIRED'
            elif self.ssl_context is not None:
                cert_reqs = self.ssl_context.verify_mode

        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)

    def connect(self):
        # Add certificate verification
        conn = self._new_conn()

        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)

            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host

        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )

        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        if self.ssl_context is None:
            self.ssl_context = create_urllib3_context(
                ssl_version=resolve_ssl_version(self.ssl_version),
                cert_reqs=resolve_cert_reqs(self.cert_reqs),
            )

        context = self.ssl_context
        context.verify_mode = resolve_cert_reqs(self.cert_reqs)
        self.sock = ssl_wrap_socket(
            sock=conn,
            keyfile=self.key_file,
            certfile=self.cert_file,
            ca_certs=self.ca_certs,
            ca_cert_dir=self.ca_cert_dir,
            server_hostname=hostname,
            ssl_context=context)

        if self.assert_fingerprint:
            # Pin by fingerprint instead of (or in addition to) CA trust.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif context.verify_mode != ssl.CERT_NONE \
                and self.assert_hostname is not False:
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
                    '`commonName` for now. This feature is being removed by major browsers and '
                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
                    'for details.)'.format(hostname)),
                    SubjectAltNameWarning
                )
            _match_hostname(cert, self.assert_hostname or hostname)

        self.is_verified = (
            context.verify_mode == ssl.CERT_REQUIRED or
            self.assert_fingerprint is not None
        )
def _match_hostname(cert, asserted_hostname):
    """Check *cert* against the expected hostname; on mismatch, log and
    re-raise CertificateError with the peer cert attached for inspection."""
    try:
        match_hostname(cert, asserted_hostname)
    except CertificateError as e:
        log.error(
            'Certificate did not match expected hostname: %s. '
            'Certificate: %s', asserted_hostname, cert
        )
        # Add cert to exception and reraise so client code can inspect
        # the cert when catching the exception, if they want to
        e._peer_cert = cert
        raise
# Pick the exported HTTPSConnection: the verifying implementation when the
# ssl module is available, otherwise the loud-failure placeholder.
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
else:
    HTTPSConnection = DummyConnection
| gpl-3.0 |
wkarmistead/InvestmentAnalysisDashboard | pyBackend/dataLayer/InputTechnicalAnalysis.py | 1 | 1167 | import os
import csv
from Application import *
class InputTechnicalAnalysis(object):
    '''
    InputTechnicalAnalysis loads the input file TechnicalIndicators.csv
    '''
    # NOTE(review): class-level list shared by ALL instances -- every new
    # instance appends its rows to the same list. Confirm this is intended.
    Inputs = []

    def __init__(self):
        self._rootPath = Application.getRootDirectory()  # Get application root directory
        self._inputPath = self._rootPath + '/settings/input_TechnicalIndicators.csv'  # Set path to data folder
        print('Loading technical analysis settings...')
        self._initializeObject()

    def _initializeObject(self):
        self._loadInput()

    def _loadInput(self):
        # Each CSV row: first column is the indicator type, the remaining
        # columns become that indicator's argument list.
        inputFileCSV = csv.reader(open(self._inputPath, newline=''))
        for row in inputFileCSV:
            i = 0
            thisInput = Input()
            thisInput.listArgs = [None] * (len(row) - 1)
            for col in row:
                if i == 0:
                    thisInput.type = col
                else:
                    thisInput.listArgs[i - 1] = col
                i = i + 1
            #thisInput.listArgs.ravel()
            self.Inputs.append(thisInput)
class Input(object):
    """One parsed row of input_TechnicalIndicators.csv: an indicator type
    plus its argument list."""

    def __init__(self):
        # Instance attributes. The original class-level "listArgs = []" was a
        # mutable class attribute shared by every instance that never
        # reassigned it.
        self.type = ''
        self.listArgs = []
| gpl-2.0 |
Omegaphora/external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/android_browser_finder_unittest.py | 25 | 3287 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import benchmark
from telemetry.core import browser_options
from telemetry.core.platform import android_device
from telemetry.core.platform import android_platform_backend
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.unittest import system_stub
class AndroidBrowserFinderTest(unittest.TestCase):
    """Exercises FindAllAvailableBrowsers against stubbed-out adb, os,
    subprocess and logging, so no real adb binary or device is required."""

    def setUp(self):
        # Replace external-facing modules with in-memory stubs.
        self._stubs = system_stub.Override(android_browser_finder,
                                           ['adb_commands', 'os', 'subprocess',
                                            'logging'])
        self._android_device_stub = system_stub.Override(
            android_device, ['adb_commands'])
        self._apb_stub = system_stub.Override(
            android_platform_backend, ['adb_commands'])

    def tearDown(self):
        self._stubs.Restore()
        self._android_device_stub.Restore()
        self._apb_stub.Restore()

    def test_no_adb(self):
        # adb binary missing entirely -> no browsers, no crash.
        finder_options = browser_options.BrowserFinderOptions()

        def NoAdb(*args, **kargs):  # pylint: disable=W0613
            raise OSError('not found')
        self._stubs.subprocess.Popen = NoAdb

        browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
        self.assertEquals(0, len(browsers))

    def test_adb_no_devices(self):
        finder_options = browser_options.BrowserFinderOptions()

        browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
        self.assertEquals(0, len(browsers))

    def test_adb_permissions_error(self):
        # A "no permissions" device should produce warnings, not browsers.
        finder_options = browser_options.BrowserFinderOptions()

        self._stubs.subprocess.Popen.communicate_result = (
            """List of devices attached
????????????\tno permissions""",
            """* daemon not running. starting it now on port 5037 *
* daemon started successfully *
""")

        browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
        self.assertEquals(3, len(self._stubs.logging.warnings))
        self.assertEquals(0, len(browsers))

    def test_adb_two_devices(self):
        # Multiple attached devices are ambiguous: warn and return nothing.
        finder_options = browser_options.BrowserFinderOptions()

        self._android_device_stub.adb_commands.attached_devices = [
            '015d14fec128220c', '015d14fec128220d']

        browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
        self.assertEquals(1, len(self._stubs.logging.warnings))
        self.assertEquals(0, len(browsers))

    @benchmark.Disabled('chromeos')
    def test_adb_one_device(self):
        finder_options = browser_options.BrowserFinderOptions()

        self._android_device_stub.adb_commands.attached_devices = (
            ['015d14fec128220c'])

        # Fake shell handlers: "pm list packages" reports an installed
        # content shell, "ls" reports one CPU sysfs entry.
        def OnPM(args):
            assert args[0] == 'pm'
            assert args[1] == 'list'
            assert args[2] == 'packages'
            return ['package:org.chromium.content_shell_apk',
                    'package.com.google.android.setupwizard']

        def OnLs(_):
            return ['/sys/devices/system/cpu/cpu0']

        self._apb_stub.adb_commands.adb_device.shell_command_handlers['pm'] = OnPM
        self._apb_stub.adb_commands.adb_device.shell_command_handlers['ls'] = OnLs

        browsers = android_browser_finder.FindAllAvailableBrowsers(finder_options)
        self.assertEquals(1, len(browsers))
| bsd-3-clause |
slyphon/pants | src/python/pants/base/deprecated.py | 6 | 5205 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import inspect
import warnings
from functools import wraps
import six
from pants.base.revision import Revision
from pants.version import VERSION
_PANTS_SEMVER = Revision.semver(VERSION)
# Deprecation-application error hierarchy: everything raised for a misused
# @deprecated decoration subclasses DeprecationApplicationError.
class DeprecationApplicationError(Exception):
  """The base exception type thrown for any form of @deprecation application error."""


class MissingRemovalVersionError(DeprecationApplicationError):
  """Indicates the required removal_version was not supplied."""


class BadRemovalVersionError(DeprecationApplicationError):
  """Indicates the supplied removal_version was not a valid semver string."""


class PastRemovalVersionError(DeprecationApplicationError):
  """Indicates the supplied removal_version is not in the future.

  All deprecations must give at least until the next release for users to adapt.
  """


class BadDecoratorNestingError(DeprecationApplicationError):
  """Indicates the @deprecated decorator was innermost in a sequence of layered decorators."""
def check_deprecated_semver(removal_version):
  """Validates that removal_version is a semver string strictly greater than
  the current pants version, i.e. that the deprecation removal lies in the
  future.

  :param str removal_version: The pantsbuild.pants version which will remove the deprecated
                              function.
  :raises DeprecationApplicationError if the removal_version parameter is not a string, not a
          valid semver, or is not later than the current release version.
  """
  if not isinstance(removal_version, six.string_types):
    raise BadRemovalVersionError('The removal_version must be a semver version string.')
  try:
    removal_semver = Revision.semver(removal_version)
  except Revision.BadRevision as e:
    raise BadRemovalVersionError('The given removal version {} is not a valid semver: '
                                 '{}'.format(removal_version, e))
  if removal_semver <= _PANTS_SEMVER:
    raise PastRemovalVersionError('The removal version must be greater than the current pants '
                                  'version of {} - given {}'.format(VERSION, removal_version))
def deprecated(removal_version, hint_message=None):
  """Marks a function or method as deprecated.

  A removal version must be supplied and it must be greater than the current 'pantsbuild.pants'
  version.

  When choosing a removal version there is a natural tension between the code-base, which benefits
  from short deprecation cycles, and the user-base which may prefer to deal with deprecations less
  frequently.  As a rule of thumb, if the hint message can fully convey corrective action
  succinctly and you judge the impact to be on the small side (effects custom tasks as opposed to
  effecting BUILD files), lean towards the next release version as the removal version; otherwise,
  consider initiating a discussion to win consensus on a reasonable removal version.

  :param str removal_version: The pantsbuild.pants version which will remove the deprecated
                              function.
  :param str hint_message: An optional hint pointing to alternatives to the deprecation.
  :raises DeprecationApplicationError if the @deprecation is applied improperly.
  """
  if removal_version is None:
    raise MissingRemovalVersionError('A removal_version must be specified for this deprecation.')
  check_deprecated_semver(removal_version)

  def decorator(func):
    # Anything other than a plain function here means another decorator ran
    # first and wrapped the function in a non-function object.
    if not inspect.isfunction(func):
      raise BadDecoratorNestingError('The @deprecated decorator must be applied innermost of all '
                                     'decorators.')

    warning_message = ('\n{module}.{func_name} is deprecated and will be removed in version '
                       '{removal_version}').format(module=func.__module__,
                                                   func_name=func.__name__,
                                                   removal_version=removal_version)
    if hint_message:
      warning_message += (':\n' + hint_message)
    else:
      warning_message += '.'

    @wraps(func)
    def wrapper(*args, **kwargs):
      # stacklevel=2 attributes the warning to the deprecated function's caller.
      warnings.warn(warning_message, DeprecationWarning, stacklevel=2)
      return func(*args, **kwargs)
    return wrapper
  return decorator
def deprecated_module(removal_version, hint_message=None):
  """Marks an entire module as deprecated.

  Add a call to this at the top of the deprecated module, and it will print a warning message
  when the module is imported.

  Arguments are as for deprecated(), above.
  """
  if removal_version is None:
    raise MissingRemovalVersionError('A removal_version must be specified for this deprecation.')
  check_deprecated_semver(removal_version)

  suffix = (': ' + hint_message) if hint_message else '.'
  warning_message = ('\nModule is deprecated and will be removed in version '
                     '{removal_version}{suffix}').format(removal_version=removal_version,
                                                         suffix=suffix)
  warnings.warn(warning_message, DeprecationWarning, stacklevel=2)
| apache-2.0 |
rghe/ansible | test/units/modules/cloud/amazon/test_ec2_vpc_vpn.py | 64 | 15038 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import pytest
import os
from units.utils.amazon_placebo_fixtures import placeboify, maybe_sleep
from ansible.modules.cloud.amazon import ec2_vpc_vpn
from ansible.module_utils._text import to_text
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, boto3_tag_list_to_ansible_dict
class FakeModule(object):
    """Minimal stand-in for AnsibleModule used by the placebo-backed tests.

    Records exit/fail arguments on the instance; fail_json aborts the test
    path by raising a plain Exception('FAIL').
    """
    def __init__(self, **kwargs):
        # Everything passed in becomes the module "parameters".
        self.params = dict(kwargs)
    def fail_json(self, *args, **kwargs):
        # Remember what the module would have reported, then bail out.
        self.exit_args = args
        self.exit_kwargs = kwargs
        raise Exception('FAIL')
    def exit_json(self, *args, **kwargs):
        self.exit_args = args
        self.exit_kwargs = kwargs
def get_vgw(connection):
    """Return two VPN gateway ids tagged Ansible_VPN=Test, creating them if needed."""
    existing = connection.describe_vpn_gateways(
        Filters=[{'Name': 'tag:Ansible_VPN', 'Values': ['Test']}])['VpnGateways']
    if len(existing) >= 2:
        # Reuse the first two gateways that already carry the test tag.
        return [existing[0]['VpnGatewayId'], existing[1]['VpnGatewayId']]
    # Otherwise create two fresh gateways, tag them, and return their ids.
    created = [connection.create_vpn_gateway(Type='ipsec.1') for _ in range(2)]
    ids = [vgw['VpnGateway']['VpnGatewayId'] for vgw in created]
    for vgw_id in ids:
        connection.create_tags(Resources=[vgw_id], Tags=[{'Key': 'Ansible_VPN', 'Value': 'Test'}])
    return ids
def get_cgw(connection):
    """Return two customer gateway ids named Ansible-CGW, creating them if needed."""
    existing = connection.describe_customer_gateways(
        DryRun=False,
        Filters=[{'Name': 'state', 'Values': ['available']},
                 {'Name': 'tag:Name', 'Values': ['Ansible-CGW']}])['CustomerGateways']
    if len(existing) >= 2:
        return [existing[0]['CustomerGatewayId'], existing[1]['CustomerGatewayId']]
    # Create two gateways with distinct public IPs, tag both, return the ids.
    created = [connection.create_customer_gateway(DryRun=False, Type='ipsec.1', PublicIp=ip, BgpAsn=65000)
               for ip in ('9.8.7.6', '5.4.3.2')]
    ids = [cgw['CustomerGateway']['CustomerGatewayId'] for cgw in created]
    for cgw_id in ids:
        connection.create_tags(Resources=[cgw_id], Tags=[{'Key': 'Ansible-CGW', 'Value': 'Test'}])
    return ids
def get_dependencies():
    """Return (customer gateway ids, vpn gateway ids).

    When PLACEBO_RECORD is set, look the gateways up (or create them) against
    real AWS; otherwise return the canned ids baked into the recorded fixtures.
    """
    if os.getenv('PLACEBO_RECORD'):
        module = FakeModule(**{})
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
        vgw, cgw = get_vgw(connection), get_cgw(connection)
    else:
        vgw = ["vgw-35d70c2b", "vgw-32d70c2c"]
        cgw = ["cgw-6113c87f", "cgw-9e13c880"]
    return cgw, vgw
def setup_mod_conn(placeboify, params):
    """Build a (FakeModule, ec2 client) pair for one test case."""
    fake_module = FakeModule(**params)
    return fake_module, placeboify.client('ec2')
def make_params(cgw, vgw, tags=None, filters=None, routes=None):
    """Assemble the module-parameter dict for one VPN connection test.

    tags/filters/routes default to fresh empty containers per call (never
    shared mutable defaults).
    """
    return {
        'customer_gateway_id': cgw,
        'static_only': True,
        'vpn_gateway_id': vgw,
        'connection_type': 'ipsec.1',
        'purge_tags': True,
        'tags': tags if tags is not None else {},
        'filters': filters if filters is not None else {},
        'routes': routes if routes is not None else [],
    }
def make_conn(placeboify, module, connection):
    """Create a VPN connection from the module's parameters.

    :param placeboify: placebo fixture (unused here; kept for helper symmetry).
    :param module: FakeModule carrying the connection parameters.
    :param connection: boto3 ec2 client.
    :returns: (changed, vpn) where vpn is the created connection dict.
    """
    customer_gateway_id = module.params['customer_gateway_id']
    static_only = module.params['static_only']
    vpn_gateway_id = module.params['vpn_gateway_id']
    connection_type = module.params['connection_type']
    # NOTE: the old code also read module.params['check_mode'], which make_params()
    # never supplies — that raised KeyError and the value was unused anyway.
    changed = True
    vpn = ec2_vpc_vpn.create_connection(connection, customer_gateway_id, static_only, vpn_gateway_id, connection_type)
    return changed, vpn
def tear_down_conn(placeboify, connection, vpn_connection_id):
    """Delete the VPN connection with the given id (test cleanup helper)."""
    ec2_vpc_vpn.delete_connection(connection, vpn_connection_id)
def test_find_connection_vpc_conn_id(placeboify, maybe_sleep):
    """find_connection returns exactly the connection named by its id."""
    # setup dependencies for 2 vpn connections
    dependencies = setup_req(placeboify, 2)
    dep1, dep2 = dependencies[0], dependencies[1]
    params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
    params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
    # find the connection with a vpn_connection_id and assert it is the expected one
    assert vpn1['VpnConnectionId'] == ec2_vpc_vpn.find_connection(conn1, params1, vpn1['VpnConnectionId'])['VpnConnectionId']
    tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
    tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
def test_find_connection_filters(placeboify, maybe_sleep):
    """find_connection selects the right connection when filtering by tags."""
    # setup dependencies for 2 vpn connections
    dependencies = setup_req(placeboify, 2)
    dep1, dep2 = dependencies[0], dependencies[1]
    params1, vpn1, m1, conn1 = dep1['params'], dep1['vpn'], dep1['module'], dep1['connection']
    params2, vpn2, m2, conn2 = dep2['params'], dep2['vpn'], dep2['module'], dep2['connection']
    # update to different tags
    params1.update(tags={'Wrong': 'Tag'})
    params2.update(tags={'Correct': 'Tag'})
    ec2_vpc_vpn.ensure_present(conn1, params1)
    ec2_vpc_vpn.ensure_present(conn2, params2)
    # create some new parameters for a filter
    params = {'filters': {'tags': {'Correct': 'Tag'}}}
    # find the connection that has the parameters above
    found = ec2_vpc_vpn.find_connection(conn1, params)
    # assert the correct connection was found
    assert found['VpnConnectionId'] == vpn2['VpnConnectionId']
    # delete the connections
    tear_down_conn(placeboify, conn1, vpn1['VpnConnectionId'])
    tear_down_conn(placeboify, conn2, vpn2['VpnConnectionId'])
def test_find_connection_insufficient_filters(placeboify, maybe_sleep):
    """find_connection must raise when filters match more than one connection."""
    # get list of customer gateways and virtual private gateways
    cgw, vgw = get_dependencies()
    # create two connections with the same tags
    params = make_params(cgw[0], vgw[0], tags={'Correct': 'Tag'})
    params2 = make_params(cgw[1], vgw[1], tags={'Correct': 'Tag'})
    m, conn = setup_mod_conn(placeboify, params)
    m2, conn2 = setup_mod_conn(placeboify, params2)
    _, vpn1 = ec2_vpc_vpn.ensure_present(conn, m.params)
    _, vpn2 = ec2_vpc_vpn.ensure_present(conn2, m2.params)
    # reset the parameters so only filtering by tags will occur
    m.params = {'filters': {'tags': {'Correct': 'Tag'}}}
    # assert that multiple matching connections have been found
    with pytest.raises(Exception) as error_message:
        ec2_vpc_vpn.find_connection(conn, m.params)
    # Compare the raised exception's message. The old assertion compared the
    # ExceptionInfo object itself to a string, which is always False.
    assert str(error_message.value) == ("More than one matching VPN connection was found."
                                        "To modify or delete a VPN please specify vpn_connection_id or add filters.")
    # delete the connections
    tear_down_conn(placeboify, conn, vpn1['VpnConnectionId'])
    tear_down_conn(placeboify, conn, vpn2['VpnConnectionId'])
def test_find_connection_nonexistent(placeboify, maybe_sleep):
    """find_connection returns None when nothing matches the filters."""
    # create parameters but don't create a connection with them
    params = {'filters': {'tags': {'Correct': 'Tag'}}}
    m, conn = setup_mod_conn(placeboify, params)
    # try to find a connection with matching parameters and assert None are found
    assert ec2_vpc_vpn.find_connection(conn, m.params) is None
def test_create_connection(placeboify, maybe_sleep):
    """ensure_present creates a new VPN connection and reports changed=True."""
    # get list of customer gateways and virtual private gateways
    cgw, vgw = get_dependencies()
    # create a connection
    params = make_params(cgw[0], vgw[0])
    m, conn = setup_mod_conn(placeboify, params)
    changed, vpn = ec2_vpc_vpn.ensure_present(conn, m.params)
    # assert that changed is true and that there is a connection id
    assert changed is True
    assert 'VpnConnectionId' in vpn
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_create_connection_that_exists(placeboify, maybe_sleep):
    """ensure_present is idempotent for an already-existing connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # try to recreate the same connection
    changed, vpn2 = ec2_vpc_vpn.ensure_present(conn, params)
    # nothing should have changed
    assert changed is False
    assert vpn['VpnConnectionId'] == vpn2['VpnConnectionId']
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_modify_deleted_connection(placeboify, maybe_sleep):
    """Updating a deleted connection must raise with a helpful message."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # delete it
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
    # try to update the deleted connection
    m.params.update(vpn_connection_id=vpn['VpnConnectionId'])
    with pytest.raises(Exception) as error_message:
        ec2_vpc_vpn.ensure_present(conn, m.params)
    # Compare the raised exception's message. The old assertion compared the
    # ExceptionInfo object itself to a string, which is always False.
    assert str(error_message.value) == "There is no VPN connection available or pending with that id. Did you delete it?"
def test_delete_connection(placeboify, maybe_sleep):
    """ensure_absent removes an existing connection and reports changed=True."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # delete it
    changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
    assert changed is True
    assert vpn == {}
def test_delete_nonexistent_connection(placeboify, maybe_sleep):
    """ensure_absent on a filter matching nothing is a no-op."""
    # create parameters and ensure any connection matching (None) is deleted
    params = {'filters': {'tags': {'ThisConnection': 'DoesntExist'}}}
    m, conn = setup_mod_conn(placeboify, params)
    changed, vpn = ec2_vpc_vpn.ensure_absent(conn, m.params)
    assert changed is False
    assert vpn == {}
def test_check_for_update_tags(placeboify, maybe_sleep):
    """check_for_update reports the tag additions and removals required."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # add and remove a number of tags
    m.params['tags'] = {'One': 'one', 'Two': 'two'}
    ec2_vpc_vpn.ensure_present(conn, m.params)
    m.params['tags'] = {'Two': 'two', 'Three': 'three', 'Four': 'four'}
    changes = ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
    flat_dict_changes = boto3_tag_list_to_ansible_dict(changes['tags_to_add'])
    correct_changes = boto3_tag_list_to_ansible_dict([{'Key': 'Three', 'Value': 'three'}, {'Key': 'Four', 'Value': 'four'}])
    assert flat_dict_changes == correct_changes
    assert changes['tags_to_remove'] == ['One']
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep):
    """check_for_update must raise when a non-modifiable attribute differs."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    current_vgw = params['vpn_gateway_id']
    # update a parameter that isn't modifiable
    m.params.update(vpn_gateway_id="invalidchange")
    err = 'You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are tags.'.format(current_vgw)
    with pytest.raises(Exception) as error_message:
        # Argument order fixed to match the other call site: (connection, params, id).
        # The old code passed (m, conn, ...) which swapped module and connection.
        ec2_vpc_vpn.check_for_update(conn, m.params, vpn['VpnConnectionId'])
    # Compare the raised exception's message; an ExceptionInfo never equals a str.
    assert str(error_message.value) == err
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_add_tags(placeboify, maybe_sleep):
    """add_tags attaches the given tag list to the connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # add a tag to the connection
    ec2_vpc_vpn.add_tags(conn, vpn['VpnConnectionId'], add=[{'Key': 'Ansible-Test', 'Value': 'VPN'}])
    # assert tag is there
    current_vpn = ec2_vpc_vpn.find_connection(conn, params)
    assert current_vpn['Tags'] == [{'Key': 'Ansible-Test', 'Value': 'VPN'}]
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_remove_tags(placeboify, maybe_sleep):
    """remove_tags strips the named tag keys from the connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # remove a tag from the connection
    ec2_vpc_vpn.remove_tags(conn, vpn['VpnConnectionId'], remove=['Ansible-Test'])
    # assert the tag is gone
    current_vpn = ec2_vpc_vpn.find_connection(conn, params)
    assert 'Tags' not in current_vpn
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def test_add_routes(placeboify, maybe_sleep):
    """add_routes attaches all the given static routes to the connection."""
    # setup dependencies for 1 vpn connection
    dependencies = setup_req(placeboify, 1)
    params, vpn, m, conn = dependencies['params'], dependencies['vpn'], dependencies['module'], dependencies['connection']
    # create connection with a route
    ec2_vpc_vpn.add_routes(conn, vpn['VpnConnectionId'], ['195.168.2.0/24', '196.168.2.0/24'])
    # assert both routes are there
    current_vpn = ec2_vpc_vpn.find_connection(conn, params)
    assert set(each['DestinationCidrBlock'] for each in current_vpn['Routes']) == set(['195.168.2.0/24', '196.168.2.0/24'])
    # delete connection
    tear_down_conn(placeboify, conn, vpn['VpnConnectionId'])
def setup_req(placeboify, number_of_results=1):
    """Create one or two live VPN connections and return their fixtures.

    Each fixture is a dict with 'module', 'connection', 'vpn' and 'params'.
    Returns a single dict for number_of_results == 1, else a pair.
    """
    assert number_of_results in (1, 2)
    cgw, vgw = get_dependencies()
    results = []
    for idx in range(number_of_results):
        params = make_params(cgw[idx], vgw[idx])
        m, conn = setup_mod_conn(placeboify, params)
        _, vpn = ec2_vpc_vpn.ensure_present(conn, params)
        results.append({'module': m, 'connection': conn, 'vpn': vpn, 'params': params})
    return results[0] if number_of_results == 1 else (results[0], results[1])
| gpl-3.0 |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/django/contrib/admin/filters.py | 188 | 16608 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_model_from_relation, prepare_lookup_value, reverse_field_path,
)
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db import models
from django.utils import timezone
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
class ListFilter(object):
    """Abstract base for all admin changelist filters.

    Subclasses must set ``title`` and implement ``has_output()``,
    ``choices()``, ``queryset()`` and ``expected_parameters()``.
    """
    title = None # Human-readable title to appear in the right sidebar.
    template = 'admin/filter.html'
    def __init__(self, request, params, model, model_admin):
        # This dictionary will eventually contain the request's query string
        # parameters actually used by this filter.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)
    def has_output(self):
        """
        Returns True if some choices would be output for this filter.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')
    def choices(self, cl):
        """
        Returns choices ready to be output in the template.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a choices() method')
    def queryset(self, request, queryset):
        """
        Returns the filtered queryset.
        """
        raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
    def expected_parameters(self):
        """
        Returns the list of parameter names that are expected from the
        request's query string and that will be used by this filter.
        """
        raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
    """Filter defined entirely in Python: subclasses supply ``parameter_name``
    and ``lookups()``, and filter the queryset in ``queryset()``.
    """
    # The parameter that should be used in the query string for that filter.
    parameter_name = None
    def __init__(self, request, params, model, model_admin):
        super(SimpleListFilter, self).__init__(
            request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'parameter_name'." % self.__class__.__name__)
        if self.parameter_name in params:
            # Claim our parameter from the incoming query-string params.
            value = params.pop(self.parameter_name)
            self.used_parameters[self.parameter_name] = value
        lookup_choices = self.lookups(request, model_admin)
        if lookup_choices is None:
            lookup_choices = ()
        self.lookup_choices = list(lookup_choices)
    def has_output(self):
        return len(self.lookup_choices) > 0
    def value(self):
        """
        Returns the value (in string format) provided in the request's
        query string for this filter, if any. If the value wasn't provided then
        returns None.
        """
        return self.used_parameters.get(self.parameter_name)
    def lookups(self, request, model_admin):
        """
        Must be overridden to return a list of tuples (value, verbose value)
        """
        raise NotImplementedError(
            'The SimpleListFilter.lookups() method must be overridden to '
            'return a list of tuples (value, verbose value)')
    def expected_parameters(self):
        return [self.parameter_name]
    def choices(self, cl):
        # First entry is the "All" reset link, selected while no value is set.
        yield {
            'selected': self.value() is None,
            'query_string': cl.get_query_string({}, [self.parameter_name]),
            'display': _('All'),
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_text(lookup),
                'query_string': cl.get_query_string({
                    self.parameter_name: lookup,
                }, []),
                'display': title,
            }
class FieldListFilter(ListFilter):
    """Base class for filters keyed to a model field.

    Concrete subclasses are registered via ``register()`` and picked for a
    given field by ``create()`` (first registered test that matches wins).
    """
    # Registry of (test, filter_class) pairs, searched in order by create().
    _field_list_filters = []
    _take_priority_index = 0
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        self.title = getattr(field, 'verbose_name', field_path)
        super(FieldListFilter, self).__init__(
            request, params, model, model_admin)
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)
    def has_output(self):
        return True
    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except ValidationError as e:
            # Surface bad query-string input as the admin's standard error.
            raise IncorrectLookupParameters(e)
    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            cls._field_list_filters.insert(
                cls._take_priority_index, (test, list_filter_class))
            cls._take_priority_index += 1
        else:
            cls._field_list_filters.append((test, list_filter_class))
    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # Returns an instance of the first registered filter whose test
        # accepts this field; returns None implicitly if none match.
        for test, list_filter_class in cls._field_list_filters:
            if not test(field):
                continue
            return list_filter_class(field, request, params,
                model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
    """Filter on a relation (ForeignKey/ManyToMany), listing related objects."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        self.lookup_kwarg = '%s__%s__exact' % (field_path, field.target_field.name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        self.lookup_choices = self.field_choices(field, request, model_admin)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
        self.empty_value_display = model_admin.get_empty_value_display()
    @property
    def include_empty_choice(self):
        """
        Return True if a "(None)" choice should be included, which filters
        out everything except empty relationships.
        """
        return self.field.null or (self.field.is_relation and self.field.many_to_many)
    def has_output(self):
        if self.include_empty_choice:
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def field_choices(self, field, request, model_admin):
        # Hook point: RelatedOnlyFieldListFilter narrows this set below.
        return field.get_choices(include_blank=False)
    def choices(self, cl):
        # "All" first, then one entry per related object, then optional "(None)".
        yield {
            'selected': self.lookup_val is None and not self.lookup_val_isnull,
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_val == smart_text(pk_val),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: pk_val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if self.include_empty_choice:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': self.empty_value_display,
            }
FieldListFilter.register(lambda f: f.remote_field, RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
    """Yes/No filter for boolean fields, plus Unknown for NullBooleanField."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg2 = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2)
        super(BooleanFieldListFilter, self).__init__(field,
            request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]
    def choices(self, cl):
        for lookup, title in (
                (None, _('All')),
                ('1', _('Yes')),
                ('0', _('No'))):
            yield {
                'selected': self.lookup_val == lookup and not self.lookup_val2,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: lookup,
                }, [self.lookup_kwarg2]),
                'display': title,
            }
        if isinstance(self.field, models.NullBooleanField):
            # NULL is a distinct third state for NullBooleanField.
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': cl.get_query_string({
                    self.lookup_kwarg2: 'True',
                }, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }
FieldListFilter.register(lambda f: isinstance(f,
    (models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
    """Filter for fields declared with a ``choices`` list."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '{0}__exact'.format(field_path)
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        super(ChoicesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg]
    def choices(self, cl):
        # "All" entry first: selected while no lookup is active.
        yield {
            'selected': self.lookup_val is None,
            'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
            'display': _('All')
        }
        # One entry per declared choice (flatchoices flattens any optgroups).
        for choice_value, choice_label in self.field.flatchoices:
            yield {
                'selected': self.lookup_val == smart_text(choice_value),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: choice_value}),
                'display': choice_label,
            }
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    """Filter offering common date ranges (today, past 7 days, month, year)."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = '%s__' % field_path
        # Keep every incoming param that targets this field (e.g. created__gte).
        self.date_params = {k: v for k, v in params.items()
                            if k.startswith(self.field_generic)}
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if timezone.is_aware(now):
            now = timezone.localtime(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else: # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        if today.month == 12:
            next_month = today.replace(year=today.year + 1, month=1, day=1)
        else:
            next_month = today.replace(month=today.month + 1, day=1)
        next_year = today.replace(year=today.year + 1, month=1, day=1)
        self.lookup_kwarg_since = '%s__gte' % field_path
        self.lookup_kwarg_until = '%s__lt' % field_path
        # Each link is (title, {lookup params}); ranges are half-open [since, until).
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {
                self.lookup_kwarg_since: str(today),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_kwarg_since: str(today.replace(day=1)),
                self.lookup_kwarg_until: str(next_month),
            }),
            (_('This year'), {
                self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                self.lookup_kwarg_until: str(next_year),
            }),
        )
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg_since, self.lookup_kwarg_until]
    def choices(self, cl):
        for title, param_dict in self.links:
            yield {
                'selected': self.date_params == param_dict,
                'query_string': cl.get_query_string(
                    param_dict, [self.field_generic]),
                'display': title,
            }
FieldListFilter.register(
    lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    """Last-resort filter: lists every distinct value of the field."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
        self.empty_value_display = model_admin.get_empty_value_display()
        parent_model, reverse_path = reverse_field_path(model, field_path)
        # Obey parent ModelAdmin queryset when deciding which options to show
        if model == parent_model:
            queryset = model_admin.get_queryset(request)
        else:
            queryset = parent_model._default_manager.all()
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def choices(self, cl):
        yield {
            'selected': (self.lookup_val is None
                         and self.lookup_val_isnull is None),
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # NULLs are collected into a single trailing "(empty)" choice.
                include_none = True
                continue
            val = smart_text(val)
            yield {
                'selected': self.lookup_val == val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': self.empty_value_display,
            }
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
class RelatedOnlyFieldListFilter(RelatedFieldListFilter):
    """Like RelatedFieldListFilter, but only offers related objects that are
    actually referenced by rows in the changelist's queryset."""
    def field_choices(self, field, request, model_admin):
        used_pks = model_admin.get_queryset(request).values_list(field.name, flat=True)
        return field.get_choices(include_blank=False, limit_choices_to={'pk__in': set(used_pks)})
| mit |
madmax983/h2o-3 | py2/h2o_objects.py | 20 | 37914 | import sys, getpass, os, psutil, time, requests, errno, threading, inspect, shlex
import h2o_os_util, h2o_print as h2p, h2o_args
import h2o_nodes
from h2o_test import \
tmp_dir, tmp_file, flatfile_pathname, spawn_cmd, find_file, verboseprint, \
dump_json, log, check_sandbox_for_errors
import json, platform, re
from h2o_test import dump_json
# print "h2o_objects"
# used to drain stdout on the h2o objects below (before terminating a node)
def __drain(src, dst):
    """Copy every line from *src* into *dst*, then close both ends.

    *dst* may be either a raw OS file descriptor (int) or a file-like object
    with write()/flush(). *src* must be an iterable of lines exposing close()
    (e.g. a subprocess pipe).
    """
    for l in src:
        if type(dst) == type(0):
            # dst is a raw fd: write the line directly.
            # got this with random data to parse.. why? it shows up in our stdout?
            # UnicodeEncodeError: 'ascii' codec can't encode character u'\x86' in position 60:
            # ordinal not in range(128)
            # could we be getting unicode object?
            try:
                os.write(dst, l)
            except:
                # Fallback: presumably l is a unicode object the default codec
                # rejected — encode explicitly. TODO confirm; the broad except
                # also masks genuine I/O errors here.
                # os.write(dst,"kbn: non-ascii char in the next line?")
                os.write(dst,l.encode('utf8'))
        else:
            # FIX! this case probably can have the same issue?
            dst.write(l)
            dst.flush()
    src.close()
    if type(dst) == type(0):
        # We own the raw fd, so close it once the source is fully drained.
        os.close(dst)
def drain(src, dst):
    """Start a daemon thread pumping *src* into *dst* (see __drain); returns immediately."""
    t = threading.Thread(target=__drain, args=(src, dst))
    t.daemon = True
    t.start()
#*****************************************************************
class H2O(object):
    def __init__(self,
        use_this_ip_addr=None, port=54321, capture_output=True,
        force_ip=False, network=None,
        use_debugger=None, classpath=None,
        use_hdfs=False, use_maprfs=False,
        hdfs_version=None, hdfs_name_node=None, hdfs_config=None,
        aws_credentials=None,
        use_flatfile=False, java_heap_GB=None, java_heap_MB=None, java_extra_args=None,
        use_home_for_ice=False, node_id=None, username=None,
        random_udp_drop=False, force_tcp=False,
        redirect_import_folder_to_s3_path=None,
        redirect_import_folder_to_s3n_path=None,
        disable_h2o_log=False,
        enable_benchmark_log=False,
        h2o_remote_buckets_root=None,
        delete_keys_at_teardown=False,
        cloud_name=None,
        disable_assertions=None,
        sandbox_ignore_errors=False,
        ):
        """Configure (but do not launch) one H2O node.

        Mostly copies the keyword arguments onto the instance. Command-line
        arguments from h2o_args (ip, network, debugger) take precedence over
        the corresponding keyword arguments. When use_hdfs is set, HDFS
        defaults are chosen by probing whether a 0xdata-internal machine is
        reachable (otherwise EC2 defaults are used).
        """
        if use_hdfs:
            # see if we can touch a 0xdata machine
            try:
                # long timeout in ec2...bad
                a = requests.get('http://172.16.2.176:80', timeout=1)
                hdfs_0xdata_visible = True
            except:
                hdfs_0xdata_visible = False
            # different defaults, depending on where we're running
            if hdfs_name_node is None:
                if hdfs_0xdata_visible:
                    hdfs_name_node = "172.16.2.176"
                else: # ec2
                    hdfs_name_node = "10.78.14.235:9000"
            if hdfs_version is None:
                if hdfs_0xdata_visible:
                    hdfs_version = "cdh4"
                else: # ec2
                    hdfs_version = "0.20.2"
        self.redirect_import_folder_to_s3_path = redirect_import_folder_to_s3_path
        self.redirect_import_folder_to_s3n_path = redirect_import_folder_to_s3n_path
        self.aws_credentials = aws_credentials
        self.port = port
        # None is legal for self.h2o_addr.
        # means we won't give an ip to the jar when we start.
        # Or we can say use use_this_ip_addr=127.0.0.1, or the known address
        # if use_this_addr is None, use 127.0.0.1 for urls and json
        # Command line arg 'ip_from_cmd_line' dominates:
        # ip_from_cmd_line and use_this_ip_addr shouldn't be used for mutli-node
        if h2o_args.ip_from_cmd_line:
            self.h2o_addr = h2o_args.ip_from_cmd_line
        else:
            self.h2o_addr = use_this_ip_addr
        self.force_ip = force_ip or (self.h2o_addr!=None)
        if self.h2o_addr:
            self.http_addr = self.h2o_addr
        else:
            self.http_addr = h2o_args.python_cmd_ip
        if h2o_args.network_from_cmd_line:
            self.network = h2o_args.network_from_cmd_line
        else:
            self.network = network
        # command line should always dominate for enabling
        if h2o_args.debugger: use_debugger = True
        self.use_debugger = use_debugger
        self.classpath = classpath
        self.capture_output = capture_output
        self.use_hdfs = use_hdfs
        self.use_maprfs = use_maprfs
        self.hdfs_name_node = hdfs_name_node
        self.hdfs_version = hdfs_version
        self.hdfs_config = hdfs_config
        self.use_flatfile = use_flatfile
        self.java_heap_GB = java_heap_GB
        self.java_heap_MB = java_heap_MB
        self.java_extra_args = java_extra_args
        self.use_home_for_ice = use_home_for_ice
        self.node_id = node_id
        if username:
            self.username = username
        else:
            self.username = getpass.getuser()
        # don't want multiple reports from tearDown and tearDownClass
        # have nodes[0] remember (0 always exists)
        self.sandbox_error_was_reported = False
        self.sandbox_ignore_errors = sandbox_ignore_errors
        self.random_udp_drop = random_udp_drop
        self.force_tcp = force_tcp
        self.disable_h2o_log = disable_h2o_log
        # this dumps stats from tests, and perf stats while polling to benchmark.log
        self.enable_benchmark_log = enable_benchmark_log
        self.h2o_remote_buckets_root = h2o_remote_buckets_root
        self.delete_keys_at_teardown = delete_keys_at_teardown
        self.disable_assertions = disable_assertions
        if cloud_name:
            self.cloud_name = cloud_name
        else:
            # Default cloud name is unique per user + test process.
            self.cloud_name = 'pytest-%s-%s' % (getpass.getuser(), os.getpid())
def __str__(self):
return '%s - http://%s:%d/' % (type(self), self.http_addr, self.port)
def url(self, loc, port=None):
# always use the new api port
if port is None: port = self.port
if loc.startswith('/'):
delim = ''
else:
delim = '/'
u = 'http://%s:%d%s%s' % (self.http_addr, port, delim, loc)
return u
    def do_json_request(self, jsonRequest=None, fullUrl=None, timeout=10, params=None, postData=None, returnFast=False,
        cmd='get', extraComment=None, ignoreH2oError=False, noExtraErrorCheck=False, **kwargs):
        """Issue an HTTP request to this H2O node and return the decoded JSON dict/list.

        jsonRequest       -- path passed to self.url() when fullUrl is not given
        fullUrl           -- complete URL; overrides jsonRequest
        timeout           -- per-request timeout in seconds (None = infinite when
                             --no_timeout was given on the command line)
        params            -- query params; entries whose value is None are removed
                             (mutates the caller's dict!)
        postData          -- form-encoded body for cmd='post'
        returnFast        -- skip JSON decode and error/warning scanning; returns None
        cmd               -- 'get', 'post' or 'delete'
        ignoreH2oError    -- print (rather than raise) when the response carries an
                             'error'/'errors' field
        noExtraErrorCheck -- suppress the sandbox check on connection failure (used
                             while the cloud is still coming up)
        Raises on non-decodable responses and (unless ignoreH2oError) on h2o-reported
        errors.  kwargs are passed straight to requests (e.g. files=).
        """
        # if url param is used, use it as full url. otherwise create from the jsonRequest
        if fullUrl:
            url = fullUrl
        else:
            url = self.url(jsonRequest)
        # remove any params that are 'None'
        # need to copy dictionary, since can't delete while iterating
        if params is not None:
            params2 = params.copy()
            for k in params2:
                if params2[k] is None:
                    del params[k]
            paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
        else:
            paramsStr = ''
        # log the POST body alongside the URL so the request is reproducible from the log
        extraComment2 = " " + str(postData)+";" if cmd=='post' else ""
        extraComment2 += extraComment if extraComment else ""
        if len(extraComment2) > 0:
            log('Start ' + cmd.upper() + " " + url + paramsStr, comment=extraComment2)
        else:
            log('Start ' + cmd.upper() + " " + url + paramsStr)
        # file get passed thru kwargs here
        if h2o_args.no_timeout:
            timeout = None # infinite
        try:
            if 'post' == cmd:
                # NOTE == cmd: for now, since we don't have deserialization from JSON in h2o-dev, we use form-encoded POST.
                # This is temporary.
                #
                # This following does application/json (aka, posting JSON in the body):
                # r = requests.post(url, timeout=timeout, params=params, data=json.dumps(postData), **kwargs)
                #
                # This does form-encoded, which doesn't allow POST of nested structures
                r = requests.post(url, timeout=timeout, params=params, data=postData, **kwargs)
            elif 'delete' == cmd:
                r = requests.delete(url, timeout=timeout, params=params, **kwargs)
            elif 'get' == cmd:
                r = requests.get(url, timeout=timeout, params=params, **kwargs)
            else:
                raise ValueError("Unknown HTTP command (expected 'get', 'post' or 'delete'): " + cmd)
        except Exception, e:
            # rethrow the exception after we've checked for stack trace from h2o
            # out of memory errors maybe don't show up right away? so we should wait for h2o
            # to get it out to h2o stdout. We don't want to rely on cloud teardown to check
            # because there's no delay, and we don't want to delay all cloud teardowns by waiting.
            exc_info = sys.exc_info()
            # use this to ignore the initial connection errors during build cloud when h2o is coming up
            if not noExtraErrorCheck:
                h2p.red_print(
                    "ERROR: got exception on %s to h2o. \nGoing to check sandbox, then rethrow.." % (url + paramsStr))
                time.sleep(2)
                check_sandbox_for_errors(python_test_name=h2o_args.python_test_name);
            # py2 three-arg raise: rethrow the original exception with its traceback
            raise exc_info[1], None, exc_info[2]
        # non-200 is only reported, not fatal; the JSON decode below decides that
        if 200 != r.status_code:
            print "JSON call returned non-200 status with ", (url + paramsStr)
            print "r.status_code: " + str(r.status_code)
            print "r.headers: " + repr(r.headers)
            print "r.text: " + r.text
        # fatal if no response
        # FIX! why is this not working on bad response to GLM
        # if not r:
        #     raise Exception("Maybe bad url? no r in do_json_request in %s:" % inspect.stack()[1][3])
        # this is used to open a browser on results, or to redo the operation in the browser
        # we don't' have that may urls flying around, so let's keep them all
        # FIX! this doesn't work now with all the extra post data required?
        h2o_nodes.json_url_history.append(r.url)
        # if r.json():
        #     raise Exception("Maybe bad url? no r.json in do_json_request in %s:" % inspect.stack()[1][3])
        rjson = None
        if returnFast:
            return
        try:
            # h2o-dev sometimes is returning ISO-8859-2, Latin-2?
            ## print "apparent_coding", r.apparent_encoding
            r.encoding = 'utf-8'
            rjson = r.json()
        except:
            h2p.red_print("r.text:", r.text.encode('utf8'))
            # NOTE(review): if json.loads() below succeeds AND yields a list/dict, no
            # exception is raised here and rjson stays None, so the 'e in rjson'
            # membership tests below would raise TypeError -- confirm whether that
            # path can actually occur (r.json() failing while json.loads succeeds).
            try:
                # try to decode the r.text?
                if not isinstance(json.loads(r.text), (list, dict)):
                    raise Exception("h2o json responses should always be lists or dicts, see previous for text")
            except:
                raise Exception("Could not decode any json from the request %s." % r.text)
        # TODO: we should really only look in the response object. This check
        # prevents us from having a field called "error" (e.g., for a scoring result).
        for e in ['error', 'Error', 'errors', 'Errors']:
            # error can be null (python None). This happens in exec2
            if e in rjson and rjson[e]:
                print "rjson:", dump_json(rjson)
                emsg = 'rjson %s in %s: %s' % (e, inspect.stack()[1][3], rjson[e])
                if ignoreH2oError:
                    # well, we print it..so not totally ignore. test can look at rjson returned
                    print emsg
                else:
                    print emsg
                    raise Exception(emsg)
        for w in ['warning', 'Warning', 'warnings', 'Warnings']:
            # warning can be null (python None).
            if w in rjson and rjson[w]:
                verboseprint(dump_json(rjson))
                print 'rjson %s in %s: %s' % (w, inspect.stack()[1][3], rjson[w])
        return rjson
    def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
        '''Repeatedly test a function waiting for it to return True.
        Arguments:
        test_func -- A function that will be run repeatedly
        error -- A function that will be run to produce an error message
        it will be called with (node, timeTakenSecs, numberOfRetries)
        OR
        -- A string that will be interpolated with a dictionary of
        { 'timeTakenSecs', 'numberOfRetries' }
        timeoutSecs -- How long in seconds to keep trying before declaring a failure
        retryDelaySecs -- How long to wait between retry attempts
        '''
        # NOTE(review): despite the docstring, a string *error* is not dict-interpolated;
        # it is prefixed onto a fixed "... failed after ..." message below -- confirm intent.
        start = time.time()
        numberOfRetries = 0
        # with --no_timeout this loops forever until test_func succeeds
        while h2o_args.no_timeout or (time.time() - start < timeoutSecs):
            if test_func(self, tries=numberOfRetries, timeoutSecs=timeoutSecs):
                break
            time.sleep(retryDelaySecs)
            numberOfRetries += 1
            # hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
            # to find the badness?. can check_sandbox_for_errors at any time
            if ((numberOfRetries % 50) == 0):
                check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
        else:
            # while/else: this branch runs only when the loop exhausted its time
            # budget WITHOUT hitting the break, i.e. test_func never returned True
            timeTakenSecs = time.time() - start
            # isinstance(error, type('')) is a py2 way of asking "is it a str?"
            if isinstance(error, type('')):
                raise Exception('%s failed after %.2f seconds having retried %d times' % (
                    error, timeTakenSecs, numberOfRetries))
            else:
                msg = error(self, timeTakenSecs, numberOfRetries)
                raise Exception(msg)
def wait_for_node_to_accept_connections(self, nodeList, timeoutSecs=15, noExtraErrorCheck=False):
verboseprint("wait_for_node_to_accept_connections")
def test(n, tries=None, timeoutSecs=timeoutSecs):
try:
n.get_cloud(noExtraErrorCheck=noExtraErrorCheck, timeoutSecs=timeoutSecs)
return True
except requests.ConnectionError, e:
# Now using: requests 1.1.0 (easy_install --upgrade requests) 2/5/13
# Now: assume all requests.ConnectionErrors are H2O legal connection errors.
# Have trouble finding where the errno is, fine to assume all are good ones.
# Timeout check will kick in if continued H2O badness.
return False
# get their http addr to represent the nodes
expectedCloudStr = ",".join([str(n) for n in nodeList])
self.stabilize(test, error=('waiting for initial connection: Expected cloud %s' % expectedCloudStr),
timeoutSecs=timeoutSecs, # with cold cache's this can be quite slow
retryDelaySecs=0.1) # but normally it is very fast
def sandbox_error_report(self, done=None):
# not clearable..just or in new value
if done:
self.sandbox_error_was_reported = True
return (self.sandbox_error_was_reported)
    def get_args(self):
        """Assemble the java command line used to launch this H2O node.

        Returns a flat list of argument tokens (every multi-word option such
        as '-port 54321' is re-split on whitespace at the end, because psutil
        psopen wants one token per list element).  Only reads self.* state set
        up by __init__; has no side effects beyond informational prints.
        """
        args = ['java']
        # I guess it doesn't matter if we use flatfile for both now
        # defaults to not specifying
        # FIX! we need to check that it's not outside the limits of the dram of the machine it's running on?
        if self.java_heap_GB is not None:
            if not (1 <= self.java_heap_GB <= 256):
                raise Exception('java_heap_GB <1 or >256 (GB): %s' % (self.java_heap_GB))
            args += ['-Xms%dG' % self.java_heap_GB]
            args += ['-Xmx%dG' % self.java_heap_GB]
        # note GB and MB are not mutually exclusive here; both are appended if both set
        if self.java_heap_MB is not None:
            if not (1 <= self.java_heap_MB <= 256000):
                raise Exception('java_heap_MB <1 or >256000 (MB): %s' % (self.java_heap_MB))
            args += ['-Xms%dm' % self.java_heap_MB]
            args += ['-Xmx%dm' % self.java_heap_MB]
        if self.java_extra_args is not None:
            args += ['%s' % self.java_extra_args]
        if self.use_debugger:
            # currently hardwire the base port for debugger to 8000
            # increment by one for every node we add
            # sence this order is different than h2o cluster order, print out the ip and port for the user
            # we could save debugger_port state per node, but not really necessary (but would be more consistent)
            debuggerBasePort = 8000
            if self.node_id is None:
                debuggerPort = debuggerBasePort
            else:
                debuggerPort = debuggerBasePort + self.node_id
            if self.http_addr:
                a = self.http_addr
            else:
                a = "localhost"
            if self.port:
                b = str(self.port)
            else:
                b = "h2o determined"
            # I guess we always specify port?
            print "You can attach debugger at port %s for jvm at %s:%s" % (debuggerPort, a, b)
            args += ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=%s' % debuggerPort]
        # JVM assertions are ON by default; disable_assertions just omits -ea
        if self.disable_assertions:
            print "WARNING: h2o is running with assertions disabled"
        else:
            args += ["-ea"]
        if self.use_maprfs:
            args += ["-Djava.library.path=/opt/mapr/lib"]
        # either run from exploded classes via water.Boot, or from the built jar
        if self.classpath:
            entries = [find_file('build/classes'), find_file('lib/javassist.jar')]
            entries += glob.glob(find_file('lib') + '/*/*.jar')
            entries += glob.glob(find_file('lib') + '/*/*/*.jar')
            args += ['-classpath', os.pathsep.join(entries), 'water.Boot']
        else:
            args += ["-jar", self.get_h2o_jar()]
        # 'if 1==1' is a hand toggle left enabled; the mirror 'if 1==0' copy below is disabled
        if 1==1:
            if self.hdfs_config:
                args += [
                    '-hdfs_config ' + self.hdfs_config
                ]
        if h2o_args.beta_features:
            # no -beta
            # args += ["-beta"]
            pass
        if self.network:
            args += ["-network " + self.network]
        # H2O should figure it out, if not specified
        # DON"T EVER USE on multi-machine...h2o should always get it right, to be able to run on hadoop
        # where it's not told
        # new 10/22/14. Allow forcing the ip when we do remote, for networks with bridges, where
        # h2o can't self identify (does -network work?)
        if self.force_ip and self.h2o_addr: # should always have an addr if force_ip...but..
            args += [
                '-ip %s' % self.h2o_addr,
            ]
        # Need to specify port, since there can be multiple ports for an ip in the flatfile
        if self.port is not None:
            args += [
                "-port %d" % self.port,
            ]
        if self.use_flatfile:
            args += [
                '-flatfile ' + self.flatfile,
            ]
        args += [
            '-ice_root %s' % self.get_ice_dir(),
            # if I have multiple jenkins projects doing different h2o clouds, I need
            # I need different ports and different cloud name.
            # does different cloud name prevent them from joining up
            # (even if same multicast ports?)
            # I suppose I can force a base address. or run on another machine?
        ]
        args += [
            '-name ' + self.cloud_name
        ]
        # ignore the other -hdfs args if the config is used?
        # the following 'if 1==0' blocks are deliberately dead (kept as reference)
        if 1==0:
            if self.hdfs_config:
                args += [
                    '-hdfs_config ' + self.hdfs_config
                ]
        # UPDATE: no longer valid to h2o?
        if 1==0 and self.use_hdfs:
            args += [
                # it's fine if hdfs_name has a ":9000" port or something too
                '-hdfs hdfs://' + self.hdfs_name_node,
                '-hdfs_version ' + self.hdfs_version,
            ]
        # UPDATE: no longer valid to h2o?
        if 1==0 and self.use_maprfs:
            args += [
                # 3 slashes?
                '-hdfs maprfs:///' + self.hdfs_name_node,
                '-hdfs_version ' + self.hdfs_version,
            ]
        if self.aws_credentials:
            args += ['-aws_credentials ' + self.aws_credentials]
        # passed thru build_cloud in test, or global from commandline arg
        if self.random_udp_drop or h2o_args.random_udp_drop:
            args += ['-random_udp_drop']
        if self.force_tcp:
            args += ['-force_tcp']
        if self.disable_h2o_log:
            args += ['-nolog']
        args += ['-ga_opt_out']
        # psutil psopen needs param/value in different arg elements
        # othetwise we'd need to pass as joined string, and run /bin/sh
        # this joins them up with space, then splits on space.
        # works as long as no pathnames have embedded space, which should be true
        # for unix, maybe not windows. For windows we join them as string before use in psopen
        argsSplitByWhiteSpace = " ".join(args).split()
        return argsSplitByWhiteSpace
#*****************************************************************
import h2o_methods
class LocalH2O(H2O):
    '''An H2O instance launched by the python framework on the local host using psutil'''
    def __init__(self, *args, **kwargs):
        # Launches the jvm immediately: builds the command line via get_args()
        # and spawns it with psutil, capturing stdout/stderr to sandbox logs.
        super(LocalH2O, self).__init__(*args, **kwargs)
        self.rc = None  # cached process return code, filled in by wait()
        # FIX! no option for local /home/username ..always the sandbox (LOG_DIR)
        self.ice = tmp_dir('ice.')
        self.flatfile = flatfile_pathname()
        # so we can tell if we're remote or local. Apparently used in h2o_import.py
        self.remoteH2O = False
        h2o_os_util.check_port_group(self.port)
        h2o_os_util.show_h2o_processes()
        if self.node_id is not None:
            logPrefix = 'local-h2o-' + str(self.node_id)
        else:
            logPrefix = 'local-h2o'
        # see https://docs.python.org/2/library/subprocess.html#subprocess.Popen
        # for why I'm using shlex to split the cmd string into a sequence
        # confusing issues, especially when thinking about windows too
        # OS/build      | os.name | platform.system()
        # -------------+---------+-----------------------
        # Win32 native  | nt      | Windows
        # Win32 cygwin  | posix   | CYGWIN_NT-5.1*
        # Win64 native  | nt      | Windows
        # Win64 cygwin  | posix   | CYGWIN_NT-6.1-WOW64*
        # Linux         | posix   | Linux
        # make it a string if cygwin or windows
        # in unix, the args was created with split by space. (assumption is no pathname has space)
        # need to pass string in windows..doesn't seem to assemble string from args list correctly
        # could make unix use shell and pass string?
        pf = platform.system()
        print "System is %s" % pf
        cmd = self.get_args()
        # NOTE(review): re.match anchors at the start of the string, so
        # 'CYGWIN_NT-*' does NOT match 'win' here even though the comment
        # says it covers cygwin -- confirm whether cygwin was meant to join too.
        if re.match('win', pf, re.IGNORECASE): # covers cygwin and windows
            cmd = " ".join(cmd)
        spawn = spawn_cmd(logPrefix, cmd=cmd, capture_output=self.capture_output)
        self.ps = spawn[0]
    def get_h2o_jar(self):
        # locate the built jar relative to the repo layout
        return find_file('build/h2o.jar')
    def get_flatfile(self):
        return self.flatfile
        # return find_file(flatfile_pathname())
    def get_ice_dir(self):
        return self.ice
    def is_alive(self):
        # wait(0) returns None while the process is still running
        verboseprint("Doing is_alive check for LocalH2O", self.wait(0))
        return self.wait(0) is None
    def terminate_self_only(self):
        # Kill just this node's process (no cluster-wide shutdown request).
        # Returns the process rc, or -1 when the process is already gone or
        # something unexpected happened.
        def on_terminate(proc):
            print("process {} terminated".format(proc))
        waitingForKill = False
        try:
            # we already sent h2o shutdown and waited a second. Don't bother checking if alive still.
            # send terminate...wait up to 3 secs, then send kill
            self.ps.terminate()
            gone, alive = wait_procs(procs=[self.ps], timeout=3, callback=on_terminate)
            if alive:
                self.ps.kill()
            # from http://code.google.com/p/psutil/wiki/Documentation: wait(timeout=None) Wait for process termination
            # If the process is already terminated does not raise NoSuchProcess exception but just return None immediately.
            # If timeout is specified and process is still alive raises TimeoutExpired exception.
            # hmm. maybe we're hitting the timeout
            waitingForKill = True
            return self.wait(timeout=3)
        except psutil.NoSuchProcess:
            return -1
        except:
            if waitingForKill:
                # this means we must have got the exception on the self.wait()
                # just print a message
                print "\nUsed psutil to kill h2o process...but"
                print "It didn't die within 2 secs. Maybe will die soon. Maybe not! At: %s" % self.http_addr
            else:
                print "Unexpected exception in terminate_self_only: ignoring"
            # hack.
            # psutil 2.x needs function reference
            # psutil 1.x needs object reference
            if hasattr(self.ps.cmdline, '__call__'):
                pcmdline = self.ps.cmdline()
            else:
                pcmdline = self.ps.cmdline
            print "process cmdline:", pcmdline
            return -1
    def terminate(self):
        # send a shutdown request first.
        # since local is used for a lot of buggy new code, also do the ps kill.
        # try/except inside shutdown_all now
        if self.is_alive():
            print "\nShutdown didn't work fast enough for local node? : %s. Will kill though" % self
            self.terminate_self_only()
    def wait(self, timeout=0):
        # Return the process rc (cached after the first successful wait),
        # or None if the process is still running when the timeout expires.
        if self.rc is not None:
            return self.rc
        try:
            self.rc = self.ps.wait(timeout)
            return self.rc
        except psutil.TimeoutExpired:
            return None
    def stack_dump(self):
        # SIGQUIT makes the jvm dump all thread stacks to its stdout log
        self.ps.send_signal(signal.SIGQUIT)
# to see all the methods
# print dump_json(dir(LocalH2O))
#*****************************************************************
class RemoteH2O(H2O):
    '''An H2O instance launched by the python framework on a specified host using openssh'''
    def __init__(self, host, *args, **kwargs):
        # host is a RemoteHost (paramiko ssh wrapper); the jar, flatfile and any
        # credential/config files are uploaded to it, then h2o is started over
        # an ssh channel and its stdout/stderr drained to local log files.
        super(RemoteH2O, self).__init__(*args, **kwargs)
        # it gets set True if an address is specified for LocalH2o init. Override.
        if 'force_ip' in kwargs:
            self.force_ip = kwargs['force_ip']
        self.remoteH2O = True # so we can tell if we're remote or local
        self.jar = host.upload_file('build/h2o.jar')
        # need to copy the flatfile. We don't always use it (depends on h2o args)
        self.flatfile = host.upload_file(flatfile_pathname())
        # distribute AWS credentials
        if self.aws_credentials:
            self.aws_credentials = host.upload_file(self.aws_credentials)
        if self.hdfs_config:
            self.hdfs_config = host.upload_file(self.hdfs_config)
        # ice dir name includes port and timestamp so concurrent clouds don't collide
        if self.use_home_for_ice:
            # this will be the username used to ssh to the host
            self.ice = "/home/" + host.username + '/ice.%d.%s' % (self.port, time.time())
        else:
            self.ice = '/tmp/ice.%d.%s' % (self.port, time.time())
        self.channel = host.open_channel()
        ### FIX! TODO...we don't check on remote hosts yet
        # this fires up h2o over there
        cmd = ' '.join(self.get_args())
        # UPDATE: somehow java -jar on cygwin target (xp) can't handle /tmp/h2o*jar
        # because it's a windows executable and expects windows style path names.
        # but if we cd into /tmp, it can do java -jar h2o*jar.
        # So just split out the /tmp (pretend we don't know) and the h2o jar file name
        # Newer windows may not have this problem? Do the ls (this goes into the local stdout
        # files) so we can see the file is really where we expect.
        # This hack only works when the dest is /tmp/h2o*jar. It's okay to execute
        # with pwd = /tmp. If /tmp/ isn't in the jar path, I guess things will be the same as
        # normal.
        if 1 == 0: # enable if you want windows remote machines
            cmdList = ["cd /tmp"] # separate by ;<space> when we join
            cmdList += ["ls -ltr " + self.jar]
            cmdList += [re.sub("/tmp/", "", cmd)]
            self.channel.exec_command("; ".join(cmdList))
        else:
            self.channel.exec_command(cmd)
        if self.capture_output:
            if self.node_id is not None:
                logPrefix = 'remote-h2o-' + str(self.node_id)
            else:
                logPrefix = 'remote-h2o'
            logPrefix += '-' + host.h2o_addr
            outfd, outpath = tmp_file(logPrefix + '.stdout.', '.log')
            errfd, errpath = tmp_file(logPrefix + '.stderr.', '.log')
            # drain() pumps the remote stream into the local fd in the background
            drain(self.channel.makefile(), outfd)
            drain(self.channel.makefile_stderr(), errfd)
            comment = 'Remote on %s, stdout %s, stderr %s' % (
                self.h2o_addr, os.path.basename(outpath), os.path.basename(errpath))
        else:
            drain(self.channel.makefile(), sys.stdout)
            drain(self.channel.makefile_stderr(), sys.stderr)
            comment = 'Remote on %s' % self.h2o_addr
        log(cmd, comment=comment)
    def get_h2o_jar(self):
        # remote path of the uploaded jar
        return self.jar
    def get_flatfile(self):
        return self.flatfile
    def get_ice_dir(self):
        return self.ice
    def is_alive(self):
        # cheap channel-state checks first, then an actual get_cloud() probe
        verboseprint("Doing is_alive check for RemoteH2O")
        if self.channel.closed: return False
        if self.channel.exit_status_ready(): return False
        try:
            self.get_cloud(noExtraErrorCheck=True)
            return True
        except:
            return False
    def terminate_self_only(self):
        # closing the ssh channel kills the remote jvm (open_channel did get_pty())
        self.channel.close()
        # Don't check afterwards. api watchdog in h2o might complain
        if 1==0:  # deliberately-dead verification block, kept as reference
            time.sleep(1) # a little delay needed?
            # kbn: it should be dead now? want to make sure we don't have zombies
            # we should get a connection error. doing a is_alive subset.
            try:
                gc_output = self.get_cloud(noExtraErrorCheck=True)
                raise Exception("get_cloud() should fail after we terminate a node. It isn't. %s %s" % (self, gc_output))
            except:
                return True
    def terminate(self):
        self.terminate_self_only()
#*****************************************************************
class ExternalH2O(H2O):
    '''A cloned H2O instance assumed to be created by others, that we can interact with via json requests (urls)
    Gets initialized with state from json created by another build_cloud, so all methods should work 'as-if"
    the cloud was built by the test (normally).
    The normal build_cloud() parameters aren't passed here, the final node state is! (and used for init)
    The list should be complete, as long as created by build_cloud(create_json=True) or
    build_cloud_with_hosts(create_json=True)
    Obviously, no psutil or paramiko work done here.
    '''
    def __init__(self, nodeState):
        # Copy every saved key/value onto this instance as attributes.
        for key, value in nodeState.iteritems():
            verboseprint("init:", key, value)
            # The saved json currently stores some python literals as strings;
            # translate "None"/"false"/"true" back to their python values.
            # "null" is deliberately left alone (still a string) for now.
            if value == "None":
                value = None
            elif value == "false":
                value = False
            elif value == "true":
                value = True
            setattr(self, key, value)

    def is_alive(self):
        """True if the externally-created node still answers get_cloud()."""
        verboseprint("Doing is_alive check for ExternalH2O")
        try:
            self.get_cloud()
        except:
            return False
        return True

    def terminate_self_only(self):
        # We didn't start this node, so we can't kill just it.
        raise Exception("terminate_self_only() not supported for ExternalH2O")

    def terminate(self):
        # Best we can do is ask the whole cloud to shut itself down.
        self.shutdown_all()
#*****************************************************************
class RemoteHost(object):
    # Wraps one ssh connection (paramiko) to a remote machine, with helpers to
    # upload files over sftp, run commands, and fan uploaded files out to other
    # hosts via scp.  self.uploaded maps local path -> remote dest path.
    def upload_file(self, f, progress=None):
        # Upload local file f to /tmp on the remote host (content+user hashed
        # into the dest name, so identical files are shared and re-uploads are
        # skipped).  Returns the remote path.  progress is passed to sftp.put.
        # FIX! we won't find it here if it's hdfs://172.16.2.151/ file
        f = find_file(f)
        if f not in self.uploaded:
            start = time.time()
            import md5
            # NOTE(review): py2-only 'md5' module; the open() handle is never
            # closed and the whole file is read into memory -- fine for jars
            # here, but worth confirming for very large files.
            m = md5.new()
            m.update(open(f).read())
            m.update(getpass.getuser())
            dest = '/tmp/' + m.hexdigest() + "-" + os.path.basename(f)
            # sigh. we rm/create sandbox in build_cloud now
            # (because nosetests doesn't exec h2o_main and we
            # don't want to code "clean_sandbox()" in all the tests.
            # So: we don't have a sandbox here, or if we do, we're going to delete it.
            # Just don't log anything until build_cloud()? that should be okay?
            # we were just logging this upload message..not needed.
            # log('Uploading to %s: %s -> %s' % (self.http_addr, f, dest))
            sftp = self.ssh.open_sftp()
            # check if file exists on remote side
            # does paramiko have issues with big files? (>1GB, or 650MB?). maybe we don't care.
            # This would arise (as mentioned in the source, line no 667,
            # http://www.lag.net/paramiko/docs/paramiko.sftp_client-pysrc.html) when there is
            # any error reading the packet or when there is EOFError
            # but I'm getting sftp close here randomly at sm.
            # http://stackoverflow.com/questions/22708942/python-paramiko-module-error-with-callback
            # http://stackoverflow.com/questions/15010540/paramiko-sftp-server-connection-dropped
            # http://stackoverflow.com/questions/12322210/handling-paramiko-sshexception-server-connection-dropped
            try:
                # note we don't do a md5 compare. so if a corrupted file was uploaded we won't re-upload
                # until we do another build.
                sftp.stat(dest)
                print "{0} Skipping upload of file {1}. File {2} exists on remote side!".format(self, f, dest)
            except IOError, e:
                # if self.channel.closed or self.channel.exit_status_ready():
                #     raise Exception("something bad happened to our %s being used for sftp. keepalive? %s %s" % \
                #         (self, self.channel.closed, self.channel.exit_status_ready()))
                if e.errno == errno.ENOENT: # no such file or directory
                    verboseprint("{0} uploading file {1}".format(self, f))
                    sftp.put(f, dest, callback=progress)
                    # if you want to track upload times
                    ### print "\n{0:.3f} seconds".format(time.time() - start)
                elif e.errno == errno.EEXIST: # File Exists
                    pass
                else:
                    print "Got unexpected errno: %s on paramiko sftp." % e.errno
                    print "Lookup here: https://docs.python.org/2/library/errno.html"
                    # throw the exception again, if not what we expected
                    # (py2 three-arg raise keeps the original traceback)
                    exc_info = sys.exc_info()
                    raise exc_info[1], None, exc_info[2]
            finally:
                sftp.close()
            self.uploaded[f] = dest
        sys.stdout.flush()
        return self.uploaded[f]
    def record_file(self, f, dest):
        '''Record a file as having been uploaded by external means'''
        self.uploaded[f] = dest
    def run_cmd(self, cmd):
        # Run cmd on the remote host, mirroring its stdout/stderr to ours.
        log('Running `%s` on %s' % (cmd, self))
        (stdin, stdout, stderr) = self.ssh.exec_command(cmd)
        stdin.close()
        sys.stdout.write(stdout.read())
        sys.stdout.flush()
        stdout.close()
        sys.stderr.write(stderr.read())
        sys.stderr.flush()
        stderr.close()
    def push_file_to_remotes(self, f, hosts):
        # scp an already-uploaded file from this host to every other host,
        # recording it there so those hosts skip their own upload.
        dest = self.uploaded[f]
        for h in hosts:
            if h == self: continue
            self.run_cmd('scp %s %s@%s:%s' % (dest, h.username, h.h2o_addr, dest))
            h.record_file(f, dest)
    def __init__(self, addr, username=None, password=None, **kwargs):
        # Open the ssh connection; extra kwargs go straight to paramiko's
        # SSHClient.connect (e.g. port=, key_filename=).
        import paramiko
        # To debug paramiko you can use the following code:
        #paramiko.util.log_to_file('/tmp/paramiko.log')
        #paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
        # kbn. trying 9/23/13. Never specify -ip on java command line for multi-node
        # but self.h2o_addr is used elsewhere. so look at self.remoteH2O to disable in get_args()
        # by definition, this must be the publicly visible addrs, otherwise we can't ssh or browse!
        self.h2o_addr = addr
        self.http_addr = addr
        self.username = username # this works, but it's host state
        self.ssh = paramiko.SSHClient()
        # don't require keys. If no password, assume passwordless setup was done
        policy = paramiko.AutoAddPolicy()
        self.ssh.set_missing_host_key_policy(policy)
        self.ssh.load_system_host_keys()
        if password is None:
            self.ssh.connect(self.h2o_addr, username=username, **kwargs)
        else:
            self.ssh.connect(self.h2o_addr, username=username, password=password, **kwargs)
        # keep connection - send keepalive packet evety 5minutes
        self.ssh.get_transport().set_keepalive(300)
        self.uploaded = {}
    def remote_h2o(self, *args, **kwargs):
        # Launch an H2O node on this host (self is passed as RemoteH2O's host).
        return RemoteH2O(self, self.h2o_addr, *args, **kwargs)
    def open_channel(self):
        ch = self.ssh.get_transport().open_session()
        ch.get_pty() # force the process to die without the connection
        return ch
    def __str__(self):
        return 'ssh://%s@%s' % (self.username, self.h2o_addr)
| apache-2.0 |
rotofly/odoo | openerp/cli/__init__.py | 135 | 2016 | import logging
import sys
import os
import openerp
from openerp import tools
from openerp.modules import module
# Module-level logger for the openerp CLI machinery.
_logger = logging.getLogger(__name__)

# Global registry of available subcommands, keyed by command name.
# Populated automatically at class-creation time by the CommandType metaclass.
commands = {}
class CommandType(type):
    """Metaclass that registers every ``Command`` subclass in the global
    ``commands`` registry.

    The registration key is the subclass' own ``name`` attribute when it
    defines one, falling back to the lowercased class name.  The ``Command``
    base class itself (key ``'command'``) is deliberately not registered.
    """
    def __init__(cls, name, bases, attrs):
        super(CommandType, cls).__init__(name, bases, attrs)
        # BUG FIX: this used to read ``getattr(cls, name, ...)`` -- looking up
        # an attribute *named after the class* (e.g. 'Help'), which virtually
        # never exists, so an explicit ``name`` attribute on a subclass was
        # silently ignored.  We look in the class' own attrs dict rather than
        # using getattr(cls, 'name', ...), because getattr would inherit the
        # parent command's already-assigned name (set two lines below) and
        # mis-register every subclass under its parent's key.
        name = attrs.get('name', cls.__name__.lower())
        cls.name = name
        if name != 'command':
            commands[name] = cls
class Command(object):
    """Subclass this class to define new openerp subcommands.

    The CommandType metaclass registers each subclass in the module-level
    ``commands`` dict at class-creation time; ``main()`` dispatches to the
    registered instance's run().
    """
    # Python 2 metaclass hook (ignored as a plain attribute on Python 3).
    __metaclass__ = CommandType

    def run(self, args):
        # Default no-op; concrete subcommands override this with their behavior.
        # args: the remaining command-line arguments after the command name.
        pass
class Help(Command):
    """Display the list of available commands"""
    def run(self, args):
        # Print every registered command with its class docstring as the
        # one-line description, names padded to align the descriptions.
        print "Available commands:\n"
        padding = max([len(k) for k in commands.keys()]) + 2
        for k, v in commands.items():
            print " %s%s" % (k.ljust(padding, ' '), v.__doc__ or '')
        # show just the executable's basename in the usage hint
        print "\nUse '%s <command> --help' for individual command help." % sys.argv[0].split(os.path.sep)[-1]
import server
import deploy
import scaffold
import start
def main():
    """CLI entry point: pick the subcommand named on the command line and
    run it with the remaining arguments.

    Falls back to the legacy 'server' command when the first argument is an
    option (starts with '-') or absent.  An unknown command name is silently
    ignored.
    """
    argv = sys.argv[1:]

    # The only shared option is '--addons-path=', needed to discover
    # additional commands provided by modules.
    if len(argv) > 1 and argv[0].startswith('--addons-path=') and not argv[1].startswith("-"):
        # parse only the addons-path; do not set up the logger yet
        tools.config._parse_config([argv[0]])
        argv = argv[1:]

    # Default legacy command
    command = "server"

    # Subcommand discovery: importing every addon may register extra Command
    # subclasses; keep logging quiet while doing so.
    if argv and not argv[0].startswith("-"):
        logging.disable(logging.CRITICAL)
        for mod in module.get_modules():
            __import__('openerp.addons.' + mod)
        logging.disable(logging.NOTSET)
        command, argv = argv[0], argv[1:]

    if command in commands:
        commands[command]().run(argv)
# vim:et:ts=4:sw=4:
| agpl-3.0 |
inmomentsoftware/pledgeservice | testlib/setuptools/lib2to3_ex.py | 907 | 1998 | """
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
    """A lib2to3 RefactoringTool that routes its messages through
    distutils.log instead of the default logging machinery.

    Note: errors are only logged (``**kw`` is intentionally dropped);
    refactoring continues.
    """
    def log_error(self, msg, *args, **kw):
        log.error(msg, *args)

    def log_message(self, msg, *args):
        log.info(msg, *args)

    def log_debug(self, msg, *args):
        log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
    # Extends distutils' Mixin2to3 with setuptools-specific behavior:
    # honoring the distribution's use_2to3* options and optionally converting
    # doctests with the setuptools fixer packages.
    def run_2to3(self, files, doctests = False):
        # See of the distribution option has been set, otherwise check the
        # setuptools default.
        if self.distribution.use_2to3 is not True:
            return
        if not files:
            return
        log.info("Fixing "+" ".join(files))
        self.__build_fixer_names()
        self.__exclude_fixers()
        if doctests:
            # doctest conversion is opt-in via the setuptools module flag
            if setuptools.run_2to3_on_doctests:
                r = DistutilsRefactoringTool(self.fixer_names)
                r.refactor(files, write=True, doctests_only=True)
        else:
            _Mixin2to3.run_2to3(self, files)

    def __build_fixer_names(self):
        # Collect fixers lazily, once: setuptools' default fixer packages plus
        # any declared on the distribution via use_2to3_fixers.
        if self.fixer_names: return
        self.fixer_names = []
        for p in setuptools.lib2to3_fixer_packages:
            self.fixer_names.extend(get_fixers_from_package(p))
        if self.distribution.use_2to3_fixers is not None:
            for p in self.distribution.use_2to3_fixers:
                self.fixer_names.extend(get_fixers_from_package(p))

    def __exclude_fixers(self):
        # Drop every fixer listed on self.exclude_fixers or the distribution's
        # use_2to3_exclude_fixers from the collected fixer list.
        excluded_fixers = getattr(self, 'exclude_fixers', [])
        if self.distribution.use_2to3_exclude_fixers is not None:
            excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
        for fixer_name in excluded_fixers:
            if fixer_name in self.fixer_names:
                self.fixer_names.remove(fixer_name)
| agpl-3.0 |
arante/pyloc | microblog/flask/lib/python3.5/site-packages/pip/commands/list.py | 168 | 7412 | from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
# Module-level logger for the `pip list` command.
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.

        options    -- the parsed command options (find_links, pre,
                      trusted_hosts, process_dependency_links are used)
        index_urls -- index URLs to search (already filtered for --no-index)
        session    -- the requests session used for all network access
        """
        return PackageFinder(
            find_links=options.find_links,
            index_urls=index_urls,
            # --pre makes candidate discovery include pre-release versions
            allow_all_prereleases=options.pre,
            trusted_hosts=options.trusted_hosts,
            process_dependency_links=options.process_dependency_links,
            session=session,
        )
def run(self, options, args):
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
else:
self.run_listing(options)
def run_outdated(self, options):
for dist, latest_version, typ in sorted(
self.find_packages_latest_versions(options),
key=lambda p: p[0].project_name.lower()):
if latest_version > dist.parsed_version:
logger.info(
'%s - Latest: %s [%s]',
self.output_package(dist), latest_version, typ,
)
    def find_packages_latest_versions(self, options):
        """Yield ``(dist, remote_version, typ)`` for each installed package.

        ``typ`` is ``'wheel'`` or ``'sdist'`` according to the best remote
        candidate found; packages with no remote candidate are skipped.
        Honors --local/--user/--editable filters and --no-index/--pre.
        """
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            # --no-index: search no package indexes at all.
            logger.info('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        # Collect dependency links advertised by the installed
        # distributions so the finder can consider them too.
        dependency_links = []
        for dist in get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                editables_only=options.editable):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)
            installed_packages = get_installed_distributions(
                local_only=options.local,
                user_only=options.user,
                editables_only=options.editable,
            )
            for dist in installed_packages:
                typ = 'unknown'
                all_candidates = finder.find_all_candidates(dist.key)
                if not options.pre:
                    # Remove prereleases
                    all_candidates = [candidate for candidate in all_candidates
                                      if not candidate.version.is_prerelease]
                if not all_candidates:
                    continue
                # NOTE(review): relies on the finder's private sort key to
                # pick the preferred candidate — confirm on pip upgrades.
                best_candidate = max(all_candidates,
                                     key=finder._candidate_sort_key)
                remote_version = best_candidate.version
                if best_candidate.location.is_wheel:
                    typ = 'wheel'
                else:
                    typ = 'sdist'
                yield dist, remote_version, typ
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
)
self.output_package_listing(installed_packages)
def output_package(self, dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
logger.info(self.output_package(dist))
def run_uptodate(self, options):
uptodate = []
for dist, version, typ in self.find_packages_latest_versions(options):
if dist.parsed_version == version:
uptodate.append(dist)
self.output_package_listing(uptodate)
| gpl-3.0 |
bobthekingofegypt/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/constants.py | 963 | 87346 | from __future__ import absolute_import, division, unicode_literals
import string
import gettext
_ = gettext.gettext  # translation hook wrapping the message strings below
EOF = None  # end-of-file marker (None) used as a stream sentinel
# Parse-error messages keyed by the error codes the tokenizer/parser emit.
# Values are gettext-wrapped templates; several use %-style named
# placeholders (e.g. %(name)s) filled in from the error's data.
E = {
    "null-character":
        _("Null character in input stream, replaced with U+FFFD."),
    "invalid-codepoint":
        _("Invalid codepoint in stream."),
    "incorrectly-placed-solidus":
        _("Solidus (/) incorrectly placed in tag."),
    "incorrect-cr-newline-entity":
        _("Incorrect CR newline entity, replaced with LF."),
    "illegal-windows-1252-entity":
        _("Entity used with illegal number (windows-1252 reference)."),
    "cant-convert-numeric-entity":
        _("Numeric entity couldn't be converted to character "
          "(codepoint U+%(charAsInt)08x)."),
    "illegal-codepoint-for-numeric-entity":
        _("Numeric entity represents an illegal codepoint: "
          "U+%(charAsInt)08x."),
    "numeric-entity-without-semicolon":
        _("Numeric entity didn't end with ';'."),
    "expected-numeric-entity-but-got-eof":
        _("Numeric entity expected. Got end of file instead."),
    "expected-numeric-entity":
        _("Numeric entity expected but none found."),
    "named-entity-without-semicolon":
        _("Named entity didn't end with ';'."),
    "expected-named-entity":
        _("Named entity expected. Got none."),
    "attributes-in-end-tag":
        _("End tag contains unexpected attributes."),
    'self-closing-flag-on-end-tag':
        _("End tag contains unexpected self-closing flag."),
    "expected-tag-name-but-got-right-bracket":
        _("Expected tag name. Got '>' instead."),
    "expected-tag-name-but-got-question-mark":
        _("Expected tag name. Got '?' instead. (HTML doesn't "
          "support processing instructions.)"),
    "expected-tag-name":
        _("Expected tag name. Got something else instead"),
    "expected-closing-tag-but-got-right-bracket":
        _("Expected closing tag. Got '>' instead. Ignoring '</>'."),
    "expected-closing-tag-but-got-eof":
        _("Expected closing tag. Unexpected end of file."),
    "expected-closing-tag-but-got-char":
        _("Expected closing tag. Unexpected character '%(data)s' found."),
    "eof-in-tag-name":
        _("Unexpected end of file in the tag name."),
    "expected-attribute-name-but-got-eof":
        _("Unexpected end of file. Expected attribute name instead."),
    "eof-in-attribute-name":
        _("Unexpected end of file in attribute name."),
    "invalid-character-in-attribute-name":
        _("Invalid character in attribute name"),
    "duplicate-attribute":
        _("Dropped duplicate attribute on tag."),
    "expected-end-of-tag-name-but-got-eof":
        _("Unexpected end of file. Expected = or end of tag."),
    "expected-attribute-value-but-got-eof":
        _("Unexpected end of file. Expected attribute value."),
    "expected-attribute-value-but-got-right-bracket":
        _("Expected attribute value. Got '>' instead."),
    'equals-in-unquoted-attribute-value':
        _("Unexpected = in unquoted attribute"),
    'unexpected-character-in-unquoted-attribute-value':
        _("Unexpected character in unquoted attribute"),
    "invalid-character-after-attribute-name":
        _("Unexpected character after attribute name."),
    "unexpected-character-after-attribute-value":
        _("Unexpected character after attribute value."),
    "eof-in-attribute-value-double-quote":
        _("Unexpected end of file in attribute value (\")."),
    "eof-in-attribute-value-single-quote":
        _("Unexpected end of file in attribute value (')."),
    "eof-in-attribute-value-no-quotes":
        _("Unexpected end of file in attribute value."),
    "unexpected-EOF-after-solidus-in-tag":
        _("Unexpected end of file in tag. Expected >"),
    "unexpected-character-after-solidus-in-tag":
        _("Unexpected character after / in tag. Expected >"),
    "expected-dashes-or-doctype":
        _("Expected '--' or 'DOCTYPE'. Not found."),
    "unexpected-bang-after-double-dash-in-comment":
        _("Unexpected ! after -- in comment"),
    "unexpected-space-after-double-dash-in-comment":
        _("Unexpected space after -- in comment"),
    "incorrect-comment":
        _("Incorrect comment."),
    "eof-in-comment":
        _("Unexpected end of file in comment."),
    "eof-in-comment-end-dash":
        _("Unexpected end of file in comment (-)"),
    "unexpected-dash-after-double-dash-in-comment":
        _("Unexpected '-' after '--' found in comment."),
    "eof-in-comment-double-dash":
        _("Unexpected end of file in comment (--)."),
    "eof-in-comment-end-space-state":
        _("Unexpected end of file in comment."),
    "eof-in-comment-end-bang-state":
        _("Unexpected end of file in comment."),
    "unexpected-char-in-comment":
        _("Unexpected character in comment found."),
    "need-space-after-doctype":
        _("No space after literal string 'DOCTYPE'."),
    "expected-doctype-name-but-got-right-bracket":
        _("Unexpected > character. Expected DOCTYPE name."),
    "expected-doctype-name-but-got-eof":
        _("Unexpected end of file. Expected DOCTYPE name."),
    "eof-in-doctype-name":
        _("Unexpected end of file in DOCTYPE name."),
    "eof-in-doctype":
        _("Unexpected end of file in DOCTYPE."),
    "expected-space-or-right-bracket-in-doctype":
        _("Expected space or '>'. Got '%(data)s'"),
    "unexpected-end-of-doctype":
        _("Unexpected end of DOCTYPE."),
    "unexpected-char-in-doctype":
        _("Unexpected character in DOCTYPE."),
    "eof-in-innerhtml":
        _("XXX innerHTML EOF"),
    "unexpected-doctype":
        _("Unexpected DOCTYPE. Ignored."),
    "non-html-root":
        _("html needs to be the first start tag."),
    "expected-doctype-but-got-eof":
        _("Unexpected End of file. Expected DOCTYPE."),
    "unknown-doctype":
        _("Erroneous DOCTYPE."),
    "expected-doctype-but-got-chars":
        _("Unexpected non-space characters. Expected DOCTYPE."),
    "expected-doctype-but-got-start-tag":
        _("Unexpected start tag (%(name)s). Expected DOCTYPE."),
    "expected-doctype-but-got-end-tag":
        _("Unexpected end tag (%(name)s). Expected DOCTYPE."),
    "end-tag-after-implied-root":
        _("Unexpected end tag (%(name)s) after the (implied) root element."),
    "expected-named-closing-tag-but-got-eof":
        _("Unexpected end of file. Expected end tag (%(name)s)."),
    "two-heads-are-not-better-than-one":
        _("Unexpected start tag head in existing head. Ignored."),
    "unexpected-end-tag":
        _("Unexpected end tag (%(name)s). Ignored."),
    "unexpected-start-tag-out-of-my-head":
        _("Unexpected start tag (%(name)s) that can be in head. Moved."),
    "unexpected-start-tag":
        _("Unexpected start tag (%(name)s)."),
    "missing-end-tag":
        _("Missing end tag (%(name)s)."),
    "missing-end-tags":
        _("Missing end tags (%(name)s)."),
    "unexpected-start-tag-implies-end-tag":
        _("Unexpected start tag (%(startName)s) "
          "implies end tag (%(endName)s)."),
    "unexpected-start-tag-treated-as":
        _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
    "deprecated-tag":
        _("Unexpected start tag %(name)s. Don't use it!"),
    "unexpected-start-tag-ignored":
        _("Unexpected start tag %(name)s. Ignored."),
    "expected-one-end-tag-but-got-another":
        _("Unexpected end tag (%(gotName)s). "
          "Missing end tag (%(expectedName)s)."),
    "end-tag-too-early":
        _("End tag (%(name)s) seen too early. Expected other end tag."),
    "end-tag-too-early-named":
        _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
    "end-tag-too-early-ignored":
        _("End tag (%(name)s) seen too early. Ignored."),
    "adoption-agency-1.1":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 1 of the adoption agency algorithm."),
    "adoption-agency-1.2":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 2 of the adoption agency algorithm."),
    "adoption-agency-1.3":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 3 of the adoption agency algorithm."),
    "adoption-agency-4.4":
        _("End tag (%(name)s) violates step 4, "
          "paragraph 4 of the adoption agency algorithm."),
    "unexpected-end-tag-treated-as":
        _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
    "no-end-tag":
        _("This element (%(name)s) has no end tag."),
    "unexpected-implied-end-tag-in-table":
        _("Unexpected implied end tag (%(name)s) in the table phase."),
    "unexpected-implied-end-tag-in-table-body":
        _("Unexpected implied end tag (%(name)s) in the table body phase."),
    "unexpected-char-implies-table-voodoo":
        _("Unexpected non-space characters in "
          "table context caused voodoo mode."),
    "unexpected-hidden-input-in-table":
        _("Unexpected input with type hidden in table context."),
    "unexpected-form-in-table":
        _("Unexpected form in table context."),
    "unexpected-start-tag-implies-table-voodoo":
        _("Unexpected start tag (%(name)s) in "
          "table context caused voodoo mode."),
    "unexpected-end-tag-implies-table-voodoo":
        _("Unexpected end tag (%(name)s) in "
          "table context caused voodoo mode."),
    "unexpected-cell-in-table-body":
        _("Unexpected table cell start tag (%(name)s) "
          "in the table body phase."),
    "unexpected-cell-end-tag":
        _("Got table cell end tag (%(name)s) "
          "while required end tags are missing."),
    "unexpected-end-tag-in-table-body":
        _("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
    "unexpected-implied-end-tag-in-table-row":
        _("Unexpected implied end tag (%(name)s) in the table row phase."),
    "unexpected-end-tag-in-table-row":
        _("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
    "unexpected-select-in-select":
        _("Unexpected select start tag in the select phase "
          "treated as select end tag."),
    "unexpected-input-in-select":
        _("Unexpected input start tag in the select phase."),
    "unexpected-start-tag-in-select":
        _("Unexpected start tag token (%(name)s in the select phase. "
          "Ignored."),
    "unexpected-end-tag-in-select":
        _("Unexpected end tag (%(name)s) in the select phase. Ignored."),
    "unexpected-table-element-start-tag-in-select-in-table":
        _("Unexpected table element start tag (%(name)s) in the select in table phase."),
    "unexpected-table-element-end-tag-in-select-in-table":
        _("Unexpected table element end tag (%(name)s) in the select in table phase."),
    "unexpected-char-after-body":
        _("Unexpected non-space characters in the after body phase."),
    "unexpected-start-tag-after-body":
        _("Unexpected start tag token (%(name)s)"
          " in the after body phase."),
    "unexpected-end-tag-after-body":
        _("Unexpected end tag token (%(name)s)"
          " in the after body phase."),
    "unexpected-char-in-frameset":
        _("Unexpected characters in the frameset phase. Characters ignored."),
    "unexpected-start-tag-in-frameset":
        _("Unexpected start tag token (%(name)s)"
          " in the frameset phase. Ignored."),
    "unexpected-frameset-in-frameset-innerhtml":
        _("Unexpected end tag token (frameset) "
          "in the frameset phase (innerHTML)."),
    "unexpected-end-tag-in-frameset":
        _("Unexpected end tag token (%(name)s)"
          " in the frameset phase. Ignored."),
    "unexpected-char-after-frameset":
        _("Unexpected non-space characters in the "
          "after frameset phase. Ignored."),
    "unexpected-start-tag-after-frameset":
        _("Unexpected start tag (%(name)s)"
          " in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-frameset":
        _("Unexpected end tag (%(name)s)"
          " in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-body-innerhtml":
        _("Unexpected end tag after body(innerHtml)"),
    "expected-eof-but-got-char":
        _("Unexpected non-space characters. Expected end of file."),
    "expected-eof-but-got-start-tag":
        _("Unexpected start tag (%(name)s)"
          ". Expected end of file."),
    "expected-eof-but-got-end-tag":
        _("Unexpected end tag (%(name)s)"
          ". Expected end of file."),
    "eof-in-table":
        _("Unexpected end of file. Expected table content."),
    "eof-in-select":
        _("Unexpected end of file. Expected select content."),
    "eof-in-frameset":
        _("Unexpected end of file. Expected frameset content."),
    "eof-in-script-in-script":
        _("Unexpected end of file. Expected script content."),
    "eof-in-foreign-lands":
        _("Unexpected end of file. Expected foreign content"),
    "non-void-element-with-trailing-solidus":
        _("Trailing solidus not allowed on element %(name)s"),
    "unexpected-html-element-in-foreign-content":
        _("Element %(name)s not allowed in a non-html context"),
    "unexpected-end-tag-before-html":
        _("Unexpected end tag (%(name)s) before html."),
    "XXX-undefined-error":
        _("Undefined error (this sucks and should be fixed)"),
}
# XML namespace URIs used throughout the parser, keyed by short name.
namespaces = {
    "html": "http://www.w3.org/1999/xhtml",
    "mathml": "http://www.w3.org/1998/Math/MathML",
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
    "xml": "http://www.w3.org/XML/1998/namespace",
    "xmlns": "http://www.w3.org/2000/xmlns/"
}
# (namespace, tag) pairs that limit "in scope" checks during tree
# construction; membership tests use these exact pairs.
scopingElements = frozenset((
    (namespaces["html"], "applet"),
    (namespaces["html"], "caption"),
    (namespaces["html"], "html"),
    (namespaces["html"], "marquee"),
    (namespaces["html"], "object"),
    (namespaces["html"], "table"),
    (namespaces["html"], "td"),
    (namespaces["html"], "th"),
    (namespaces["mathml"], "mi"),
    (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"),
    (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext"),
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title"),
))
# HTML formatting elements tracked by the "list of active formatting
# elements" / adoption agency machinery.
formattingElements = frozenset((
    (namespaces["html"], "a"),
    (namespaces["html"], "b"),
    (namespaces["html"], "big"),
    (namespaces["html"], "code"),
    (namespaces["html"], "em"),
    (namespaces["html"], "font"),
    (namespaces["html"], "i"),
    (namespaces["html"], "nobr"),
    (namespaces["html"], "s"),
    (namespaces["html"], "small"),
    (namespaces["html"], "strike"),
    (namespaces["html"], "strong"),
    (namespaces["html"], "tt"),
    (namespaces["html"], "u")
))
# (namespace, tag) pairs in the "special" category of the tree-construction
# algorithm.
specialElements = frozenset((
    (namespaces["html"], "address"),
    (namespaces["html"], "applet"),
    (namespaces["html"], "area"),
    (namespaces["html"], "article"),
    (namespaces["html"], "aside"),
    (namespaces["html"], "base"),
    (namespaces["html"], "basefont"),
    (namespaces["html"], "bgsound"),
    (namespaces["html"], "blockquote"),
    (namespaces["html"], "body"),
    (namespaces["html"], "br"),
    (namespaces["html"], "button"),
    (namespaces["html"], "caption"),
    (namespaces["html"], "center"),
    (namespaces["html"], "col"),
    (namespaces["html"], "colgroup"),
    (namespaces["html"], "command"),
    (namespaces["html"], "dd"),
    (namespaces["html"], "details"),
    (namespaces["html"], "dir"),
    (namespaces["html"], "div"),
    (namespaces["html"], "dl"),
    (namespaces["html"], "dt"),
    (namespaces["html"], "embed"),
    (namespaces["html"], "fieldset"),
    (namespaces["html"], "figure"),
    (namespaces["html"], "footer"),
    (namespaces["html"], "form"),
    (namespaces["html"], "frame"),
    (namespaces["html"], "frameset"),
    (namespaces["html"], "h1"),
    (namespaces["html"], "h2"),
    (namespaces["html"], "h3"),
    (namespaces["html"], "h4"),
    (namespaces["html"], "h5"),
    (namespaces["html"], "h6"),
    (namespaces["html"], "head"),
    (namespaces["html"], "header"),
    (namespaces["html"], "hr"),
    (namespaces["html"], "html"),
    (namespaces["html"], "iframe"),
    # Note that image is commented out in the spec as "this isn't an
    # element that can end up on the stack, so it doesn't matter,"
    (namespaces["html"], "image"),
    (namespaces["html"], "img"),
    (namespaces["html"], "input"),
    (namespaces["html"], "isindex"),
    (namespaces["html"], "li"),
    (namespaces["html"], "link"),
    (namespaces["html"], "listing"),
    (namespaces["html"], "marquee"),
    (namespaces["html"], "menu"),
    (namespaces["html"], "meta"),
    (namespaces["html"], "nav"),
    (namespaces["html"], "noembed"),
    (namespaces["html"], "noframes"),
    (namespaces["html"], "noscript"),
    (namespaces["html"], "object"),
    (namespaces["html"], "ol"),
    (namespaces["html"], "p"),
    (namespaces["html"], "param"),
    (namespaces["html"], "plaintext"),
    (namespaces["html"], "pre"),
    (namespaces["html"], "script"),
    (namespaces["html"], "section"),
    (namespaces["html"], "select"),
    (namespaces["html"], "style"),
    (namespaces["html"], "table"),
    (namespaces["html"], "tbody"),
    (namespaces["html"], "td"),
    (namespaces["html"], "textarea"),
    (namespaces["html"], "tfoot"),
    (namespaces["html"], "th"),
    (namespaces["html"], "thead"),
    (namespaces["html"], "title"),
    (namespaces["html"], "tr"),
    (namespaces["html"], "ul"),
    (namespaces["html"], "wbr"),
    (namespaces["html"], "xmp"),
    (namespaces["svg"], "foreignObject")
))
# (namespace, tag) pairs that are "HTML integration points": elements in
# foreign content whose children are parsed as HTML.
htmlIntegrationPointElements = frozenset((
    # Fixed typo: the spec element is "annotation-xml" (was "annotaion-xml"),
    # matching the spelling already used in scopingElements; the misspelled
    # entry could never match a real element.
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title")
))
# MathML "text integration point" elements: character and most start-tag
# tokens inside them are handled as HTML rather than foreign content.
mathmlTextIntegrationPointElements = frozenset((
    (namespaces["mathml"], "mi"),
    (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"),
    (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext")
))
# Attribute-name adjustments for foreign (SVG/MathML) content:
# qualified name -> (prefix, local name, namespace URI).
adjustForeignAttributes = {
    "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
    "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
    "xlink:href": ("xlink", "href", namespaces["xlink"]),
    "xlink:role": ("xlink", "role", namespaces["xlink"]),
    "xlink:show": ("xlink", "show", namespaces["xlink"]),
    "xlink:title": ("xlink", "title", namespaces["xlink"]),
    "xlink:type": ("xlink", "type", namespaces["xlink"]),
    "xml:base": ("xml", "base", namespaces["xml"]),
    "xml:lang": ("xml", "lang", namespaces["xml"]),
    "xml:space": ("xml", "space", namespaces["xml"]),
    "xmlns": (None, "xmlns", namespaces["xmlns"]),
    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
# Inverse of adjustForeignAttributes: (namespace URI, local name) -> the
# qualified name.  Feed the dict constructor a generator directly instead
# of materializing an intermediate list (the list added nothing).
unadjustForeignAttributes = dict(((ns, local), qname)
                                 for qname, (prefix, local, ns)
                                 in adjustForeignAttributes.items())
# The five characters the HTML spec treats as "space characters"
# (tab, LF, FF, space, CR).
spaceCharacters = frozenset((
    "\t",
    "\n",
    "\u000C",
    " ",
    "\r"
))
# Elements that trigger foster-parenting / table insertion-mode handling.
tableInsertModeElements = frozenset((
    "table",
    "tbody",
    "tfoot",
    "thead",
    "tr"
))
# Character-class sets used for fast membership tests during tokenizing.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

# Translation table (codepoint -> codepoint) lowering ASCII upper-case
# letters only, for use with str.translate().  Built from a generator
# expression: dict([listcomp]) materialized a throwaway list.
asciiUpper2Lower = dict((ord(c), ord(c.lower()))
                        for c in string.ascii_uppercase)
# Heading elements need to be ordered (a tuple, not a set): code iterates
# them in rank order h1..h6.
headingElements = (
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6"
)
# Void elements: never have content and take no end tag when serialized.
voidElements = frozenset((
    "base",
    "command",
    "event-source",
    "link",
    "meta",
    "hr",
    "br",
    "img",
    "embed",
    "param",
    "area",
    "col",
    "input",
    "source",
    "track"
))
# NOTE(review): these names look inverted relative to current spec
# terminology (title/textarea are RCDATA per the HTML Standard) — the
# naming is kept as-is because other modules key off these exact names.
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
    'style',
    'script',
    'xmp',
    'iframe',
    'noembed',
    'noframes',
    'noscript'
))
# Attributes treated as boolean (serialized without a value), keyed by
# element name; the "" key applies to every element.
booleanAttributes = {
    "": frozenset(("irrelevant",)),
    "style": frozenset(("scoped",)),
    "img": frozenset(("ismap",)),
    "audio": frozenset(("autoplay", "controls")),
    "video": frozenset(("autoplay", "controls")),
    "script": frozenset(("defer", "async")),
    "details": frozenset(("open",)),
    "datagrid": frozenset(("multiple", "disabled")),
    "command": frozenset(("hidden", "disabled", "checked", "default")),
    # Fixed: frozenset(("noshade")) had no trailing comma, so frozenset
    # iterated the *string* and produced {'n','o','s','h','a','d','e'}
    # instead of the single attribute name.
    "hr": frozenset(("noshade",)),
    "menu": frozenset(("autosubmit",)),
    "fieldset": frozenset(("disabled", "readonly")),
    "option": frozenset(("disabled", "readonly", "selected")),
    "optgroup": frozenset(("disabled", "readonly")),
    "button": frozenset(("disabled", "autofocus")),
    "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
    "select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
    "output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
# Maps bytes 0x80-0x9F (indexed as byte - 0x80) to the Unicode codepoints
# Windows-1252 assigns them; undefined slots map to U+FFFD (65533).
entitiesWindows1252 = (
    8364,   # 0x80  0x20AC  EURO SIGN
    65533,  # 0x81          UNDEFINED
    8218,   # 0x82  0x201A  SINGLE LOW-9 QUOTATION MARK
    402,    # 0x83  0x0192  LATIN SMALL LETTER F WITH HOOK
    8222,   # 0x84  0x201E  DOUBLE LOW-9 QUOTATION MARK
    8230,   # 0x85  0x2026  HORIZONTAL ELLIPSIS
    8224,   # 0x86  0x2020  DAGGER
    8225,   # 0x87  0x2021  DOUBLE DAGGER
    710,    # 0x88  0x02C6  MODIFIER LETTER CIRCUMFLEX ACCENT
    8240,   # 0x89  0x2030  PER MILLE SIGN
    352,    # 0x8A  0x0160  LATIN CAPITAL LETTER S WITH CARON
    8249,   # 0x8B  0x2039  SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    338,    # 0x8C  0x0152  LATIN CAPITAL LIGATURE OE
    65533,  # 0x8D          UNDEFINED
    381,    # 0x8E  0x017D  LATIN CAPITAL LETTER Z WITH CARON
    65533,  # 0x8F          UNDEFINED
    65533,  # 0x90          UNDEFINED
    8216,   # 0x91  0x2018  LEFT SINGLE QUOTATION MARK
    8217,   # 0x92  0x2019  RIGHT SINGLE QUOTATION MARK
    8220,   # 0x93  0x201C  LEFT DOUBLE QUOTATION MARK
    8221,   # 0x94  0x201D  RIGHT DOUBLE QUOTATION MARK
    8226,   # 0x95  0x2022  BULLET
    8211,   # 0x96  0x2013  EN DASH
    8212,   # 0x97  0x2014  EM DASH
    732,    # 0x98  0x02DC  SMALL TILDE
    8482,   # 0x99  0x2122  TRADE MARK SIGN
    353,    # 0x9A  0x0161  LATIN SMALL LETTER S WITH CARON
    8250,   # 0x9B  0x203A  SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    339,    # 0x9C  0x0153  LATIN SMALL LIGATURE OE
    65533,  # 0x9D          UNDEFINED
    382,    # 0x9E  0x017E  LATIN SMALL LETTER Z WITH CARON
    376     # 0x9F  0x0178  LATIN CAPITAL LETTER Y WITH DIAERESIS
)
# The five predefined XML entity references (names include the ';').
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
# Replacements for byte values that are not valid in numeric character
# references: NUL becomes U+FFFD, CR is preserved, and 0x80-0x9F are mapped
# to the characters Windows-1252 assigns to those bytes (the values below
# are exactly the Windows-1252 code points).
# Fix: the original dict repeated the 0x81 key twice; duplicate dict keys
# are silently collapsed by Python (last one wins), so one entry is removed.
replacementCharacters = {
    0x0: "\uFFFD",
    0x0d: "\u000D",
    0x80: "\u20AC",
    0x81: "\u0081",
    0x82: "\u201A",
    0x83: "\u0192",
    0x84: "\u201E",
    0x85: "\u2026",
    0x86: "\u2020",
    0x87: "\u2021",
    0x88: "\u02C6",
    0x89: "\u2030",
    0x8A: "\u0160",
    0x8B: "\u2039",
    0x8C: "\u0152",
    0x8D: "\u008D",
    0x8E: "\u017D",
    0x8F: "\u008F",
    0x90: "\u0090",
    0x91: "\u2018",
    0x92: "\u2019",
    0x93: "\u201C",
    0x94: "\u201D",
    0x95: "\u2022",
    0x96: "\u2013",
    0x97: "\u2014",
    0x98: "\u02DC",
    0x99: "\u2122",
    0x9A: "\u0161",
    0x9B: "\u203A",
    0x9C: "\u0153",
    0x9D: "\u009D",
    0x9E: "\u017E",
    0x9F: "\u0178",
}
# Maps normalized encoding labels (lower-cased, punctuation stripped) to
# Python codec names. Note that several labels intentionally resolve to a
# superset codec (e.g. 'iso88591' -> 'windows-1252', 'latin5' ->
# 'windows-1254') — presumably to match how browsers interpret those labels.
encodings = {
    '437': 'cp437',
    '850': 'cp850',
    '852': 'cp852',
    '855': 'cp855',
    '857': 'cp857',
    '860': 'cp860',
    '861': 'cp861',
    '862': 'cp862',
    '863': 'cp863',
    '865': 'cp865',
    '866': 'cp866',
    '869': 'cp869',
    'ansix341968': 'ascii',
    'ansix341986': 'ascii',
    'arabic': 'iso8859-6',
    'ascii': 'ascii',
    'asmo708': 'iso8859-6',
    'big5': 'big5',
    'big5hkscs': 'big5hkscs',
    'chinese': 'gbk',
    'cp037': 'cp037',
    'cp1026': 'cp1026',
    'cp154': 'ptcp154',
    'cp367': 'ascii',
    'cp424': 'cp424',
    'cp437': 'cp437',
    'cp500': 'cp500',
    'cp775': 'cp775',
    'cp819': 'windows-1252',
    'cp850': 'cp850',
    'cp852': 'cp852',
    'cp855': 'cp855',
    'cp857': 'cp857',
    'cp860': 'cp860',
    'cp861': 'cp861',
    'cp862': 'cp862',
    'cp863': 'cp863',
    'cp864': 'cp864',
    'cp865': 'cp865',
    'cp866': 'cp866',
    'cp869': 'cp869',
    'cp936': 'gbk',
    'cpgr': 'cp869',
    'cpis': 'cp861',
    'csascii': 'ascii',
    'csbig5': 'big5',
    'cseuckr': 'cp949',
    'cseucpkdfmtjapanese': 'euc_jp',
    'csgb2312': 'gbk',
    'cshproman8': 'hp-roman8',
    'csibm037': 'cp037',
    'csibm1026': 'cp1026',
    'csibm424': 'cp424',
    'csibm500': 'cp500',
    'csibm855': 'cp855',
    'csibm857': 'cp857',
    'csibm860': 'cp860',
    'csibm861': 'cp861',
    'csibm863': 'cp863',
    'csibm864': 'cp864',
    'csibm865': 'cp865',
    'csibm866': 'cp866',
    'csibm869': 'cp869',
    'csiso2022jp': 'iso2022_jp',
    'csiso2022jp2': 'iso2022_jp_2',
    'csiso2022kr': 'iso2022_kr',
    'csiso58gb231280': 'gbk',
    'csisolatin1': 'windows-1252',
    'csisolatin2': 'iso8859-2',
    'csisolatin3': 'iso8859-3',
    'csisolatin4': 'iso8859-4',
    'csisolatin5': 'windows-1254',
    'csisolatin6': 'iso8859-10',
    'csisolatinarabic': 'iso8859-6',
    'csisolatincyrillic': 'iso8859-5',
    'csisolatingreek': 'iso8859-7',
    'csisolatinhebrew': 'iso8859-8',
    'cskoi8r': 'koi8-r',
    'csksc56011987': 'cp949',
    'cspc775baltic': 'cp775',
    'cspc850multilingual': 'cp850',
    'cspc862latinhebrew': 'cp862',
    'cspc8codepage437': 'cp437',
    'cspcp852': 'cp852',
    'csptcp154': 'ptcp154',
    'csshiftjis': 'shift_jis',
    'csunicode11utf7': 'utf-7',
    'cyrillic': 'iso8859-5',
    'cyrillicasian': 'ptcp154',
    'ebcdiccpbe': 'cp500',
    'ebcdiccpca': 'cp037',
    'ebcdiccpch': 'cp500',
    'ebcdiccphe': 'cp424',
    'ebcdiccpnl': 'cp037',
    'ebcdiccpus': 'cp037',
    'ebcdiccpwt': 'cp037',
    'ecma114': 'iso8859-6',
    'ecma118': 'iso8859-7',
    'elot928': 'iso8859-7',
    'eucjp': 'euc_jp',
    'euckr': 'cp949',
    'extendedunixcodepackedformatforjapanese': 'euc_jp',
    'gb18030': 'gb18030',
    'gb2312': 'gbk',
    'gb231280': 'gbk',
    'gbk': 'gbk',
    'greek': 'iso8859-7',
    'greek8': 'iso8859-7',
    'hebrew': 'iso8859-8',
    'hproman8': 'hp-roman8',
    'hzgb2312': 'hz',
    'ibm037': 'cp037',
    'ibm1026': 'cp1026',
    'ibm367': 'ascii',
    'ibm424': 'cp424',
    'ibm437': 'cp437',
    'ibm500': 'cp500',
    'ibm775': 'cp775',
    'ibm819': 'windows-1252',
    'ibm850': 'cp850',
    'ibm852': 'cp852',
    'ibm855': 'cp855',
    'ibm857': 'cp857',
    'ibm860': 'cp860',
    'ibm861': 'cp861',
    'ibm862': 'cp862',
    'ibm863': 'cp863',
    'ibm864': 'cp864',
    'ibm865': 'cp865',
    'ibm866': 'cp866',
    'ibm869': 'cp869',
    'iso2022jp': 'iso2022_jp',
    'iso2022jp2': 'iso2022_jp_2',
    'iso2022kr': 'iso2022_kr',
    'iso646irv1991': 'ascii',
    'iso646us': 'ascii',
    'iso88591': 'windows-1252',
    'iso885910': 'iso8859-10',
    'iso8859101992': 'iso8859-10',
    'iso885911987': 'windows-1252',
    'iso885913': 'iso8859-13',
    'iso885914': 'iso8859-14',
    'iso8859141998': 'iso8859-14',
    'iso885915': 'iso8859-15',
    'iso885916': 'iso8859-16',
    'iso8859162001': 'iso8859-16',
    'iso88592': 'iso8859-2',
    'iso885921987': 'iso8859-2',
    'iso88593': 'iso8859-3',
    'iso885931988': 'iso8859-3',
    'iso88594': 'iso8859-4',
    'iso885941988': 'iso8859-4',
    'iso88595': 'iso8859-5',
    'iso885951988': 'iso8859-5',
    'iso88596': 'iso8859-6',
    'iso885961987': 'iso8859-6',
    'iso88597': 'iso8859-7',
    'iso885971987': 'iso8859-7',
    'iso88598': 'iso8859-8',
    'iso885981988': 'iso8859-8',
    'iso88599': 'windows-1254',
    'iso885991989': 'windows-1254',
    'isoceltic': 'iso8859-14',
    'isoir100': 'windows-1252',
    'isoir101': 'iso8859-2',
    'isoir109': 'iso8859-3',
    'isoir110': 'iso8859-4',
    'isoir126': 'iso8859-7',
    'isoir127': 'iso8859-6',
    'isoir138': 'iso8859-8',
    'isoir144': 'iso8859-5',
    'isoir148': 'windows-1254',
    'isoir149': 'cp949',
    'isoir157': 'iso8859-10',
    'isoir199': 'iso8859-14',
    'isoir226': 'iso8859-16',
    'isoir58': 'gbk',
    'isoir6': 'ascii',
    'koi8r': 'koi8-r',
    'koi8u': 'koi8-u',
    'korean': 'cp949',
    'ksc5601': 'cp949',
    'ksc56011987': 'cp949',
    'ksc56011989': 'cp949',
    'l1': 'windows-1252',
    'l10': 'iso8859-16',
    'l2': 'iso8859-2',
    'l3': 'iso8859-3',
    'l4': 'iso8859-4',
    'l5': 'windows-1254',
    'l6': 'iso8859-10',
    'l8': 'iso8859-14',
    'latin1': 'windows-1252',
    'latin10': 'iso8859-16',
    'latin2': 'iso8859-2',
    'latin3': 'iso8859-3',
    'latin4': 'iso8859-4',
    'latin5': 'windows-1254',
    'latin6': 'iso8859-10',
    'latin8': 'iso8859-14',
    'latin9': 'iso8859-15',
    'ms936': 'gbk',
    'mskanji': 'shift_jis',
    'pt154': 'ptcp154',
    'ptcp154': 'ptcp154',
    'r8': 'hp-roman8',
    'roman8': 'hp-roman8',
    'shiftjis': 'shift_jis',
    'tis620': 'cp874',
    'unicode11utf7': 'utf-7',
    'us': 'ascii',
    'usascii': 'ascii',
    'utf16': 'utf-16',
    'utf16be': 'utf-16-be',
    'utf16le': 'utf-16-le',
    'utf8': 'utf-8',
    'windows1250': 'cp1250',
    'windows1251': 'cp1251',
    'windows1252': 'cp1252',
    'windows1253': 'cp1253',
    'windows1254': 'cp1254',
    'windows1255': 'cp1255',
    'windows1256': 'cp1256',
    'windows1257': 'cp1257',
    'windows1258': 'cp1258',
    'windows936': 'gbk',
    'x-x-big5': 'big5'}
# Symbolic names for the small-integer token type codes.
tokenTypes = {
    "Doctype": 0,
    "Characters": 1,
    "SpaceCharacters": 2,
    "StartTag": 3,
    "EndTag": 4,
    "EmptyTag": 5,
    "Comment": 6,
    "ParseError": 7
}

# The subset of token type codes that represent tags.
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
                           tokenTypes["EmptyTag"]))
# Reverse mapping of `namespaces`: URI -> prefix. Built with a dict
# comprehension instead of dict([...]) (same semantics: if several prefixes
# share a URI, the last one iterated wins).
prefixes = {v: k for k, v in namespaces.items()}
# Force the MathML URI to the "math" prefix, overriding whichever prefix the
# reversal above happened to pick for it.
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning category signalling potential loss of data.

    Defined here for shared use; not emitted in this module — see the call
    sites elsewhere in the package for the exact conditions.
    """
    pass
class ReparseException(Exception):
    """Control-flow exception used to signal that parsing must restart.

    Raised elsewhere in the package (not in this module) — presumably when
    the detected encoding changes mid-parse; confirm at the raise sites.
    """
    pass
| mpl-2.0 |
google/grr | grr/core/grr_response_core/lib/rdfvalues/client.py | 1 | 21251 | #!/usr/bin/env python
"""AFF4 RDFValue implementations for client information.
This module contains the RDFValue implementations used to communicate with the
client.
"""
import binascii
import hashlib
import logging
import platform
import re
import socket
import struct
import sys
from typing import Mapping, Text
import distro
import psutil
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import text
from grr_response_proto import jobs_pb2
from grr_response_proto import knowledge_base_pb2
from grr_response_proto import sysinfo_pb2
# ntop does not exist on Windows.
# pylint: disable=g-socket-inet-aton,g-socket-inet-ntoa
# We try to support PEP 425 style component names if possible. This makes it
# possible to have wheel as an optional dependency.
try:
# pytype: disable=import-error
from wheel import pep425tags # pylint: disable=g-import-not-at-top
# pytype: enable=import-error
except ImportError:
pep425tags = None
FS_ENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
_LOCALHOST = "localhost"
def _DecodeArgument(arg) -> Text:
  """Returns `arg` as text, decoding from the filesystem encoding on PY2."""
  if not compatibility.PY2:
    return arg
  return arg.decode(FS_ENCODING)
class ClientURN(rdfvalue.RDFURN):
  """A client urn has to have a specific form."""

  # Valid client urns must match this expression: optional "aff4:" scheme and
  # leading slash, then "C." (either case) followed by 16 hex digits.
  CLIENT_ID_RE = re.compile(r"^(aff4:)?/?(?P<clientid>(c|C)\.[0-9a-fA-F]{16})$")

  def __init__(self, initializer=None):
    super().__init__(initializer)
    # Reject any non-empty value that does not match CLIENT_ID_RE.
    if self._value and not self.Validate(self._value):
      raise type_info.TypeValueError("Client urn malformed: %s" % initializer)

  @classmethod
  def _Normalize(cls, string):
    """Normalizes the URN and canonicalizes client-id casing.

    The canonical form is an upper-case "C." prefix with lower-case hex
    digits.

    Raises:
      type_info.TypeValueError: if the normalized string does not match
        CLIENT_ID_RE.
    """
    normalized = super(ClientURN, cls)._Normalize(string.strip())
    if normalized:
      match = cls.CLIENT_ID_RE.match(normalized)
      if not match:
        raise type_info.TypeValueError(
            "Client URN '{!r} from initializer {!r} malformed".format(
                normalized, string))
      clientid = match.group("clientid")
      clientid_correctcase = "".join(
          (clientid[0].upper(), clientid[1:].lower()))
      # Replace only the first occurrence, i.e. the client-id component.
      normalized = normalized.replace(clientid, clientid_correctcase, 1)
    return normalized

  @classmethod
  def Validate(cls, value):
    """Returns True iff `value` matches the client URN pattern."""
    if value:
      return bool(cls.CLIENT_ID_RE.match(str(value)))
    return False

  @classmethod
  def FromPrivateKey(cls, private_key):
    """Derives the client URN from the public half of `private_key`."""
    return cls.FromPublicKey(private_key.GetPublicKey())

  @classmethod
  def FromPublicKey(cls, public_key):
    """An alternate constructor which generates a new client id."""
    # Our CN will be the first 64 bits of the hash of the public key
    # in MPI format - the length of the key in 4 bytes + the key
    # prefixed with a 0. This weird format is an artifact from the way
    # M2Crypto handled this, we have to live with it for now.
    n = public_key.GetN()
    # NOTE(review): "%x" can yield an odd-length hex string, which
    # binascii.unhexlify rejects — confirm n always produces an even length.
    raw_n = binascii.unhexlify("%x" % n)
    mpi_format = struct.pack(">i", len(raw_n) + 1) + b"\x00" + raw_n
    digest = text.Hexify(hashlib.sha256(mpi_format).digest()[:8])
    return cls("C.{}".format(digest))

  def Add(self, path):
    """Add a relative stem to the current value and return a new RDFURN.

    Note that this returns an RDFURN, not a ClientURN since the resulting object
    would not pass validation.

    Args:
      path: A string containing a relative path.

    Returns:
      A new RDFURN that can be chained.

    Raises:
      ValueError: if the path component is not a string.
    """
    if not isinstance(path, str):
      raise ValueError("Only strings should be added to a URN.")
    return rdfvalue.RDFURN(utils.JoinPath(self._value, path))
class PCIDevice(rdf_structs.RDFProtoStruct):
  """A PCI device on the client.

  This class describes a PCI device located on the client.
  """
  # Backing protobuf message for this RDF wrapper.
  protobuf = sysinfo_pb2.PCIDevice
class PackageRepository(rdf_structs.RDFProtoStruct):
  """Description of the configured repositories (Yum etc).

  Describes the configured software package repositories.
  """
  # Backing protobuf message for this RDF wrapper.
  protobuf = sysinfo_pb2.PackageRepository
class ManagementAgent(rdf_structs.RDFProtoStruct):
  """Description of the running management agent (puppet etc).

  Describes the state, last run timestamp, and name of the management agent
  installed on the system.
  """
  protobuf = sysinfo_pb2.ManagementAgent
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      rdfvalue.RDFDatetime,
  ]
class PwEntry(rdf_structs.RDFProtoStruct):
  """Information about password structures."""
  # Backing protobuf message for this RDF wrapper.
  protobuf = knowledge_base_pb2.PwEntry
class Group(rdf_structs.RDFProtoStruct):
  """Information about system posix groups."""
  protobuf = knowledge_base_pb2.Group
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      PwEntry,
  ]
class User(rdf_structs.RDFProtoStruct):
  """Information about the users."""
  protobuf = knowledge_base_pb2.User
  rdf_deps = [
      PwEntry,
      rdfvalue.RDFDatetime,
  ]

  def __init__(self, initializer=None, **kwargs):
    # Accepts a legacy KnowledgeBaseUser by round-tripping it through its
    # serialized form (the two protos are wire-compatible).
    if isinstance(initializer, KnowledgeBaseUser):
      # KnowledgeBaseUser was renamed to User, the protos are identical. This
      # allows for backwards compatibility with clients returning KBUser
      # objects.
      # TODO(user): remove once all clients are newer than 3.0.7.1.
      initializer = User.FromSerializedBytes(initializer.SerializeToBytes())
    super().__init__(initializer=initializer, **kwargs)
class KnowledgeBaseUser(User):
  """Backwards compatibility for old clients.

  Linux client action EnumerateUsers previously returned KnowledgeBaseUser
  objects.
  """
  # Intentionally empty: behaves exactly like User; kept only so objects of
  # this legacy type can still be instantiated and converted (see
  # User.__init__).
class KnowledgeBase(rdf_structs.RDFProtoStruct):
  """Information about the system and users."""
  protobuf = knowledge_base_pb2.KnowledgeBase
  rdf_deps = [
      User,
  ]

  def _CreateNewUser(self, kb_user):
    """Appends `kb_user` and returns the attribute names set, as "users.<k>"."""
    self.users.Append(kb_user)
    return ["users.%s" % k for k in kb_user.AsDict()]

  def MergeOrAddUser(self, kb_user):
    """Merge a user into existing users or add new if it doesn't exist.

    Args:
      kb_user: A User rdfvalue.

    Returns:
      A tuple (new_attrs, merge_conflicts): new_attrs is a list of strings
      with the set attribute names, e.g. ["users.sid"]; merge_conflicts is a
      list of (key, old_value, new_value) tuples for values that were
      overwritten during the merge.
    """
    user = self.GetUser(
        sid=kb_user.sid, uid=kb_user.uid, username=kb_user.username)
    new_attrs = []
    merge_conflicts = []  # Record when we overwrite a value.
    if not user:
      new_attrs = self._CreateNewUser(kb_user)
    else:
      # Merge field by field; incoming values always win, but conflicting
      # overwrites are reported to the caller.
      for key, val in kb_user.AsDict().items():
        if user.Get(key) and user.Get(key) != val:
          merge_conflicts.append((key, user.Get(key), val))
        user.Set(key, val)
        new_attrs.append("users.%s" % key)
    return new_attrs, merge_conflicts

  def GetUser(self, sid=None, uid=None, username=None):
    """Retrieve a User based on sid, uid or username.

    On windows we first get a SID and use it to find the username. We want to
    avoid combining users with name collisions, which occur when local users
    have the same username as domain users (something like Admin is particularly
    common). So if a SID is provided, don't also try to match by username.

    On linux we first get a username, then use this to find the UID, so we want
    to combine these records or we end up with multiple partially-filled user
    records.

    TODO(user): this won't work at all well with a query for uid=0 because
    that is also the default for User objects that don't have uid
    set.

    Args:
      sid: Windows user sid
      uid: Linux/Darwin user id
      username: string

    Returns:
      rdf_client.User or None
    """
    if sid:
      # SID lookup is authoritative: never fall through to username matching.
      for user in self.users:
        if user.sid == sid:
          return user
      return None
    if uid:
      for user in self.users:
        if user.uid == uid:
          return user
    if username:
      for user in self.users:
        if user.username == username:
          # Make sure we aren't combining different uids if we know them
          # user.uid = 0 is the default, which makes this more complicated.
          if uid and user.uid and user.uid != uid:
            return None
          else:
            return user
    # Implicitly returns None when nothing matched.

  def GetKbFieldNames(self):
    """Returns sorted knowledgebase field names, with user fields as "users.<f>"."""
    fields = set(self.type_infos.descriptor_names)
    # DEPRECATED_users field is maintained for backwards compatibility reasons
    # only, as type of the "users" field has changed from jobs.User to
    # knowledge_base.User.
    fields.remove("DEPRECATED_users")
    # We used to have a "hostname" field that contained an fqdn and is
    # therefore now renamed to "fqdn".
    # TODO(amoser): Remove once the artifact has removed the provides
    # section upstream.
    fields.add("hostname")
    fields.remove("users")
    for field in self.users.type_descriptor.type.type_infos.descriptor_names:
      fields.add("users.%s" % field)
    return sorted(fields)
class HardwareInfo(rdf_structs.RDFProtoStruct):
  """Various hardware information."""
  # Backing protobuf message for this RDF wrapper.
  protobuf = sysinfo_pb2.HardwareInfo
class ClientInformation(rdf_structs.RDFProtoStruct):
  """The GRR client information."""
  # Backing protobuf message for this RDF wrapper.
  protobuf = jobs_pb2.ClientInformation
class BufferReference(rdf_structs.RDFProtoStruct):
  """Stores information about a buffer in a file on the client."""
  protobuf = jobs_pb2.BufferReference
  rdf_deps = [
      rdf_paths.PathSpec,
  ]

  def __eq__(self, other):
    # Equality compares only the buffer payload, so a BufferReference compares
    # equal to a raw value with the same content.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable under Python 3, and the comparison is asymmetric — confirm
    # this is intended before relying on it in sets/dicts.
    return self.data == other
class Process(rdf_structs.RDFProtoStruct):
  """Represent a process on the client."""
  protobuf = sysinfo_pb2.Process
  rdf_deps = [
      rdf_client_network.NetworkConnection,
  ]

  @classmethod
  def FromPsutilProcess(cls, psutil_process):
    """Builds a Process rdfvalue from a psutil.Process, best-effort.

    Each attribute group is read inside its own try block: if the process
    disappears or access is denied for one group, that group is simply left
    unset and collection continues with the next one.
    """
    response = cls()
    process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"]
    # oneshot() caches psutil's per-process data so the reads below do not
    # each hit the OS separately.
    with psutil_process.oneshot():
      for field in process_fields:
        try:
          value = getattr(psutil_process, field)
          if value is None:
            continue
          # psutil exposes some of these as methods, others as attributes.
          if callable(value):
            value = value()
          if not isinstance(value, int):
            value = utils.SmartUnicode(value)
          setattr(response, field, value)
        except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
          pass
      try:
        response.cmdline = list(map(_DecodeArgument, psutil_process.cmdline()))
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      except Exception as e:  # pylint: disable=broad-except
        # Windows now has Virtual Secure Mode (VSM) processes that have
        # additional memory protection. For those, cmdline() and cwd() will
        # raise a Windows Error 998 (ERROR_NOACCESS, Invalid access to memory
        # location).
        if not hasattr(e, "winerror") or e.winerror != 998:  # pytype: disable=attribute-error
          raise
      try:
        response.nice = psutil_process.nice()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        # Not available on Windows.
        if hasattr(psutil_process, "uids"):
          (response.real_uid, response.effective_uid,
           response.saved_uid) = psutil_process.uids()
          (response.real_gid, response.effective_gid,
           response.saved_gid) = psutil_process.gids()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        # create_time() is seconds since epoch; ctime is microseconds.
        response.ctime = int(psutil_process.create_time() * 1e6)
        response.status = str(psutil_process.status())
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        # Not available on OSX.
        if hasattr(psutil_process, "cwd"):
          response.cwd = utils.SmartUnicode(psutil_process.cwd())
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      # We have seen psutil on macos raise OSError here with errno 2 - ENOENT -
      # but we haven't managed to reproduce this behavior. This might be fixed
      # in more recent versions of psutil but we play it safe and just catch the
      # error.
      except OSError:
        pass
      except Exception as e:  # pylint: disable=broad-except
        # Windows now has Virtual Secure Mode (VSM) processes that have
        # additional memory protection. For those, cmdline() and cwd() will
        # raise a Windows Error 998 (ERROR_NOACCESS, Invalid access to memory
        # location).
        if not hasattr(e, "winerror") or e.winerror != 998:  # pytype: disable=attribute-error
          raise
      try:
        response.num_threads = psutil_process.num_threads()
      except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError):
        pass
      try:
        cpu_times = psutil_process.cpu_times()
        response.user_cpu_time = cpu_times.user
        response.system_cpu_time = cpu_times.system
        # This is very time consuming so we do not collect cpu_percent here.
        # response.cpu_percent = psutil_process.get_cpu_percent()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      try:
        pmem = psutil_process.memory_info()
        response.RSS_size = pmem.rss  # pylint: disable=invalid-name
        response.VMS_size = pmem.vms  # pylint: disable=invalid-name
        response.memory_percent = psutil_process.memory_percent()
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
      # Due to a bug in psutil, this function is disabled for now
      # (https://github.com/giampaolo/psutil/issues/340)
      # try:
      #  for f in psutil_process.open_files():
      #    response.open_files.append(utils.SmartUnicode(f.path))
      # except (psutil.NoSuchProcess, psutil.AccessDenied):
      #  pass
      try:
        for c in psutil_process.connections():
          conn = response.connections.Append(
              family=c.family, type=c.type, pid=psutil_process.pid)
          try:
            conn.state = c.status
          except ValueError:
            logging.info("Encountered unknown connection status (%s).",
                         c.status)
          # Newer psutil exposes laddr/raddr; older versions used
          # local_address/remote_address — support both.
          try:
            conn.local_address.ip, conn.local_address.port = c.laddr
            # Could be in state LISTEN.
            if c.raddr:
              conn.remote_address.ip, conn.remote_address.port = c.raddr
          except AttributeError:
            conn.local_address.ip, conn.local_address.port = c.local_address
            # Could be in state LISTEN.
            if c.remote_address:
              (conn.remote_address.ip,
               conn.remote_address.port) = c.remote_address
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
    return response
class NamedPipe(rdf_structs.RDFProtoStruct):
  """An RDF wrapper for the named pipe message."""
  protobuf = sysinfo_pb2.NamedPipe
  rdf_deps = []
class SoftwarePackage(rdf_structs.RDFProtoStruct):
  """Represent an installed package on the client."""
  protobuf = sysinfo_pb2.SoftwarePackage

  # Convenience constructors that pin the install_state field.

  @classmethod
  def Installed(cls, **kwargs):
    """Returns a package with install_state=INSTALLED."""
    return SoftwarePackage(
        install_state=SoftwarePackage.InstallState.INSTALLED, **kwargs)

  @classmethod
  def Pending(cls, **kwargs):
    """Returns a package with install_state=PENDING."""
    return SoftwarePackage(
        install_state=SoftwarePackage.InstallState.PENDING, **kwargs)

  @classmethod
  def Uninstalled(cls, **kwargs):
    """Returns a package with install_state=UNINSTALLED."""
    return SoftwarePackage(
        install_state=SoftwarePackage.InstallState.UNINSTALLED, **kwargs)
class SoftwarePackages(rdf_structs.RDFProtoStruct):
  """A list of installed packages on the system."""
  protobuf = sysinfo_pb2.SoftwarePackages
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      SoftwarePackage,
  ]
class LogMessage(rdf_structs.RDFProtoStruct):
  """A log message sent from the client to the server."""
  # Backing protobuf message for this RDF wrapper.
  protobuf = jobs_pb2.LogMessage
class Uname(rdf_structs.RDFProtoStruct):
  """A protobuf to represent the current system."""
  protobuf = jobs_pb2.Uname
  rdf_deps = [
      rdfvalue.RDFDatetime,
  ]

  @property
  def arch(self):
    """Return a more standard representation of the architecture."""
    if self.machine in ["x86_64", "AMD64", "i686"]:
      # 32 bit binaries running on AMD64 will still have a i386 arch.
      if self.architecture == "32bit":
        return "i386"
      return "amd64"
    elif self.machine == "x86":
      return "i386"
    return self.machine

  def signature(self):
    """Returns a unique string that encapsulates the architecture."""
    # If the protobuf contains a proper pep425 tag return that.
    result = self.pep425tag
    if result:
      return result
    raise ValueError("PEP 425 Signature not set - this is likely an old "
                     "component file, please back it up and remove it.")

  @classmethod
  def FromCurrentSystem(cls):
    """Fill a Uname from the currently running platform."""
    uname = platform.uname()
    fqdn = socket.getfqdn()
    if fqdn == _LOCALHOST:
      # Avoid returning 'localhost' when there is a better value to use.
      fqdn = socket.gethostname()
    system = uname[0]
    architecture, _ = platform.architecture()
    # NOTE(review): on systems other than Windows/Darwin/Linux none of the
    # branches below assign kernel/release/version, so the cls(...) call at
    # the end would raise NameError — confirm unsupported platforms cannot
    # reach this code.
    if system == "Windows":
      service_pack = platform.win32_ver()[2]
      kernel = uname[3]  # 5.1.2600
      release = uname[2]  # XP, 2000, 7
      version = uname[3] + service_pack  # 5.1.2600 SP3, 6.1.7601 SP1
    elif system == "Darwin":
      kernel = uname[2]  # 12.2.0
      release = "OSX"  # OSX
      version = platform.mac_ver()[0]  # 10.8.2
    elif system == "Linux":
      kernel = uname[2]  # 3.2.5
      release = distro.name()
      version = distro.version()
    # Emulate PEP 425 naming conventions - e.g. cp27-cp27mu-linux_x86_64.
    if pep425tags:
      try:
        # 0.33.6
        pep_platform = pep425tags.get_platform()
      except TypeError:
        # 0.34.2
        pep_platform = pep425tags.get_platform(None)
      pep425tag = "%s%s-%s-%s" % (
          pep425tags.get_abbr_impl(), pep425tags.get_impl_ver(),
          str(pep425tags.get_abi_tag()).lower(), pep_platform)
    else:
      # For example: windows_7_amd64
      pep425tag = "%s_%s_%s" % (system, release, architecture)
    return cls(
        system=system,
        architecture=architecture,
        release=release,
        version=version,
        machine=uname[4],  # x86, x86_64
        kernel=kernel,
        fqdn=fqdn,
        pep425tag=pep425tag,
    )
class StartupInfo(rdf_structs.RDFProtoStruct):
  """RDF wrapper for the jobs_pb2.StartupInfo message."""
  protobuf = jobs_pb2.StartupInfo
  rdf_deps = [
      ClientInformation,
      rdfvalue.RDFDatetime,
  ]
class WindowsServiceInformation(rdf_structs.RDFProtoStruct):
  """Windows Service."""
  protobuf = sysinfo_pb2.WindowsServiceInformation
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      rdf_protodict.Dict,
      rdf_client_fs.StatEntry,
  ]
class OSXServiceInformation(rdf_structs.RDFProtoStruct):
  """OSX Service (launchagent/daemon)."""
  protobuf = sysinfo_pb2.OSXServiceInformation
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      rdfvalue.RDFURN,
  ]
class LinuxServiceInformation(rdf_structs.RDFProtoStruct):
  """Linux Service (init/upstart/systemd)."""
  protobuf = sysinfo_pb2.LinuxServiceInformation
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      rdf_protodict.AttributedDict,
      SoftwarePackage,
      rdf_client_fs.StatEntry,
  ]
# Start of the Registry Specific Data types
class RunKey(rdf_structs.RDFProtoStruct):
  """RDF wrapper for sysinfo_pb2.RunKey (a Windows registry Run key entry)."""
  protobuf = sysinfo_pb2.RunKey
class RunKeyEntry(rdf_protodict.RDFValueArray):
  """Structure of a Run Key entry with keyname, filepath, and last written."""
  # An array of RunKey values.
  rdf_type = RunKey
class ClientCrash(rdf_structs.RDFProtoStruct):
  """Details of a client crash."""
  protobuf = jobs_pb2.ClientCrash
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      ClientInformation,
      ClientURN,
      rdfvalue.RDFDatetime,
      rdfvalue.SessionID,
  ]
class EdrAgent(rdf_structs.RDFProtoStruct):
  """An RDF wrapper for protobuf message containing EDR agent metadata."""
  protobuf = jobs_pb2.EdrAgent
  rdf_deps = []
class FleetspeakValidationInfoTag(rdf_structs.RDFProtoStruct):
  """Dictionary entry in FleetspeakValidationInfo."""
  # A single key/value pair; see FleetspeakValidationInfo below.
  protobuf = jobs_pb2.FleetspeakValidationInfoTag
class FleetspeakValidationInfo(rdf_structs.RDFProtoStruct):
  """Dictionary-like struct containing Fleetspeak ValidationInfo."""
  protobuf = jobs_pb2.FleetspeakValidationInfo
  rdf_deps = [FleetspeakValidationInfoTag]

  @classmethod
  def FromStringDict(cls, dct: Mapping[str, str]) -> "FleetspeakValidationInfo":
    """Builds a FleetspeakValidationInfo from a plain {key: value} dict."""
    instance = cls()
    for key, value in dct.items():
      instance.tags.Append(key=key, value=value)
    return instance

  def ToStringDict(self) -> Mapping[str, str]:
    """Returns the tags as a plain {key: value} dict."""
    return {tag.key: tag.value for tag in self.tags}
class ClientSummary(rdf_structs.RDFProtoStruct):
  """Object containing client's summary data."""
  protobuf = jobs_pb2.ClientSummary
  # RDF types referenced by fields of the protobuf.
  rdf_deps = [
      ClientInformation,
      ClientURN,
      EdrAgent,
      rdf_client_network.Interface,
      rdfvalue.RDFDatetime,
      Uname,
      User,
      FleetspeakValidationInfo,
  ]
class VersionString(rdfvalue.RDFString):
  """An RDF string holding a dotted version string (e.g. "3.2.1")."""

  @property
  def versions(self):
    """Leading numeric components of the version, as a list of ints.

    Parsing stops at the first dot-separated component that is not a valid
    integer, so "3.2.1rc1" yields [3, 2].
    """
    parsed = []
    for component in str(self).split("."):
      try:
        number = int(component)
      except ValueError:
        break
      parsed.append(number)
    return parsed
| apache-2.0 |
cermegno/Redis-test | _V3/step3.py | 1 | 1699 | #!/usr/bin/env python3
import os
import redis
import json
from flask import Flask, render_template, redirect, request, url_for, make_response
# When running on Cloud Foundry, the Redis Cloud connection details are
# supplied through the VCAP_SERVICES environment variable (JSON); otherwise
# fall back to a local Redis on the default port.
if 'VCAP_SERVICES' in os.environ:
    VCAP_SERVICES = json.loads(os.environ['VCAP_SERVICES'])
    CREDENTIALS = VCAP_SERVICES["rediscloud"][0]["credentials"]
    r = redis.Redis(host=CREDENTIALS["hostname"], port=CREDENTIALS["port"], password=CREDENTIALS["password"])
else:
    # NOTE(review): port is passed as a string here; redis-py normally takes
    # an int — confirm this works with the redis-py version in use.
    r = redis.Redis(host='127.0.0.1', port='6379')
app = Flask(__name__)
@app.route('/')
def survey():
    """Serve the survey form page at the site root."""
    return make_response(render_template('survey.html'))
@app.route('/suthankyou.html', methods=['POST'])
def suthankyou():
    """Handle the survey POST: echo the answers to the log, store them in
    the Redis hash 'newsurvey', read them back, and return a thank-you page.

    NOTE(review): every submission overwrites the same 'newsurvey' hash, so
    only the most recent survey is retained -- presumably intentional for
    this demo; confirm before reusing.
    """
    d = request.form['division']
    s = request.form['state']
    f = request.form['feedback']
    #### It gets stored as 'str'
    ##print (type(d))
    print ("Division is " + d)
    print ("State is " + s)
    print ("Feedback: " + f)
    print ("Storing the survey now")
    # NOTE(review): hmset() is deprecated in redis-py 3.5+ in favour of
    # hset(name, mapping=...) -- works here, but worth migrating.
    r.hmset('newsurvey',{'division':d,'state':str(s),'feedback':f})
    #### When retrieved is 'bytes'
    ##print (type(r.hget('newsurvey','division')))
    print ("--------------------------")
    print ("Reading it back from Redis")
    ####Need to use str(bytes_string, 'utf-8') to display properly without b''
    print ("Division : ", str(r.hget('newsurvey','division'),'utf-8'))
    print ("State : ", str(r.hget('newsurvey','state'),'utf-8'))
    print ("Feedback : ", str(r.hget('newsurvey','feedback'),'utf-8'))
    resp = """
    <h3> - THANKS FOR TAKING THE SURVEY - </h3>
    """
    return resp
if __name__ == "__main__":
    # Bind to all interfaces and honour the platform-assigned PORT
    # (Cloud Foundry / Heroku style), defaulting to 5000 for local runs.
    app.run(debug=False, host='0.0.0.0', \
            port=int(os.getenv('PORT', '5000')), threaded=True)
| mit |
Integral-Technology-Solutions/ConfigNOW-4.3 | Lib/xml/dom/minidom.py | 3 | 30458 | """\
minidom.py -- a lightweight DOM implementation.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import string
_string = string
del string
from xml.dom import HierarchyRequestErr
# localize the types, and allow support for Unicode values if available:
import types
_TupleType = types.TupleType
try:
_StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
_StringTypes = (types.StringType,)
del types
import xml.dom
_Node = xml.dom.Node
class Node(_Node):
    """Abstract base for all DOM node types.

    Implements the shared tree machinery: child/sibling bookkeeping,
    insertion/removal/replacement, normalization, cloning and the
    serialization entry points (toxml/toprettyxml).
    """
    # Debug registry of live nodes; only populated when _debug is true.
    allnodes = {}
    _debug = 0
    # When true, children keep a .parentNode backreference. This creates
    # reference cycles which unlink() exists to break.
    _makeParentNodes = 1
    debug = None
    # Node types a subclass accepts as children; () means a leaf node.
    childNodeTypes = ()
    namespaceURI = None # this is non-null only for elements and attributes
    def __init__(self):
        self.childNodes = []
        self.parentNode = self.ownerDocument = None
        if Node._debug:
            index = repr(id(self)) + repr(self.__class__)
            Node.allnodes[index] = repr(self.__dict__)
            if Node.debug is None:
                Node.debug = _get_StringIO()
                #open("debug4.out", "w")
            Node.debug.write("create %s\n" % index)
    def __getattr__(self, key):
        # Emulates DOM read-only attributes: looking up "foo" falls back
        # to calling self._get_foo(), and looking up "_get_foo" synthesizes
        # an accessor for a plain attribute "foo". The "inGetAttr" flag in
        # the instance dict guards against infinite recursion.
        if key[0:2] == "__":
            raise AttributeError, key
        # getattr should never call getattr!
        if self.__dict__.has_key("inGetAttr"):
            del self.inGetAttr
            raise AttributeError, key
        prefix, attrname = key[:5], key[5:]
        if prefix == "_get_":
            self.inGetAttr = 1
            if hasattr(self, attrname):
                del self.inGetAttr
                return (lambda self=self, attrname=attrname:
                        getattr(self, attrname))
            else:
                del self.inGetAttr
                raise AttributeError, key
        else:
            self.inGetAttr = 1
            try:
                func = getattr(self, "_get_" + key)
            except AttributeError:
                raise AttributeError, key
            del self.inGetAttr
            return func()
    def __nonzero__(self):
        # Every node is truthy, even one with no children.
        return 1
    def toxml(self):
        """Return this subtree serialized as an XML string."""
        writer = _get_StringIO()
        self.writexml(writer)
        return writer.getvalue()
    def toprettyxml(self, indent="\t", newl="\n"):
        """Return indented XML; indent/newl control the added whitespace."""
        # indent = the indentation string to prepend, per level
        # newl = the newline string to append
        writer = _get_StringIO()
        self.writexml(writer, "", indent, newl)
        return writer.getvalue()
    def hasChildNodes(self):
        # Returns 1/0 rather than a bool (pre-bool Python style).
        if self.childNodes:
            return 1
        else:
            return 0
    def _get_firstChild(self):
        # Returns None implicitly for a childless node.
        if self.childNodes:
            return self.childNodes[0]
    def _get_lastChild(self):
        if self.childNodes:
            return self.childNodes[-1]
    def insertBefore(self, newChild, refChild):
        """Insert newChild before refChild (or append when refChild is None)."""
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            # A fragment inserts its children, not itself.
            # NOTE(review): each insertBefore() removes c from the
            # fragment's childNodes while this loop iterates over that same
            # list -- looks like every other child could be skipped; verify.
            for c in newChild.childNodes:
                self.insertBefore(c, refChild)
            ### The DOM does not clearly specify what to return in this case
            return newChild
        if newChild.nodeType not in self.childNodeTypes:
            raise HierarchyRequestErr, \
                  "%s cannot be child of %s" % (repr(newChild), repr(self))
        if newChild.parentNode is not None:
            # A node can live in only one place in the tree.
            newChild.parentNode.removeChild(newChild)
        if refChild is None:
            self.appendChild(newChild)
        else:
            index = self.childNodes.index(refChild)
            self.childNodes.insert(index, newChild)
            # Stitch the doubly-linked sibling pointers around newChild.
            newChild.nextSibling = refChild
            refChild.previousSibling = newChild
            if index:
                node = self.childNodes[index-1]
                node.nextSibling = newChild
                newChild.previousSibling = node
            else:
                newChild.previousSibling = None
            if self._makeParentNodes:
                newChild.parentNode = self
        return newChild
    def appendChild(self, node):
        """Append node as the last child and return it."""
        if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            # NOTE(review): same iterate-while-removing pattern as
            # insertBefore above -- verify fragment handling.
            for c in node.childNodes:
                self.appendChild(c)
            ### The DOM does not clearly specify what to return in this case
            return node
        if node.nodeType not in self.childNodeTypes:
            raise HierarchyRequestErr, \
                  "%s cannot be child of %s" % (repr(node), repr(self))
        if node.parentNode is not None:
            node.parentNode.removeChild(node)
        if self.childNodes:
            last = self.lastChild
            node.previousSibling = last
            last.nextSibling = node
        else:
            node.previousSibling = None
        node.nextSibling = None
        self.childNodes.append(node)
        if self._makeParentNodes:
            node.parentNode = self
        return node
    def replaceChild(self, newChild, oldChild):
        """Replace oldChild with newChild in place; return oldChild."""
        if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
            # Expand a fragment via remove + insertBefore at the old slot.
            refChild = oldChild.nextSibling
            self.removeChild(oldChild)
            return self.insertBefore(newChild, refChild)
        if newChild.nodeType not in self.childNodeTypes:
            raise HierarchyRequestErr, \
                  "%s cannot be child of %s" % (repr(newChild), repr(self))
        if newChild.parentNode is not None:
            newChild.parentNode.removeChild(newChild)
        if newChild is oldChild:
            # Replacing a node with itself is a no-op (returns None).
            return
        index = self.childNodes.index(oldChild)
        self.childNodes[index] = newChild
        if self._makeParentNodes:
            newChild.parentNode = self
            oldChild.parentNode = None
        # Transfer the sibling links from oldChild to newChild.
        newChild.nextSibling = oldChild.nextSibling
        newChild.previousSibling = oldChild.previousSibling
        oldChild.nextSibling = None
        oldChild.previousSibling = None
        if newChild.previousSibling:
            newChild.previousSibling.nextSibling = newChild
        if newChild.nextSibling:
            newChild.nextSibling.previousSibling = newChild
        return oldChild
    def removeChild(self, oldChild):
        """Detach oldChild from this node and return it."""
        self.childNodes.remove(oldChild)
        # Bridge the sibling pointers across the removed node.
        if oldChild.nextSibling is not None:
            oldChild.nextSibling.previousSibling = oldChild.previousSibling
        if oldChild.previousSibling is not None:
            oldChild.previousSibling.nextSibling = oldChild.nextSibling
        oldChild.nextSibling = oldChild.previousSibling = None
        if self._makeParentNodes:
            oldChild.parentNode = None
        return oldChild
    def normalize(self):
        """Merge adjacent Text children and drop empty ones (recursively)."""
        L = []
        for child in self.childNodes:
            if child.nodeType == Node.TEXT_NODE:
                data = child.data
                if data and L and L[-1].nodeType == child.nodeType:
                    # collapse text node
                    node = L[-1]
                    node.data = node.nodeValue = node.data + child.data
                    node.nextSibling = child.nextSibling
                    child.unlink()
                elif data:
                    if L:
                        L[-1].nextSibling = child
                        child.previousSibling = L[-1]
                    else:
                        child.previousSibling = None
                    L.append(child)
                else:
                    # empty text node; discard
                    child.unlink()
            else:
                if L:
                    L[-1].nextSibling = child
                    child.previousSibling = L[-1]
                else:
                    child.previousSibling = None
                L.append(child)
                if child.nodeType == Node.ELEMENT_NODE:
                    child.normalize()
        self.childNodes[:] = L
    def cloneNode(self, deep):
        """Return a copy of this node; when deep is true, copy the subtree."""
        import new
        # new.instance builds an instance without calling __init__.
        clone = new.instance(self.__class__, self.__dict__.copy())
        if self._makeParentNodes:
            clone.parentNode = None
        clone.childNodes = []
        if deep:
            for child in self.childNodes:
                clone.appendChild(child.cloneNode(1))
        return clone
    # DOM Level 3 (Working Draft 2001-Jan-26)
    def isSameNode(self, other):
        return self is other
    # minidom-specific API:
    def unlink(self):
        """Break parent/child/sibling cycles so the GC can reclaim the tree."""
        self.parentNode = self.ownerDocument = None
        for child in self.childNodes:
            child.unlink()
        self.childNodes = None
        self.previousSibling = None
        self.nextSibling = None
        if Node._debug:
            index = repr(id(self)) + repr(self.__class__)
            self.debug.write("Deleting: %s\n" % index)
            del Node.allnodes[index]
def _write_data(writer, data):
"Writes datachars to writer."
replace = _string.replace
data = replace(data, "&", "&")
data = replace(data, "<", "<")
data = replace(data, "\"", """)
data = replace(data, ">", ">")
writer.write(data)
def _getElementsByTagNameHelper(parent, name, rc):
    """Append to rc, in document order, every descendant element of
    *parent* whose tagName matches *name* ("*" matches any element).

    Bug fix: the recursive call was nested inside the match test, so
    matching elements below a non-matching ancestor were never found.
    Descending into every child unconditionally (non-element nodes have
    empty childNodes, so recursing into them is harmless) restores full
    DOM getElementsByTagName semantics, matching upstream minidom.
    """
    for node in parent.childNodes:
        if node.nodeType == Node.ELEMENT_NODE and \
            (name == "*" or node.tagName == name):
            rc.append(node)
        _getElementsByTagNameHelper(node, name, rc)
    return rc
def _getElementsByTagNameNSHelper(parent, nsURI, localName, rc):
    """Collect descendant elements of *parent* matching (nsURI, localName)
    into rc, in document order; "*" is a wildcard for either component."""
    for child in parent.childNodes:
        if child.nodeType != Node.ELEMENT_NODE:
            continue
        name_matches = (localName == "*" or child.localName == localName)
        ns_matches = (nsURI == "*" or child.namespaceURI == nsURI)
        if name_matches and ns_matches:
            rc.append(child)
        _getElementsByTagNameNSHelper(child, nsURI, localName, rc)
    return rc
class DocumentFragment(Node):
    """Lightweight container whose children are inserted in its place."""
    nodeType = Node.DOCUMENT_FRAGMENT_NODE
    nodeName = "#document-fragment"
    nodeValue = None
    attributes = None
    parentNode = None
    # A fragment may hold anything an Element may hold, plus notations.
    childNodeTypes = (Node.ELEMENT_NODE,
                      Node.TEXT_NODE,
                      Node.CDATA_SECTION_NODE,
                      Node.ENTITY_REFERENCE_NODE,
                      Node.PROCESSING_INSTRUCTION_NODE,
                      Node.COMMENT_NODE,
                      Node.NOTATION_NODE)
class Attr(Node):
    """A single attribute node; its value lives in .value/.nodeValue."""
    nodeType = Node.ATTRIBUTE_NODE
    attributes = None
    # Element that owns this attribute, set by Element.setAttributeNode.
    ownerElement = None
    childNodeTypes = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
    def __init__(self, qName, namespaceURI="", localName=None, prefix=None):
        # skip setattr for performance
        d = self.__dict__
        d["localName"] = localName or qName
        d["nodeName"] = d["name"] = qName
        d["namespaceURI"] = namespaceURI
        d["prefix"] = prefix
        Node.__init__(self)
        # nodeValue and value are set elsewhere
    def __setattr__(self, name, value):
        # Keep the DOM alias pairs (value/nodeValue, name/nodeName) in sync.
        d = self.__dict__
        if name in ("value", "nodeValue"):
            d["value"] = d["nodeValue"] = value
        elif name in ("name", "nodeName"):
            d["name"] = d["nodeName"] = value
        else:
            d[name] = value
    def cloneNode(self, deep):
        # The clone starts unowned; drop any copied ownerElement reference.
        clone = Node.cloneNode(self, deep)
        if clone.__dict__.has_key("ownerElement"):
            del clone.ownerElement
        return clone
class NamedNodeMap:
    """The attribute list is a transient interface to the underlying
    dictionaries.  Mutations here will change the underlying element's
    dictionary.

    Ordering is imposed artificially and does not reflect the order of
    attributes as found in an input document.
    """
    def __init__(self, attrs, attrsNS):
        # Both dicts are shared with (not copied from) the owning Element:
        # _attrs maps qualified name -> Attr, _attrsNS maps
        # (namespaceURI, localName) -> Attr.
        self._attrs = attrs
        self._attrsNS = attrsNS
    def __getattr__(self, name):
        # "length" is computed on demand from the underlying dict.
        if name == "length":
            return len(self._attrs)
        raise AttributeError, name
    def item(self, index):
        # Dict key order is arbitrary, so item() order is too (see class doc).
        try:
            return self[self._attrs.keys()[index]]
        except IndexError:
            return None
    def items(self):
        """Return a list of (qualified name, value) pairs."""
        L = []
        for node in self._attrs.values():
            L.append((node.nodeName, node.value))
        return L
    def itemsNS(self):
        """Return a list of ((namespaceURI, localName), value) pairs.

        NOTE(review): reads node.URI, which Attr never defines -- via
        Node.__getattr__ this would try _get_URI and likely raise
        AttributeError; looks like it should be node.namespaceURI. Verify.
        """
        L = []
        for node in self._attrs.values():
            L.append(((node.URI, node.localName), node.value))
        return L
    def keys(self):
        return self._attrs.keys()
    def keysNS(self):
        return self._attrsNS.keys()
    def values(self):
        return self._attrs.values()
    def get(self, name, value = None):
        return self._attrs.get(name, value)
    def __len__(self):
        return self.length
    def __cmp__(self, other):
        # Two maps are equal iff they wrap the very same attribute dict.
        if self._attrs is getattr(other, "_attrs", None):
            return 0
        else:
            return cmp(id(self), id(other))
    #FIXME: is it appropriate to return .value?
    def __getitem__(self, attname_or_tuple):
        # Tuple key -> namespace lookup; string key -> qualified-name lookup.
        if type(attname_or_tuple) is _TupleType:
            return self._attrsNS[attname_or_tuple]
        else:
            return self._attrs[attname_or_tuple]
    # same as set
    def __setitem__(self, attname, value):
        if type(value) in _StringTypes:
            node = Attr(attname)
            node.value = value
        else:
            if not isinstance(value, Attr):
                raise TypeError, "value must be a string or Attr object"
            node = value
        self.setNamedItem(node)
    def setNamedItem(self, node):
        """Store node under both indexes; return the displaced Attr, if any."""
        if not isinstance(node, Attr):
            raise HierarchyRequestErr, \
                  "%s cannot be child of %s" % (repr(node), repr(self))
        old = self._attrs.get(node.name)
        if old:
            old.unlink()
        self._attrs[node.name] = node
        self._attrsNS[(node.namespaceURI, node.localName)] = node
        return old
    def setNamedItemNS(self, node):
        return self.setNamedItem(node)
    def __delitem__(self, attname_or_tuple):
        node = self[attname_or_tuple]
        node.unlink()
        del self._attrs[node.name]
        del self._attrsNS[(node.namespaceURI, node.localName)]
        # NOTE(review): this caches length as an instance attribute,
        # shadowing the dynamic __getattr__ value; it will go stale if
        # attributes are added afterwards. Verify before relying on .length.
        self.length = len(self._attrs)
# Backward-compatibility alias for the pre-DOM name of this class.
AttributeList = NamedNodeMap
class Element(Node):
    """An XML element: tag name, attributes (double-indexed) and children."""
    nodeType = Node.ELEMENT_NODE
    nextSibling = None
    previousSibling = None
    childNodeTypes = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
                      Node.COMMENT_NODE, Node.TEXT_NODE,
                      Node.CDATA_SECTION_NODE, Node.ENTITY_REFERENCE_NODE)
    def __init__(self, tagName, namespaceURI=None, prefix="",
                 localName=None):
        Node.__init__(self)
        self.tagName = self.nodeName = tagName
        self.localName = localName or tagName
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.nodeValue = None
        self._attrs = {}  # attributes are double-indexed:
        self._attrsNS = {}#    tagName -> Attribute
                          #    URI,localName -> Attribute
                          # in the future: consider lazy generation
                          # of attribute objects this is too tricky
                          # for now because of headaches with
                          # namespaces.
    def cloneNode(self, deep):
        # Clone attributes too (always deeply) and re-own them to the clone.
        clone = Node.cloneNode(self, deep)
        clone._attrs = {}
        clone._attrsNS = {}
        for attr in self._attrs.values():
            node = attr.cloneNode(1)
            clone._attrs[node.name] = node
            clone._attrsNS[(node.namespaceURI, node.localName)] = node
            node.ownerElement = clone
        return clone
    def unlink(self):
        # Drop the attribute dicts to break the Attr.ownerElement cycle.
        for attr in self._attrs.values():
            attr.unlink()
        self._attrs = None
        self._attrsNS = None
        Node.unlink(self)
    def getAttribute(self, attname):
        """Return the attribute's value, or "" when it is absent."""
        try:
            return self._attrs[attname].value
        except KeyError:
            return ""
    def getAttributeNS(self, namespaceURI, localName):
        """Namespace-aware variant of getAttribute; "" when absent."""
        try:
            return self._attrsNS[(namespaceURI, localName)].value
        except KeyError:
            return ""
    def setAttribute(self, attname, value):
        attr = Attr(attname)
        # for performance
        attr.__dict__["value"] = attr.__dict__["nodeValue"] = value
        self.setAttributeNode(attr)
    def setAttributeNS(self, namespaceURI, qualifiedName, value):
        prefix, localname = _nssplit(qualifiedName)
        # for performance
        attr = Attr(qualifiedName, namespaceURI, localname, prefix)
        attr.__dict__["value"] = attr.__dict__["nodeValue"] = value
        self.setAttributeNode(attr)
    def getAttributeNode(self, attrname):
        return self._attrs.get(attrname)
    def getAttributeNodeNS(self, namespaceURI, localName):
        return self._attrsNS.get((namespaceURI, localName))
    def setAttributeNode(self, attr):
        """Attach attr under both indexes; return the displaced Attr, if any."""
        if attr.ownerElement not in (None, self):
            raise xml.dom.InuseAttributeErr("attribute node already owned")
        old = self._attrs.get(attr.name, None)
        if old:
            old.unlink()
        self._attrs[attr.name] = attr
        self._attrsNS[(attr.namespaceURI, attr.localName)] = attr
        # This creates a circular reference, but Element.unlink()
        # breaks the cycle since the references to the attribute
        # dictionaries are tossed.
        attr.ownerElement = self
        if old is not attr:
            # It might have already been part of this node, in which case
            # it doesn't represent a change, and should not be returned.
            return old
    setAttributeNodeNS = setAttributeNode
    def removeAttribute(self, name):
        # Raises KeyError (not DOM NotFoundErr) when the attribute is absent.
        attr = self._attrs[name]
        self.removeAttributeNode(attr)
    def removeAttributeNS(self, namespaceURI, localName):
        attr = self._attrsNS[(namespaceURI, localName)]
        self.removeAttributeNode(attr)
    def removeAttributeNode(self, node):
        node.unlink()
        del self._attrs[node.name]
        del self._attrsNS[(node.namespaceURI, node.localName)]
    removeAttributeNodeNS = removeAttributeNode
    def hasAttribute(self, name):
        return self._attrs.has_key(name)
    def hasAttributeNS(self, namespaceURI, localName):
        return self._attrsNS.has_key((namespaceURI, localName))
    def getElementsByTagName(self, name):
        return _getElementsByTagNameHelper(self, name, [])
    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _getElementsByTagNameNSHelper(self, namespaceURI, localName, [])
    def __repr__(self):
        return "<DOM Element: %s at %s>" % (self.tagName, id(self))
    def writexml(self, writer, indent="", addindent="", newl=""):
        """Serialize this element; attributes are written in sorted order."""
        # indent = current indentation
        # addindent = indentation to add to higher levels
        # newl = newline string
        writer.write(indent+"<" + self.tagName)
        attrs = self._get_attributes()
        a_names = attrs.keys()
        a_names.sort()
        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            _write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            writer.write(">%s"%(newl))
            for node in self.childNodes:
                node.writexml(writer,indent+addindent,addindent,newl)
            writer.write("%s</%s>%s" % (indent,self.tagName,newl))
        else:
            # No children: emit the self-closing form.
            writer.write("/>%s"%(newl))
    def _get_attributes(self):
        # Fresh transient view over the live attribute dicts (see NamedNodeMap).
        return AttributeList(self._attrs, self._attrsNS)
    def hasAttributes(self):
        if self._attrs or self._attrsNS:
            return 1
        else:
            return 0
class Comment(Node):
    """An XML comment node; the text between <!-- and -->."""
    nodeType = Node.COMMENT_NODE
    nodeName = "#comment"
    attributes = None
    childNodeTypes = ()
    def __init__(self, data):
        Node.__init__(self)
        self.data = self.nodeValue = data
    def writexml(self, writer, indent="", addindent="", newl=""):
        # Comment text is written verbatim (no escaping is performed).
        writer.write("%s<!--%s-->%s" % (indent,self.data,newl))
class ProcessingInstruction(Node):
    """A processing instruction node: <?target data?>."""
    nodeType = Node.PROCESSING_INSTRUCTION_NODE
    attributes = None
    childNodeTypes = ()
    def __init__(self, target, data):
        Node.__init__(self)
        self.target = self.nodeName = target
        self.data = self.nodeValue = data
    def writexml(self, writer, indent="", addindent="", newl=""):
        # Target and data are written verbatim (no escaping is performed).
        writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Node):
    """Base for text-bearing nodes (Text, CDATASection, and kin).

    Keeps .data, .nodeValue and .length mutually consistent through the
    DOM string-manipulation methods below.
    """
    def __init__(self, data):
        if type(data) not in _StringTypes:
            raise TypeError, "node contents must be a string"
        Node.__init__(self)
        self.data = self.nodeValue = data
        self.length = len(data)
    def __repr__(self):
        # Show at most 10 characters of the data, with an ellipsis if cut.
        if len(self.data) > 10:
            dotdotdot = "..."
        else:
            dotdotdot = ""
        return "<DOM %s node \"%s%s\">" % (
            self.__class__.__name__, self.data[0:10], dotdotdot)
    def substringData(self, offset, count):
        """Return up to count characters starting at offset."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        return self.data[offset:offset+count]
    def appendData(self, arg):
        """Append arg to the node's data."""
        self.data = self.data + arg
        self.nodeValue = self.data
        self.length = len(self.data)
    def insertData(self, offset, arg):
        """Insert arg into the data at offset.

        NOTE(review): the ">=" check rejects offset == len(data), so
        inserting at the very end raises IndexSizeErr; DOM allows it and
        later upstream versions use ">". Verify before relying on it.
        """
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if arg:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset:])
            self.nodeValue = self.data
            self.length = len(self.data)
    def deleteData(self, offset, count):
        """Remove count characters starting at offset."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = self.data[:offset] + self.data[offset+count:]
            self.nodeValue = self.data
            self.length = len(self.data)
    def replaceData(self, offset, count, arg):
        """Replace count characters starting at offset with arg."""
        if offset < 0:
            raise xml.dom.IndexSizeErr("offset cannot be negative")
        if offset >= len(self.data):
            raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
        if count < 0:
            raise xml.dom.IndexSizeErr("count cannot be negative")
        if count:
            self.data = "%s%s%s" % (
                self.data[:offset], arg, self.data[offset+count:])
            self.nodeValue = self.data
            self.length = len(self.data)
class Text(CharacterData):
    """A run of character data; serialized with XML escaping."""
    nodeType = Node.TEXT_NODE
    nodeName = "#text"
    attributes = None
    childNodeTypes = ()
    def splitText(self, offset):
        """Split this node at offset; the tail becomes a new sibling Text
        node, which is returned."""
        if offset < 0 or offset > len(self.data):
            raise xml.dom.IndexSizeErr("illegal offset value")
        newText = Text(self.data[offset:])
        next = self.nextSibling
        # Only splice the new node into the tree if we are actually in one.
        if self.parentNode and self in self.parentNode.childNodes:
            if next is None:
                self.parentNode.appendChild(newText)
            else:
                self.parentNode.insertBefore(newText, next)
        self.data = self.data[:offset]
        self.nodeValue = self.data
        self.length = len(self.data)
        return newText
    def writexml(self, writer, indent="", addindent="", newl=""):
        # Indentation/newline strings are escaped together with the data.
        _write_data(writer, "%s%s%s"%(indent, self.data, newl))
class CDATASection(Text):
    """Text wrapped in <![CDATA[...]]> on output.

    NOTE(review): the data is still routed through _write_data, so special
    characters get entity-escaped inside the CDATA section; later upstream
    versions write the data raw. Verify which behavior callers expect.
    """
    nodeType = Node.CDATA_SECTION_NODE
    nodeName = "#cdata-section"
    def writexml(self, writer, indent="", addindent="", newl=""):
        _write_data(writer, "<![CDATA[%s]]>" % self.data)
def _nssplit(qualifiedName):
    """Split a qualified name into (prefix, localName).

    Yields a two-item list when a ':' is present, or the tuple
    ('', name) when there is no prefix; callers unpack either shape.
    """
    pieces = qualifiedName.split(':', 1)
    if len(pieces) == 2:
        return pieces
    return ('', pieces[0])
class DocumentType(Node):
    """A <!DOCTYPE ...> declaration (minimal, mostly-inert support)."""
    nodeType = Node.DOCUMENT_TYPE_NODE
    nodeValue = None
    attributes = None
    name = None
    publicId = None
    systemId = None
    internalSubset = None
    # Entity/notation maps are never populated by this implementation.
    entities = None
    notations = None
    def __init__(self, qualifiedName):
        Node.__init__(self)
        if qualifiedName:
            # Only the local part of the name is retained; prefix is dropped.
            prefix, localname = _nssplit(qualifiedName)
            self.name = localname
class DOMImplementation:
    """Factory for Document and DocumentType objects (DOM Level 2 subset)."""
    def hasFeature(self, feature, version):
        """Only the "core" feature at versions 1.0/2.0 is claimed."""
        if version not in ("1.0", "2.0"):
            return 0
        feature = _string.lower(feature)
        return feature == "core"
    def createDocument(self, namespaceURI, qualifiedName, doctype):
        """Create a Document with a root element named qualifiedName.

        A non-empty qualifiedName is required; the optional doctype must
        not already belong to another document.
        """
        if doctype and doctype.parentNode is not None:
            raise xml.dom.WrongDocumentErr(
                "doctype object owned by another DOM tree")
        doc = self._createDocument()
        if doctype is None:
            doctype = self.createDocumentType(qualifiedName, None, None)
        if not qualifiedName:
            # The spec is unclear what to raise here; SyntaxErr
            # would be the other obvious candidate. Since Xerces raises
            # InvalidCharacterErr, and since SyntaxErr is not listed
            # for createDocument, that seems to be the better choice.
            # XXX: need to check for illegal characters here and in
            # createElement.
            raise xml.dom.InvalidCharacterErr("Element with no name")
        prefix, localname = _nssplit(qualifiedName)
        if prefix == "xml" \
           and namespaceURI != "http://www.w3.org/XML/1998/namespace":
            raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
        if prefix and not namespaceURI:
            raise xml.dom.NamespaceErr(
                "illegal use of prefix without namespaces")
        element = doc.createElementNS(namespaceURI, qualifiedName)
        doc.appendChild(element)
        # Adopt the doctype into the new document.
        doctype.parentNode = doctype.ownerDocument = doc
        doc.doctype = doctype
        doc.implementation = self
        return doc
    def createDocumentType(self, qualifiedName, publicId, systemId):
        doctype = DocumentType(qualifiedName)
        doctype.publicId = publicId
        doctype.systemId = systemId
        return doctype
    # internal
    def _createDocument(self):
        return Document()
class Document(Node):
    """The document root; owns the tree and provides the node factories."""
    nodeType = Node.DOCUMENT_NODE
    nodeName = "#document"
    nodeValue = None
    attributes = None
    doctype = None
    parentNode = None
    previousSibling = nextSibling = None
    implementation = DOMImplementation()
    childNodeTypes = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
                      Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
    def appendChild(self, node):
        if node.nodeType not in self.childNodeTypes:
            raise HierarchyRequestErr, \
                  "%s cannot be child of %s" % (repr(node), repr(self))
        if node.parentNode is not None:
            node.parentNode.removeChild(node)
        # A document may have at most one element child (the root element).
        if node.nodeType == Node.ELEMENT_NODE \
           and self._get_documentElement():
            raise xml.dom.HierarchyRequestErr(
                "two document elements disallowed")
        return Node.appendChild(self, node)
    def removeChild(self, oldChild):
        self.childNodes.remove(oldChild)
        oldChild.nextSibling = oldChild.previousSibling = None
        oldChild.parentNode = None
        if self.documentElement is oldChild:
            # NOTE(review): this stores None as an instance attribute,
            # which shadows the dynamic _get_documentElement lookup from
            # then on -- a root element appended later would not be seen
            # via .documentElement. Verify before relying on it.
            self.documentElement = None
        return oldChild
    def _get_documentElement(self):
        # The single element child, reachable as .documentElement.
        for node in self.childNodes:
            if node.nodeType == Node.ELEMENT_NODE:
                return node
    def unlink(self):
        if self.doctype is not None:
            self.doctype.unlink()
            self.doctype = None
        Node.unlink(self)
    def createDocumentFragment(self):
        d = DocumentFragment()
        # NOTE(review): sets "ownerDoc", not the DOM name "ownerDocument";
        # looks like a typo carried from upstream. Verify.
        d.ownerDoc = self
        return d
    def createElement(self, tagName):
        e = Element(tagName)
        e.ownerDocument = self
        return e
    def createTextNode(self, data):
        t = Text(data)
        t.ownerDocument = self
        return t
    def createCDATASection(self, data):
        c = CDATASection(data)
        c.ownerDocument = self
        return c
    def createComment(self, data):
        c = Comment(data)
        c.ownerDocument = self
        return c
    def createProcessingInstruction(self, target, data):
        p = ProcessingInstruction(target, data)
        p.ownerDocument = self
        return p
    def createAttribute(self, qName):
        a = Attr(qName)
        a.ownerDocument = self
        a.value = ""
        return a
    def createElementNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        e = Element(qualifiedName, namespaceURI, prefix, localName)
        e.ownerDocument = self
        return e
    def createAttributeNS(self, namespaceURI, qualifiedName):
        prefix, localName = _nssplit(qualifiedName)
        a = Attr(qualifiedName, namespaceURI, localName, prefix)
        a.ownerDocument = self
        a.value = ""
        return a
    def getElementsByTagName(self, name):
        return _getElementsByTagNameHelper(self, name, [])
    def getElementsByTagNameNS(self, namespaceURI, localName):
        return _getElementsByTagNameNSHelper(self, namespaceURI, localName, [])
    def writexml(self, writer, indent="", addindent="", newl=""):
        # The XML declaration is always written, regardless of indent args.
        writer.write('<?xml version="1.0" ?>\n')
        for node in self.childNodes:
            node.writexml(writer, indent, addindent, newl)
def _get_StringIO():
    """Return a fresh StringIO for building serialized output."""
    # we can't use cStringIO since it doesn't support Unicode strings
    from StringIO import StringIO
    return StringIO()
def _doparse(func, args, kwargs):
events = apply(func, args, kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(*args, **kwargs):
    """Parse a file into a DOM by filename or file object."""
    # Import deferred so minidom loads without pulldom until first use.
    from xml.dom import pulldom
    return _doparse(pulldom.parse, args, kwargs)
def parseString(*args, **kwargs):
    """Parse a file into a DOM from a string."""
    # Import deferred so minidom loads without pulldom until first use.
    from xml.dom import pulldom
    return _doparse(pulldom.parseString, args, kwargs)
def getDOMImplementation():
    """Return the module's shared DOMImplementation singleton."""
    return Document.implementation
| mit |
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/xenserver/xenserver_guest.py | 11 | 98114 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata, consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: xenserver_guest
short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
modify various virtual machine components like network and disk, rename a virtual machine and
remove a virtual machine with associated components.
version_added: '2.8'
author:
- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
notes:
- Minimal supported version of XenServer is 5.6.
- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI.py'
- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest
agent only support None and Static types of network configuration, where None means DHCP configured interface, C(networks.type) and C(networks.type6)
values C(none) and C(dhcp) have same effect. More info here:
https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html'
- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or trough
WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
to implement a boot time scripts or custom agent that will read the parameters from xenstore and configure network with given parameters.
Take note that for xenstore data to become available inside a guest, a VM restart is needed hence module will require VM restart if any
parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration trough xenstore is most
useful for bootstraping newly deployed VMs, much less for reconfiguring existing ones. More info here:
https://support.citrix.com/article/CTX226713'
requirements:
- python >= 2.6
- XenAPI
options:
state:
description:
- Specify the state VM should be in.
- If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
- If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
- If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
- If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
type: str
default: present
choices: [ present, absent, poweredon ]
name:
description:
- Name of the VM to work with.
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
- In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage.
- This parameter is case sensitive.
type: str
required: yes
aliases: [ name_label ]
name_desc:
description:
- VM description.
type: str
uuid:
description:
- UUID of the VM to manage if known. This is XenServer's unique identifier.
- It is required if name is not unique.
- Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
type: str
template:
description:
- Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
- Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found.
- In case of multiple templates/VMs/snapshots with same name, use C(template_uuid) to uniquely specify source template.
- If VM already exists, this setting will be ignored.
- This parameter is case sensitive.
type: str
aliases: [ template_src ]
template_uuid:
description:
- UUID of a template, an existing VM or a snapshot that should be used to create VM.
- It is required if template name is not unique.
type: str
is_template:
description:
- Convert VM to template.
type: bool
default: no
folder:
description:
- Destination folder for VM.
- This parameter is case sensitive.
- 'Example:'
- ' folder: /folder1/folder2'
type: str
hardware:
description:
- Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
- 'Valid parameters are:'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
type: dict
disks:
description:
- A list of disks to add to VM.
- All parameters are case sensitive.
- Removing or detaching existing disks of VM is not supported.
- 'Required parameters per entry:'
- ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
- 'Optional parameters per entry:'
- ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
- ' - C(name_desc) (string): Disk description.'
- ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
- ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
type: list
aliases: [ disk ]
cdrom:
description:
- A CD-ROM configuration for the VM.
- All parameters are case sensitive.
- 'Valid parameters are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
- ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
Required if C(type) is set to C(iso).'
type: dict
networks:
description:
- A list of networks (in the order of the NICs).
- All parameters are case sensitive.
- 'Required parameters per entry:'
- ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
- 'Optional parameters per entry (used for VM hardware):'
- ' - C(mac) (string): Customize MAC address of the interface.'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
- ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
- ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
- ' - C(gateway) (string): Static IPv4 gateway.'
- ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
- ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
- ' - C(gateway6) (string): Static IPv6 gateway.'
type: list
aliases: [ network ]
home_server:
description:
- Name of a XenServer host that will be a Home Server for the VM.
- This parameter is case sensitive.
type: str
custom_params:
description:
- Define a list of custom VM params to set on VM.
- Useful for advanced users familiar with managing VM params through xe CLI.
- A custom value object takes two fields C(key) and C(value) (see example below).
type: list
wait_for_ip_address:
description:
- Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
- This requires XenServer Tools to be preinstalled on the VM to work properly.
type: bool
default: no
state_change_timeout:
description:
- 'By default, module will wait indefinitely for VM to acquire an IP address if C(wait_for_ip_address: yes).'
- If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
- In case of timeout, module will generate an error message.
type: int
default: 0
linked_clone:
description:
- Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy.
- This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
type: bool
default: no
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down.
type: bool
default: no
extends_documentation_fragment: xenserver.documentation
'''
EXAMPLES = r'''
- name: Create a VM from a template
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: CentOS 7
disks:
- size_gb: 10
sr: my_sr
hardware:
num_cpus: 6
num_cpu_cores_per_socket: 3
memory_mb: 512
cdrom:
type: iso
iso_name: guest-tools.iso
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Create a VM template
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
validate_certs: no
folder: /testvms
name: testvm_6
is_template: yes
disk:
- size_gb: 10
sr: my_sr
hardware:
memory_mb: 512
num_cpus: 1
delegate_to: localhost
register: deploy
- name: Rename a VM (requires the VM's UUID)
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
name: new_name
state: present
delegate_to: localhost
- name: Remove a VM by UUID
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
state: absent
delegate_to: localhost
- name: Modify custom params (boot order)
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
name: testvm_8
state: present
custom_params:
- key: HVM_boot_params
value: { "order": "ndc" }
delegate_to: localhost
- name: Customize network parameters
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
name: testvm_10
networks:
- name: VM Network
ip: 192.168.1.100/24
gateway: 192.168.1.1
- type: dhcp
delegate_to: localhost
'''
RETURN = r'''
instance:
description: Metadata about the VM
returned: always
type: dict
sample: {
"cdrom": {
"type": "none"
},
"customization_agent": "native",
"disks": [
{
"name": "testvm_11-0",
"name_desc": "",
"os_device": "xvda",
"size": 42949672960,
"sr": "Local storage",
"sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
"vbd_userdevice": "0"
},
{
"name": "testvm_11-1",
"name_desc": "",
"os_device": "xvdb",
"size": 42949672960,
"sr": "Local storage",
"sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
"vbd_userdevice": "1"
}
],
"domid": "56",
"folder": "",
"hardware": {
"memory_mb": 8192,
"num_cpu_cores_per_socket": 2,
"num_cpus": 4
},
"home_server": "",
"is_template": false,
"name": "testvm_11",
"name_desc": "",
"networks": [
{
"gateway": "192.168.0.254",
"gateway6": "fc00::fffe",
"ip": "192.168.0.200",
"ip6": [
"fe80:0000:0000:0000:e9cb:625a:32c5:c291",
"fc00:0000:0000:0000:0000:0000:0000:0001"
],
"mac": "ba:91:3a:48:20:76",
"mtu": "1500",
"name": "Pool-wide network associated with eth1",
"netmask": "255.255.255.128",
"prefix": "25",
"prefix6": "64",
"vif_device": "0"
}
],
"other_config": {
"base_template_name": "Windows Server 2016 (64-bit)",
"import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
"install-methods": "cdrom",
"instant": "true",
"mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
},
"platform": {
"acpi": "1",
"apic": "true",
"cores-per-socket": "2",
"device_id": "0002",
"hpet": "true",
"nx": "true",
"pae": "true",
"timeoffset": "-25200",
"vga": "std",
"videoram": "8",
"viridian": "true",
"viridian_reference_tsc": "true",
"viridian_time_ref_count": "true"
},
"state": "poweredon",
"uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
"xenstore_data": {
"vm-data": ""
}
}
changes:
description: Detected or made changes to VM
returned: always
type: list
sample: [
{
"hardware": [
"num_cpus"
]
},
{
"disks_changed": [
[],
[
"size"
]
]
},
{
"disks_new": [
{
"name": "new-disk",
"name_desc": "",
"position": 2,
"size_gb": "4",
"vbd_userdevice": "2"
}
]
},
{
"cdrom": [
"type",
"iso_name"
]
},
{
"networks_changed": [
[
"mac"
],
]
},
{
"networks_new": [
{
"name": "Pool-wide network associated with eth2",
"position": 1,
"vif_device": "1"
}
]
},
"need_poweredoff"
]
'''
import re
HAS_XENAPI = False
try:
import XenAPI
HAS_XENAPI = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.network import is_mac
from ansible.module_utils import six
from ansible.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
gather_vm_params, gather_vm_facts, set_vm_power_state, wait_for_vm_ip_address,
is_valid_ip_addr, is_valid_ip_netmask, is_valid_ip_prefix,
ip_prefix_to_netmask, ip_netmask_to_prefix,
is_valid_ip6_addr, is_valid_ip6_prefix)
class XenServerVM(XenServerObject):
"""Class for managing XenServer VM.
Attributes:
vm_ref (str): XAPI reference to VM.
vm_params (dict): A dictionary with VM parameters as returned
by gather_vm_params() function.
"""
def __init__(self, module):
"""Inits XenServerVM using module parameters.
Args:
module: Reference to Ansible module object.
"""
super(XenServerVM, self).__init__(module)
self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
self.gather_params()
def exists(self):
"""Returns True if VM exists, else False."""
return True if self.vm_ref is not None else False
def gather_params(self):
"""Gathers all VM parameters available in XAPI database."""
self.vm_params = gather_vm_params(self.module, self.vm_ref)
def gather_facts(self):
"""Gathers and returns VM facts."""
return gather_vm_facts(self.module, self.vm_params)
def set_power_state(self, power_state):
"""Controls VM power state."""
state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
# If state has changed, update vm_params.
if state_changed:
self.vm_params['power_state'] = current_state.capitalize()
return state_changed
def wait_for_ip_address(self):
"""Waits for VM to acquire an IP address."""
self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
    def deploy(self):
        """Deploys new VM from template.

        Clones or copies the source template/VM/snapshot, provisions it,
        then delegates final configuration to reconfigure(). Fails the
        module on any precondition violation or XAPI error. Honors Ansible
        check mode (returns before making any change).
        """
        # Safety check.
        if self.exists():
            self.module.fail_json(msg="Called deploy on existing VM!")

        try:
            # Resolve the source template/VM/snapshot; fail=True makes this
            # a hard error if it cannot be found.
            templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
                                       msg_prefix="VM deploy: ")

            # Is this an existing running VM?
            if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
                self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")

            # Find a SR we can use for VM.copy(). We use SR of the first disk
            # if specified or default SR if not specified.
            disk_params_list = self.module.params['disks']

            sr_ref = None

            if disk_params_list:
                disk_params = disk_params_list[0]

                disk_sr_uuid = disk_params.get('sr_uuid')
                disk_sr = disk_params.get('sr')

                if disk_sr_uuid is not None or disk_sr is not None:
                    sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
                                            msg_prefix="VM deploy disks[0]: ")

            # Fall back to the pool's default SR when no disk SR was given.
            if not sr_ref:
                if self.default_sr_ref != "OpaqueRef:NULL":
                    sr_ref = self.default_sr_ref
                else:
                    self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")

            # VM name could be an empty string which is bad.
            if self.module.params['name'] is not None and not self.module.params['name']:
                self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")

            # Support for Ansible check mode.
            if self.module.check_mode:
                return

            # Now we can instantiate VM. We use VM.clone for linked_clone and
            # VM.copy for non linked_clone.
            if self.module.params['linked_clone']:
                self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
            else:
                self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)

            # Description is copied over from template so we reset it.
            self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")

            # If template is one of built-in XenServer templates, we have to
            # do some additional steps.
            # Note: VM.get_is_default_template() is supported from XenServer 7.2
            #       onward so we use an alternative way.
            templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)

            if "default_template" in templ_other_config and templ_other_config['default_template']:
                # other_config of built-in XenServer templates have a key called
                # 'disks' with the following content:
                #   disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
                # This value of other_data is copied to cloned or copied VM and
                # it prevents provisioning of VM because sr is not specified and
                # XAPI returns an error. To get around this, we remove the
                # 'disks' key and add disks to VM later ourselves.
                vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)

                if "disks" in vm_other_config:
                    del vm_other_config['disks']

                self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)

            # At this point we have VM ready for provisioning.
            self.xapi_session.xenapi.VM.provision(self.vm_ref)

            # After provisioning we can prepare vm_params for reconfigure().
            self.gather_params()

            # VM is almost ready. We just need to reconfigure it...
            self.reconfigure()

            # Power on VM if needed.
            if self.module.params['state'] == "poweredon":
                self.set_power_state("poweredon")

        except XenAPI.Failure as f:
            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
    def reconfigure(self):
        """Reconfigures an existing VM.

        Applies every change reported by get_changes() to the live VM
        via XAPI: name/description/folder/home server, hardware (CPUs,
        memory), disks (resize, create), CD-ROM, networks (VIF recreate
        and IP configuration for both the native and custom customization
        agents), custom params, and template conversion. Honors Ansible
        check mode (returns the change list without touching the VM).

        Returns:
            list: parameters that were reconfigured.
        """
        # Safety check.
        if not self.exists():
            self.module.fail_json(msg="Called reconfigure on non existing VM!")

        config_changes = self.get_changes()

        # Remember the power state before we touch it so we can restore it
        # at the end if force shutdown was used.
        vm_power_state_save = self.vm_params['power_state'].lower()

        if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
            self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")

        # Support for Ansible check mode.
        if self.module.check_mode:
            return config_changes

        if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
            self.set_power_state("shutdownguest")

        try:
            # config_changes mixes plain strings (simple VM attributes) and
            # dicts (grouped changes for hardware/disks/cdrom/networks/etc.).
            for change in config_changes:
                if isinstance(change, six.string_types):
                    if change == "name":
                        self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
                    elif change == "name_desc":
                        self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
                    elif change == "folder":
                        self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')

                        if self.module.params['folder']:
                            self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
                    elif change == "home_server":
                        if self.module.params['home_server']:
                            host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
                        else:
                            host_ref = "OpaqueRef:NULL"

                        self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
                elif isinstance(change, dict):
                    if change.get('hardware'):
                        for hardware_change in change['hardware']:
                            if hardware_change == "num_cpus":
                                num_cpus = int(self.module.params['hardware']['num_cpus'])

                                # VCPUs_at_startup must never exceed VCPUs_max,
                                # so the order of the two calls depends on
                                # whether we grow or shrink the CPU count.
                                if num_cpus < int(self.vm_params['VCPUs_at_startup']):
                                    self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
                                    self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
                                else:
                                    self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
                                    self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
                            elif hardware_change == "num_cpu_cores_per_socket":
                                self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
                                num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])

                                if num_cpu_cores_per_socket > 1:
                                    self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
                            elif hardware_change == "memory_mb":
                                # memory_mb is specified in MB, XAPI expects bytes.
                                memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
                                vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))

                                self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
                    elif change.get('disks_changed'):
                        vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
                        position = 0

                        # disks_changed is positional: one (possibly empty)
                        # change list per existing disk.
                        for disk_change_list in change['disks_changed']:
                            for disk_change in disk_change_list:
                                vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])

                                if disk_change == "name":
                                    self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
                                elif disk_change == "name_desc":
                                    self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
                                elif disk_change == "size":
                                    self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
                                                                                                                   "VM reconfigure disks[%s]: " % position)))

                            position += 1
                    elif change.get('disks_new'):
                        for position, disk_userdevice in change['disks_new']:
                            disk_params = self.module.params['disks'][position]

                            # Default the disk name to "<vm name>-<position>".
                            disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
                            disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""

                            if disk_params.get('sr_uuid'):
                                sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
                            elif disk_params.get('sr'):
                                sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
                            else:
                                sr_ref = self.default_sr_ref

                            disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))

                            new_disk_vdi = {
                                "name_label": disk_name,
                                "name_description": disk_name_desc,
                                "SR": sr_ref,
                                "virtual_size": disk_size,
                                "type": "user",
                                "sharable": False,
                                "read_only": False,
                                "other_config": {},
                            }

                            new_disk_vbd = {
                                "VM": self.vm_ref,
                                "VDI": None,
                                "userdevice": disk_userdevice,
                                "bootable": False,
                                "mode": "RW",
                                "type": "Disk",
                                "empty": False,
                                "other_config": {},
                                "qos_algorithm_type": "",
                                "qos_algorithm_params": {},
                            }

                            new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
                            vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)

                            # Hot-plug the new disk when the VM is running.
                            if self.vm_params['power_state'].lower() == "running":
                                self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
                    elif change.get('cdrom'):
                        vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]

                        # If there is no CD present, we have to create one.
                        if not vm_cdrom_params_list:
                            # We will try to place cdrom at userdevice position
                            # 3 (which is default) if it is not already occupied
                            # else we will place it at first allowed position.
                            cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)

                            if "3" in cdrom_userdevices_allowed:
                                cdrom_userdevice = "3"
                            else:
                                cdrom_userdevice = cdrom_userdevices_allowed[0]

                            cdrom_vbd = {
                                "VM": self.vm_ref,
                                "VDI": "OpaqueRef:NULL",
                                "userdevice": cdrom_userdevice,
                                "bootable": False,
                                "mode": "RO",
                                "type": "CD",
                                "empty": True,
                                "other_config": {},
                                "qos_algorithm_type": "",
                                "qos_algorithm_params": {},
                            }

                            cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
                        else:
                            cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])

                        cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)

                        for cdrom_change in change['cdrom']:
                            if cdrom_change == "type":
                                cdrom_type = self.module.params['cdrom']['type']

                                if cdrom_type == "none" and not cdrom_is_empty:
                                    self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
                                elif cdrom_type == "host":
                                    # Unimplemented!
                                    pass
                            elif cdrom_change == "iso_name":
                                # An ISO can only be inserted into an empty drive.
                                if not cdrom_is_empty:
                                    self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)

                                cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
                                self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
                    elif change.get('networks_changed'):
                        position = 0

                        # networks_changed is positional: one (possibly empty)
                        # change list per existing network interface.
                        for network_change_list in change['networks_changed']:
                            if network_change_list:
                                vm_vif_params = self.vm_params['VIFs'][position]
                                network_params = self.module.params['networks'][position]

                                vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
                                network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])

                                vif_recreated = False

                                if "name" in network_change_list or "mac" in network_change_list:
                                    # To change network or MAC, we destroy old
                                    # VIF and then create a new one with changed
                                    # parameters. That's how XenCenter does it.

                                    # Copy all old parameters to new VIF record.
                                    vif = {
                                        "device": vm_vif_params['device'],
                                        "network": network_ref,
                                        "VM": vm_vif_params['VM'],
                                        "MAC": vm_vif_params['MAC'],
                                        "MTU": vm_vif_params['MTU'],
                                        "other_config": vm_vif_params['other_config'],
                                        "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
                                        "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
                                        "locking_mode": vm_vif_params['locking_mode'],
                                        "ipv4_allowed": vm_vif_params['ipv4_allowed'],
                                        "ipv6_allowed": vm_vif_params['ipv6_allowed'],
                                    }

                                    if "name" in network_change_list:
                                        network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
                                        vif['network'] = network_ref_new
                                        vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)

                                    if "mac" in network_change_list:
                                        vif['MAC'] = network_params['mac'].lower()

                                    if self.vm_params['power_state'].lower() == "running":
                                        self.xapi_session.xenapi.VIF.unplug(vif_ref)

                                    self.xapi_session.xenapi.VIF.destroy(vif_ref)
                                    vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)

                                    if self.vm_params['power_state'].lower() == "running":
                                        self.xapi_session.xenapi.VIF.plug(vif_ref_new)

                                    vif_ref = vif_ref_new
                                    vif_recreated = True

                                if self.vm_params['customization_agent'] == "native":
                                    # Native agent: push IP configuration through
                                    # VIF.configure_ipv4/6, merging requested
                                    # changes with current VIF values.
                                    vif_reconfigure_needed = False

                                    if "type" in network_change_list:
                                        network_type = network_params['type'].capitalize()
                                        vif_reconfigure_needed = True
                                    else:
                                        network_type = vm_vif_params['ipv4_configuration_mode']

                                    if "ip" in network_change_list:
                                        network_ip = network_params['ip']
                                        vif_reconfigure_needed = True
                                    elif vm_vif_params['ipv4_addresses']:
                                        network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
                                    else:
                                        network_ip = ""

                                    if "prefix" in network_change_list:
                                        network_prefix = "/%s" % network_params['prefix']
                                        vif_reconfigure_needed = True
                                    elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
                                        network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
                                    else:
                                        network_prefix = ""

                                    if "gateway" in network_change_list:
                                        network_gateway = network_params['gateway']
                                        vif_reconfigure_needed = True
                                    else:
                                        network_gateway = vm_vif_params['ipv4_gateway']

                                    if vif_recreated or vif_reconfigure_needed:
                                        self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
                                                                                    "%s%s" % (network_ip, network_prefix), network_gateway)

                                    vif_reconfigure_needed = False

                                    if "type6" in network_change_list:
                                        network_type6 = network_params['type6'].capitalize()
                                        vif_reconfigure_needed = True
                                    else:
                                        network_type6 = vm_vif_params['ipv6_configuration_mode']

                                    if "ip6" in network_change_list:
                                        network_ip6 = network_params['ip6']
                                        vif_reconfigure_needed = True
                                    elif vm_vif_params['ipv6_addresses']:
                                        network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
                                    else:
                                        network_ip6 = ""

                                    if "prefix6" in network_change_list:
                                        network_prefix6 = "/%s" % network_params['prefix6']
                                        vif_reconfigure_needed = True
                                    elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
                                        network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
                                    else:
                                        network_prefix6 = ""

                                    if "gateway6" in network_change_list:
                                        network_gateway6 = network_params['gateway6']
                                        vif_reconfigure_needed = True
                                    else:
                                        network_gateway6 = vm_vif_params['ipv6_gateway']

                                    if vif_recreated or vif_reconfigure_needed:
                                        self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
                                                                                    "%s%s" % (network_ip6, network_prefix6), network_gateway6)
                                elif self.vm_params['customization_agent'] == "custom":
                                    vif_device = vm_vif_params['device']

                                    # A user could have manually changed network
                                    # or mac e.g. through XenCenter and then also
                                    # make those changes in playbook manually.
                                    # In that case, module will not detect any
                                    # changes and info in xenstore_data will
                                    # become stale. For that reason we always
                                    # update name and mac in xenstore_data.

                                    # Since we handle name and mac differently,
                                    # we have to remove them from
                                    # network_change_list.
                                    network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]

                                    for network_change in network_change_list_tmp + ['name', 'mac']:
                                        self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
                                                                                              "vm-data/networks/%s/%s" % (vif_device, network_change))

                                    if network_params.get('name'):
                                        network_name = network_params['name']
                                    else:
                                        network_name = vm_vif_params['network']['name_label']

                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                     "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)

                                    if network_params.get('mac'):
                                        network_mac = network_params['mac'].lower()
                                    else:
                                        network_mac = vm_vif_params['MAC'].lower()

                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                     "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)

                                    for network_change in network_change_list_tmp:
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/%s" % (vif_device, network_change),
                                                                                         network_params[network_change])

                            position += 1
                    elif change.get('networks_new'):
                        for position, vif_device in change['networks_new']:
                            network_params = self.module.params['networks'][position]

                            network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]

                            network_name = network_params['name']
                            network_mac = network_params['mac'] if network_params.get('mac') else ""
                            network_type = network_params.get('type')
                            network_ip = network_params['ip'] if network_params.get('ip') else ""
                            network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
                            network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
                            network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
                            network_type6 = network_params.get('type6')
                            network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
                            network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
                            network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""

                            vif = {
                                "device": vif_device,
                                "network": network_ref,
                                "VM": self.vm_ref,
                                "MAC": network_mac,
                                "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
                                "other_config": {},
                                "qos_algorithm_type": "",
                                "qos_algorithm_params": {},
                            }

                            vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)

                            # Hot-plug the new interface when the VM is running.
                            if self.vm_params['power_state'].lower() == "running":
                                self.xapi_session.xenapi.VIF.plug(vif_ref_new)

                            if self.vm_params['customization_agent'] == "native":
                                if network_type and network_type == "static":
                                    self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
                                                                                "%s/%s" % (network_ip, network_prefix), network_gateway)

                                if network_type6 and network_type6 == "static":
                                    self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
                                                                                "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
                            elif self.vm_params['customization_agent'] == "custom":
                                # We first have to remove any existing data
                                # from xenstore_data because there could be
                                # some old leftover data from some interface
                                # that once occupied same device location as
                                # our new interface.
                                for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
                                    self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))

                                self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)

                                # We get MAC from VIF itself instead of
                                # networks.mac because it could be
                                # autogenerated.
                                vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
                                self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)

                                if network_type:
                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)

                                    if network_type == "static":
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/ip" % vif_device, network_ip)
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/prefix" % vif_device, network_prefix)
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/netmask" % vif_device, network_netmask)
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/gateway" % vif_device, network_gateway)

                                if network_type6:
                                    self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)

                                    if network_type6 == "static":
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/ip6" % vif_device, network_ip6)
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
                                        self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
                                                                                         "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
                    elif change.get('custom_params'):
                        for position in change['custom_params']:
                            custom_param_key = self.module.params['custom_params'][position]['key']
                            custom_param_value = self.module.params['custom_params'][position]['value']
                            self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))

            if self.module.params['is_template']:
                self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
            elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
                # Restore the power state we forcibly changed above.
                self.set_power_state("poweredon")

            # Gather new params after reconfiguration.
            self.gather_params()

        except XenAPI.Failure as f:
            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)

        return config_changes
def destroy(self):
"""Removes an existing VM with associated disks"""
# Safety check.
if not self.exists():
self.module.fail_json(msg="Called destroy on non existing VM!")
if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
# Support for Ansible check mode.
if self.module.check_mode:
return
# Make sure that VM is poweredoff before we can destroy it.
self.set_power_state("poweredoff")
try:
# Destroy VM!
self.xapi_session.xenapi.VM.destroy(self.vm_ref)
vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
# Destroy all VDIs associated with VM!
for vm_disk_params in vm_disk_params_list:
vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
self.xapi_session.xenapi.VDI.destroy(vdi_ref)
except XenAPI.Failure as f:
self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
    def get_changes(self):
        """Finds VM parameters that differ from specified ones.

        This method builds a list describing which VM parameters differ
        from those specified in module parameters. Entries are either
        plain strings (e.g. 'name') or single-key dicts whose value
        carries per-subsection detail (e.g. {"hardware": [...]},
        {"disks_new": [(position, userdevice), ...]}).

        Returns:
            list: VM parameters that differ from those specified in
            module parameters.
        """
        # Safety check.
        if not self.exists():
            self.module.fail_json(msg="Called get_changes on non existing VM!")

        # Tracks whether any detected change requires the VM to be powered
        # off before it can be applied (no hotplug support).
        need_poweredoff = False

        if self.module.params['is_template']:
            need_poweredoff = True

        try:
            # This VM could be a template or a snapshot. In that case we fail
            # because we can't reconfigure them or it would just be too
            # dangerous.
            if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
                self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")

            if self.vm_params['is_a_snapshot']:
                self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")

            # Let's build a list of parameters that changed.
            config_changes = []

            # Name could only differ if we found an existing VM by uuid.
            if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
                if self.module.params['name']:
                    config_changes.append('name')
                else:
                    self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")

            if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
                config_changes.append('name_desc')

            # Folder parameter is found in other_config.
            vm_other_config = self.vm_params['other_config']
            vm_folder = vm_other_config.get('folder', '')

            if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
                config_changes.append('folder')

            if self.module.params['home_server'] is not None:
                if (self.module.params['home_server'] and
                        (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):

                    # Check existance only. Ignore return value.
                    get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
                                   msg_prefix="VM check home_server: ")

                    config_changes.append('home_server')
                elif not self.module.params['home_server'] and self.vm_params['affinity']:
                    # Empty string means "remove home server affinity".
                    config_changes.append('home_server')

            # Hardware (CPU/memory) changes.
            config_changes_hardware = []

            if self.module.params['hardware']:
                num_cpus = self.module.params['hardware'].get('num_cpus')

                if num_cpus is not None:
                    # Kept for compatibility with older Ansible versions that
                    # do not support subargument specs.
                    try:
                        num_cpus = int(num_cpus)
                    except ValueError as e:
                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")

                    if num_cpus < 1:
                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")

                    # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
                    # say the former is the way to go but this needs
                    # confirmation and testing.
                    if num_cpus != int(self.vm_params['VCPUs_at_startup']):
                        config_changes_hardware.append('num_cpus')
                        # For now, we don't support hotpluging so VM has to be in
                        # poweredoff state to reconfigure.
                        need_poweredoff = True

                num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')

                if num_cpu_cores_per_socket is not None:
                    # Kept for compatibility with older Ansible versions that
                    # do not support subargument specs.
                    try:
                        num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
                    except ValueError as e:
                        self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")

                    if num_cpu_cores_per_socket < 1:
                        self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")

                    if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
                        self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")

                    vm_platform = self.vm_params['platform']
                    vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))

                    if num_cpu_cores_per_socket != vm_cores_per_socket:
                        config_changes_hardware.append('num_cpu_cores_per_socket')
                        # For now, we don't support hotpluging so VM has to be
                        # in poweredoff state to reconfigure.
                        need_poweredoff = True

                memory_mb = self.module.params['hardware'].get('memory_mb')

                if memory_mb is not None:
                    # Kept for compatibility with older Ansible versions that
                    # do not support subargument specs.
                    try:
                        memory_mb = int(memory_mb)
                    except ValueError as e:
                        self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")

                    if memory_mb < 1:
                        self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")

                    # There are multiple memory parameters:
                    #     - memory_dynamic_max
                    #     - memory_dynamic_min
                    #     - memory_static_max
                    #     - memory_static_min
                    #     - memory_target
                    #
                    # memory_target seems like a good candidate but it returns 0 for
                    # halted VMs so we can't use it.
                    #
                    # I decided to use memory_dynamic_max and memory_static_max
                    # and use whichever is larger. This strategy needs validation
                    # and testing.
                    #
                    # XenServer stores memory size in bytes so we need to divide
                    # it by 1024*1024 = 1048576.
                    if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
                        config_changes_hardware.append('memory_mb')
                        # For now, we don't support hotpluging so VM has to be in
                        # poweredoff state to reconfigure.
                        need_poweredoff = True

            if config_changes_hardware:
                config_changes.append({"hardware": config_changes_hardware})

            # Disk changes: modified existing disks vs. newly added disks.
            config_changes_disks = []
            config_new_disks = []

            # Find allowed userdevices.
            vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)

            if self.module.params['disks']:
                # Get the list of all disk. Filter out any CDs found.
                vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]

                # Number of disks defined in module params have to be same or
                # higher than a number of existing disks attached to the VM.
                # We don't support removal or detachment of disks.
                if len(self.module.params['disks']) < len(vm_disk_params_list):
                    self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" %
                                          (len(self.module.params['disks']), len(vm_disk_params_list)))

                # Find the highest disk occupied userdevice.
                # NOTE(review): assumes VBDs are ordered by userdevice — confirm
                # against the gathering code.
                if not vm_disk_params_list:
                    vm_disk_userdevice_highest = "-1"
                else:
                    vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']

                for position in range(len(self.module.params['disks'])):
                    if position < len(vm_disk_params_list):
                        vm_disk_params = vm_disk_params_list[position]
                    else:
                        vm_disk_params = None

                    disk_params = self.module.params['disks'][position]

                    disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)

                    disk_name = disk_params.get('name')

                    if disk_name is not None and not disk_name:
                        self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)

                    # If this is an existing disk.
                    if vm_disk_params and vm_disk_params['VDI']:
                        disk_changes = []

                        if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
                            disk_changes.append('name')

                        disk_name_desc = disk_params.get('name_desc')

                        if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
                            disk_changes.append('name_desc')

                        if disk_size:
                            # Disks can only be grown, never shrunk.
                            if disk_size > int(vm_disk_params['VDI']['virtual_size']):
                                disk_changes.append('size')
                                need_poweredoff = True
                            elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
                                self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
                                                      "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))

                        config_changes_disks.append(disk_changes)
                    # If this is a new disk.
                    else:
                        if not disk_size:
                            self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)

                        disk_sr_uuid = disk_params.get('sr_uuid')
                        disk_sr = disk_params.get('sr')

                        if disk_sr_uuid is not None or disk_sr is not None:
                            # Check existance only. Ignore return value.
                            get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
                                           msg_prefix="VM check disks[%s]: " % position)
                        elif self.default_sr_ref == 'OpaqueRef:NULL':
                            self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)

                        if not vbd_userdevices_allowed:
                            self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)

                        disk_userdevice = None

                        # We need to place a new disk right above the highest
                        # placed existing disk to maintain relative disk
                        # positions pairable with disk specifications in
                        # module params. That place must not be occupied by
                        # some other device like CD-ROM.
                        for userdevice in vbd_userdevices_allowed:
                            if int(userdevice) > int(vm_disk_userdevice_highest):
                                disk_userdevice = userdevice
                                vbd_userdevices_allowed.remove(userdevice)
                                vm_disk_userdevice_highest = userdevice
                                break

                        # If no place was found.
                        if disk_userdevice is None:
                            # Highest occupied place could be a CD-ROM device
                            # so we have to include all devices regardless of
                            # type when calculating out-of-bound position.
                            disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
                            self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))

                        # For new disks we only track their position.
                        config_new_disks.append((position, disk_userdevice))

            # We should append config_changes_disks to config_changes only
            # if there is at least one changed disk, else skip.
            for disk_change in config_changes_disks:
                if disk_change:
                    config_changes.append({"disks_changed": config_changes_disks})
                    break

            if config_new_disks:
                config_changes.append({"disks_new": config_new_disks})

            # CD-ROM changes.
            config_changes_cdrom = []

            if self.module.params['cdrom']:
                # Get the list of all CD-ROMs. Filter out any regular disks
                # found. If we found no existing CD-ROM, we will create it
                # later else take the first one found.
                vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]

                # If no existing CD-ROM is found, we will need to add one.
                # We need to check if there is any userdevice allowed.
                if not vm_cdrom_params_list and not vbd_userdevices_allowed:
                    self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")

                cdrom_type = self.module.params['cdrom'].get('type')
                cdrom_iso_name = self.module.params['cdrom'].get('iso_name')

                # If cdrom.iso_name is specified but cdrom.type is not,
                # then set cdrom.type to 'iso', unless cdrom.iso_name is
                # an empty string, in that case set cdrom.type to 'none'.
                if not cdrom_type:
                    if cdrom_iso_name:
                        cdrom_type = "iso"
                    elif cdrom_iso_name is not None:
                        cdrom_type = "none"

                    # Write the inferred type back so later code (reconfigure)
                    # sees a fully resolved cdrom spec.
                    self.module.params['cdrom']['type'] = cdrom_type

                # If type changed.
                if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
                    config_changes_cdrom.append('type')

                if cdrom_type == "iso":
                    # Check if ISO exists.
                    # Check existance only. Ignore return value.
                    get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
                                   msg_prefix="VM check cdrom.iso_name: ")

                    # Is ISO image changed?
                    if (cdrom_iso_name and
                            (not vm_cdrom_params_list or
                             not vm_cdrom_params_list[0]['VDI'] or
                             cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
                        config_changes_cdrom.append('iso_name')

            if config_changes_cdrom:
                config_changes.append({"cdrom": config_changes_cdrom})

            # Network interface changes: modified existing VIFs vs. new VIFs.
            config_changes_networks = []
            config_new_networks = []

            # Find allowed devices.
            vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)

            if self.module.params['networks']:
                # Number of VIFs defined in module params have to be same or
                # higher than a number of existing VIFs attached to the VM.
                # We don't support removal of VIFs.
                if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
                    self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" %
                                          (len(self.module.params['networks']), len(self.vm_params['VIFs'])))

                # Find the highest occupied device.
                if not self.vm_params['VIFs']:
                    vif_device_highest = "-1"
                else:
                    vif_device_highest = self.vm_params['VIFs'][-1]['device']

                for position in range(len(self.module.params['networks'])):
                    if position < len(self.vm_params['VIFs']):
                        vm_vif_params = self.vm_params['VIFs'][position]
                    else:
                        vm_vif_params = None

                    network_params = self.module.params['networks'][position]

                    network_name = network_params.get('name')

                    if network_name is not None and not network_name:
                        self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)

                    if network_name:
                        # Check existance only. Ignore return value.
                        get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
                                       msg_prefix="VM check networks[%s]: " % position)

                    network_mac = network_params.get('mac')

                    if network_mac is not None:
                        network_mac = network_mac.lower()

                        if not is_mac(network_mac):
                            self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))

                    # IPv4 reconfiguration.
                    network_type = network_params.get('type')
                    network_ip = network_params.get('ip')
                    network_netmask = network_params.get('netmask')
                    network_prefix = None

                    # If networks.ip is specified and networks.type is not,
                    # then set networks.type to 'static'.
                    if not network_type and network_ip:
                        network_type = "static"

                    # XenServer natively supports only 'none' and 'static'
                    # type with 'none' being the same as 'dhcp'.
                    if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
                        network_type = "none"

                    if network_type and network_type == "static":
                        if network_ip is not None:
                            # Accept "addr/prefix" CIDR notation in the ip field.
                            network_ip_split = network_ip.split('/')
                            network_ip = network_ip_split[0]

                            if network_ip and not is_valid_ip_addr(network_ip):
                                self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))

                            if len(network_ip_split) > 1:
                                network_prefix = network_ip_split[1]

                                if not is_valid_ip_prefix(network_prefix):
                                    self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))

                        if network_netmask is not None:
                            if not is_valid_ip_netmask(network_netmask):
                                self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))

                            network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
                        elif network_prefix is not None:
                            network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)

                    # If any parameter is overridden at this point, update it.
                    if network_type:
                        network_params['type'] = network_type

                    if network_ip:
                        network_params['ip'] = network_ip

                    if network_netmask:
                        network_params['netmask'] = network_netmask

                    if network_prefix:
                        network_params['prefix'] = network_prefix

                    network_gateway = network_params.get('gateway')

                    # Gateway can be an empty string (when removing gateway
                    # configuration) but if it is not, it should be validated.
                    if network_gateway and not is_valid_ip_addr(network_gateway):
                        self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))

                    # IPv6 reconfiguration.
                    network_type6 = network_params.get('type6')
                    network_ip6 = network_params.get('ip6')
                    network_prefix6 = None

                    # If networks.ip6 is specified and networks.type6 is not,
                    # then set networks.type6 to 'static'.
                    if not network_type6 and network_ip6:
                        network_type6 = "static"

                    # XenServer natively supports only 'none' and 'static'
                    # type with 'none' being the same as 'dhcp'.
                    if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
                        network_type6 = "none"

                    if network_type6 and network_type6 == "static":
                        if network_ip6 is not None:
                            network_ip6_split = network_ip6.split('/')
                            network_ip6 = network_ip6_split[0]

                            if network_ip6 and not is_valid_ip6_addr(network_ip6):
                                self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))

                            if len(network_ip6_split) > 1:
                                network_prefix6 = network_ip6_split[1]

                                if not is_valid_ip6_prefix(network_prefix6):
                                    self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))

                    # If any parameter is overridden at this point, update it.
                    if network_type6:
                        network_params['type6'] = network_type6

                    if network_ip6:
                        network_params['ip6'] = network_ip6

                    if network_prefix6:
                        network_params['prefix6'] = network_prefix6

                    network_gateway6 = network_params.get('gateway6')

                    # Gateway can be an empty string (when removing gateway
                    # configuration) but if it is not, it should be validated.
                    if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
                        self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))

                    # If this is an existing VIF.
                    if vm_vif_params and vm_vif_params['network']:
                        network_changes = []

                        if network_name and network_name != vm_vif_params['network']['name_label']:
                            network_changes.append('name')

                        if network_mac and network_mac != vm_vif_params['MAC'].lower():
                            network_changes.append('mac')

                        if self.vm_params['customization_agent'] == "native":
                            if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
                                network_changes.append('type')

                            if network_type and network_type == "static":
                                if network_ip and (not vm_vif_params['ipv4_addresses'] or
                                                   not vm_vif_params['ipv4_addresses'][0] or
                                                   network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
                                    network_changes.append('ip')

                                if network_prefix and (not vm_vif_params['ipv4_addresses'] or
                                                       not vm_vif_params['ipv4_addresses'][0] or
                                                       network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
                                    network_changes.append('prefix')
                                    network_changes.append('netmask')

                                if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
                                    network_changes.append('gateway')

                            if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
                                network_changes.append('type6')

                            if network_type6 and network_type6 == "static":
                                if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
                                                    not vm_vif_params['ipv6_addresses'][0] or
                                                    network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
                                    network_changes.append('ip6')

                                if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
                                                        not vm_vif_params['ipv6_addresses'][0] or
                                                        network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
                                    network_changes.append('prefix6')

                                if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
                                    network_changes.append('gateway6')

                        elif self.vm_params['customization_agent'] == "custom":
                            # Custom agent reads its network config from
                            # xenstore, so changes here require a VM restart.
                            vm_xenstore_data = self.vm_params['xenstore_data']

                            if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
                                network_changes.append('type')
                                need_poweredoff = True

                            if network_type and network_type == "static":
                                if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
                                    network_changes.append('ip')
                                    need_poweredoff = True

                                if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
                                    network_changes.append('prefix')
                                    network_changes.append('netmask')
                                    need_poweredoff = True

                                if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
                                                                                                           vm_vif_params['device'], ""):
                                    network_changes.append('gateway')
                                    need_poweredoff = True

                            if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
                                network_changes.append('type6')
                                need_poweredoff = True

                            if network_type6 and network_type6 == "static":
                                if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
                                    network_changes.append('ip6')
                                    need_poweredoff = True

                                if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
                                    network_changes.append('prefix6')
                                    need_poweredoff = True

                                if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
                                                                                                             vm_vif_params['device'], ""):
                                    network_changes.append('gateway6')
                                    need_poweredoff = True

                        config_changes_networks.append(network_changes)
                    # If this is a new VIF.
                    else:
                        if not network_name:
                            self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)

                        if network_type and network_type == "static" and network_ip and not network_netmask:
                            self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)

                        if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
                            self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)

                        # Restart is needed if we are adding new network
                        # interface with IP/gateway parameters specified
                        # and custom agent is used.
                        if self.vm_params['customization_agent'] == "custom":
                            for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
                                if network_params.get(parameter):
                                    need_poweredoff = True
                                    break

                        if not vif_devices_allowed:
                            self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)

                        # We need to place a new network interface right above the
                        # highest placed existing interface to maintain relative
                        # positions pairable with network interface specifications
                        # in module params.
                        vif_device = str(int(vif_device_highest) + 1)

                        if vif_device not in vif_devices_allowed:
                            self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))

                        vif_devices_allowed.remove(vif_device)
                        vif_device_highest = vif_device

                        # For new VIFs we only track their position.
                        config_new_networks.append((position, vif_device))

            # We should append config_changes_networks to config_changes only
            # if there is at least one changed network, else skip.
            for network_change in config_changes_networks:
                if network_change:
                    config_changes.append({"networks_changed": config_changes_networks})
                    break

            if config_new_networks:
                config_changes.append({"networks_new": config_new_networks})

            # Arbitrary VM params set through XAPI "VM.set_<key>" calls.
            config_changes_custom_params = []

            if self.module.params['custom_params']:
                for position in range(len(self.module.params['custom_params'])):
                    custom_param = self.module.params['custom_params'][position]

                    custom_param_key = custom_param['key']
                    custom_param_value = custom_param['value']

                    if custom_param_key not in self.vm_params:
                        self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))

                    if custom_param_value != self.vm_params[custom_param_key]:
                        # We only need to track custom param position.
                        config_changes_custom_params.append(position)

            if config_changes_custom_params:
                config_changes.append({"custom_params": config_changes_custom_params})

            if need_poweredoff:
                config_changes.append('need_poweredoff')

            return config_changes

        except XenAPI.Failure as f:
            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
def get_normalized_disk_size(self, disk_params, msg_prefix=""):
"""Parses disk size parameters and returns disk size in bytes.
This method tries to parse disk size module parameters. It fails
with an error message if size cannot be parsed.
Args:
disk_params (dist): A dictionary with disk parameters.
msg_prefix (str): A string error messages should be prefixed
with (default: "").
Returns:
int: disk size in bytes if disk size is successfully parsed or
None if no disk size parameters were found.
"""
# There should be only single size spec but we make a list of all size
# specs just in case. Priority is given to 'size' but if not found, we
# check for 'size_tb', 'size_gb', 'size_mb' etc. and use first one
# found.
disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
if disk_size_spec:
try:
# size
if "size" in disk_size_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
disk_size_m = size_regex.match(disk_params['size'])
if disk_size_m:
size = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
# size_tb, size_gb, size_mb, size_kb, size_b
else:
size = disk_params[disk_size_spec[0]]
unit = disk_size_spec[0].split('_')[-1]
if not unit:
unit = "b"
else:
unit = unit.lower()
if re.match(r'\d+\.\d+', size):
# We found float value in string, let's typecast it.
if unit == "b":
# If we found float but unit is bytes, we get the integer part only.
size = int(float(size))
else:
size = float(size)
else:
# We found int value in string, let's typecast it.
size = int(size)
if not size or size < 0:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
if unit in disk_units:
return int(size * (1024 ** disk_units[unit]))
else:
self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
(msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
else:
return None
@staticmethod
def get_cdrom_type(vm_cdrom_params):
"""Returns VM CD-ROM type."""
# TODO: implement support for detecting type host. No server to test
# this on at the moment.
if vm_cdrom_params['empty']:
return "none"
else:
return "iso"
def main():
    """Ansible module entry point: declare the argument spec and dispatch
    the requested VM operation (deploy, reconfigure, destroy or power
    state change) against the target XenServer host.
    """
    argument_spec = xenserver_common_argument_spec()
    argument_spec.update(
        state=dict(type='str', default='present',
                   choices=['present', 'absent', 'poweredon']),
        name=dict(type='str', aliases=['name_label']),
        name_desc=dict(type='str'),
        uuid=dict(type='str'),
        template=dict(type='str', aliases=['template_src']),
        template_uuid=dict(type='str'),
        is_template=dict(type='bool', default=False),
        folder=dict(type='str'),
        hardware=dict(
            type='dict',
            options=dict(
                num_cpus=dict(type='int'),
                num_cpu_cores_per_socket=dict(type='int'),
                memory_mb=dict(type='int'),
            ),
        ),
        disks=dict(
            type='list',
            elements='dict',
            options=dict(
                # Exactly one size specification may be given per disk
                # (enforced by mutually_exclusive below).
                size=dict(type='str'),
                size_tb=dict(type='str'),
                size_gb=dict(type='str'),
                size_mb=dict(type='str'),
                size_kb=dict(type='str'),
                size_b=dict(type='str'),
                name=dict(type='str', aliases=['name_label']),
                name_desc=dict(type='str'),
                sr=dict(type='str'),
                sr_uuid=dict(type='str'),
            ),
            aliases=['disk'],
            mutually_exclusive=[
                ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
                ['sr', 'sr_uuid'],
            ],
        ),
        cdrom=dict(
            type='dict',
            options=dict(
                type=dict(type='str', choices=['none', 'iso']),
                iso_name=dict(type='str'),
            ),
            required_if=[
                ['type', 'iso', ['iso_name']],
            ],
        ),
        networks=dict(
            type='list',
            elements='dict',
            options=dict(
                name=dict(type='str', aliases=['name_label']),
                mac=dict(type='str'),
                type=dict(type='str', choices=['none', 'dhcp', 'static']),
                ip=dict(type='str'),
                netmask=dict(type='str'),
                gateway=dict(type='str'),
                type6=dict(type='str', choices=['none', 'dhcp', 'static']),
                ip6=dict(type='str'),
                gateway6=dict(type='str'),
            ),
            aliases=['network'],
            required_if=[
                ['type', 'static', ['ip']],
                ['type6', 'static', ['ip6']],
            ],
        ),
        home_server=dict(type='str'),
        custom_params=dict(
            type='list',
            elements='dict',
            options=dict(
                key=dict(type='str', required=True),
                value=dict(type='raw', required=True),
            ),
        ),
        wait_for_ip_address=dict(type='bool', default=False),
        state_change_timeout=dict(type='int', default=0),
        linked_clone=dict(type='bool', default=False),
        force=dict(type='bool', default=False),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=[
                               ['name', 'uuid'],
                           ],
                           mutually_exclusive=[
                               ['template', 'template_uuid'],
                           ],
                           )

    result = {'failed': False, 'changed': False}

    vm = XenServerVM(module)

    # Find existing VM
    if vm.exists():
        if module.params['state'] == "absent":
            vm.destroy()
            result['changed'] = True
        elif module.params['state'] == "present":
            config_changes = vm.reconfigure()

            if config_changes:
                result['changed'] = True

                # Make new disk and network changes more user friendly
                # and informative.
                for change in config_changes:
                    if isinstance(change, dict):
                        if change.get('disks_new'):
                            # Expand each (position, userdevice) pair into a
                            # dict that also carries the requested disk params.
                            disks_new = []

                            for position, userdevice in change['disks_new']:
                                disk_new_params = {"position": position, "vbd_userdevice": userdevice}
                                disk_params = module.params['disks'][position]

                                for k in disk_params.keys():
                                    if disk_params[k] is not None:
                                        disk_new_params[k] = disk_params[k]

                                disks_new.append(disk_new_params)

                            if disks_new:
                                change['disks_new'] = disks_new
                        elif change.get('networks_new'):
                            # Same expansion for newly added network interfaces.
                            networks_new = []

                            for position, device in change['networks_new']:
                                network_new_params = {"position": position, "vif_device": device}
                                network_params = module.params['networks'][position]

                                for k in network_params.keys():
                                    if network_params[k] is not None:
                                        network_new_params[k] = network_params[k]

                                networks_new.append(network_new_params)

                            if networks_new:
                                change['networks_new'] = networks_new

            result['changes'] = config_changes
        elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
            # NOTE(review): only 'poweredon' is reachable here given the
            # declared state choices above — confirm whether the extra
            # states are intentionally reserved.
            result['changed'] = vm.set_power_state(module.params['state'])
    elif module.params['state'] != "absent":
        vm.deploy()
        result['changed'] = True

    if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
        vm.wait_for_ip_address()

    result['instance'] = vm.gather_facts()

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
marinho/geraldo | site/newsite/site-geraldo/django/utils/daemonize.py | 452 | 1907 | import os
import sys
# NOTE: this module uses Python 2 syntax (octal literals like 022 and
# "except OSError, e" clauses) and will not run under Python 3 as-is.
if os.name == 'posix':
    def become_daemon(our_home_dir='.', out_log='/dev/null',
                      err_log='/dev/null', umask=022):
        "Robustly turn into a UNIX daemon, running in our_home_dir."
        # First fork: detach from the parent; the child is no longer a
        # process group leader, which is required for setsid() below.
        try:
            if os.fork() > 0:
                sys.exit(0)     # kill off parent
        except OSError, e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        # Become session leader, move to the working dir, set file mode mask.
        os.setsid()
        os.chdir(our_home_dir)
        os.umask(umask)

        # Second fork: ensure the daemon is not a session leader, so it can
        # never reacquire a controlling terminal.
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
            os._exit(1)

        # Redirect the standard file descriptors; logs are opened unbuffered
        # (buffering=0) so output appears immediately.
        si = open('/dev/null', 'r')
        so = open(out_log, 'a+', 0)
        se = open(err_log, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # Set custom file descriptors so that they get proper buffering.
        sys.stdout, sys.stderr = so, se
else:
    def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=022):
        """
        If we're not running under a POSIX system, just simulate the daemon
        mode by doing redirections and directory changing.
        """
        os.chdir(our_home_dir)
        os.umask(umask)
        # Close the real standard streams and replace stdout/stderr with
        # either log files or a no-op sink (NullDevice, defined below).
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        if err_log:
            sys.stderr = open(err_log, 'a', 0)
        else:
            sys.stderr = NullDevice()
        if out_log:
            sys.stdout = open(out_log, 'a', 0)
        else:
            sys.stdout = NullDevice()
class NullDevice:
    """A writeable object that silently discards everything -- like /dev/null."""
    def write(self, s):
        # Intentionally a no-op: output sent here disappears.
        pass
| lgpl-3.0 |
punalpatel/st2 | st2common/st2common/controllers.py | 5 | 1728 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import expose
# Public API of this module when star-imported.
__all__ = [
    'BaseRootController'
]
class BaseRootController(object):
    """Root controller that routes requests to a versioned sub-controller.

    Subclasses are expected to provide ``logger``, ``controllers`` (a mapping
    of version string -> controller instance) and ``default_controller``.
    """

    logger = None
    controllers = None
    default_controller = None

    @expose()
    def _lookup(self, *remainder):
        # The first path segment, when present, names the API version.
        version = remainder[0] if remainder else ''

        if remainder[-1] == '':
            # A trailing '/' in the URL leaves an empty string at the end of
            # the remainder tuple; strip it so further pecan routing works.
            remainder = remainder[:-1]

        versioned_controller = self.controllers.get(version, None)
        if versioned_controller:
            # Hand off to the matched controller, dropping the version segment.
            return versioned_controller, remainder[1:]

        self.logger.debug('No version specified in URL. Will use default controller.')
        return self.default_controller, remainder
| apache-2.0 |
aselle/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 39 | 32726 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
  """Filter data types into acceptable format."""
  # Each (guard, feature-extractor, label-extractor) stage unwraps one
  # supported container library into plain arrays; labels are left alone
  # in the unsupervised case (y is None).
  stages = (
      (HAS_DASK, extract_dask_data, extract_dask_labels),
      (HAS_PANDAS, extract_pandas_data, extract_pandas_labels),
  )
  for available, unwrap_features, unwrap_labels in stages:
    if available:
      x = unwrap_features(x)
      if y is not None:
        y = unwrap_labels(y)
  return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
                            y,
                            n_classes,
                            batch_size=None,
                            shuffle=True,
                            epochs=None):
  """Create a data feeder that samples training inputs from the dataset.

  When `x` and `y` are iterators a `StreamingDataFeeder` is returned.

  Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or iterable which returns dict) such that
      `n_classes[key] = n_classes for y[key]`.
    batch_size: size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    DataFeeder object that returns training data.

  Raises:
    ValueError: if one of `x` and `y` is iterable and the other is not.
  """
  x, y = _data_type_filter(x, y)

  # Default to the in-memory feeder; upgrade to the dask feeder only when
  # both inputs are dask containers (y may be absent).
  data_feeder_cls = DataFeeder
  if HAS_DASK:
    # pylint: disable=g-import-not-at-top
    import dask.dataframe as dd
    if (isinstance(x, (dd.Series, dd.DataFrame)) and
        (y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
      data_feeder_cls = DaskDataFeeder

  if _is_iterable(x):
    # Streaming input: labels, when supplied, must stream as well so the
    # two iterators can be consumed in lockstep.
    if y is not None and not _is_iterable(y):
      raise ValueError('Both x and y should be iterators for '
                       'streaming learning to work.')
    return StreamingDataFeeder(x, y, n_classes, batch_size)

  return data_feeder_cls(
      x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
  """Returns an iterable for feeding into predict step.

  Args:
    x: numpy, pandas, Dask array or dictionary of aforementioned. Also
      supports iterable.
    batch_size: Size of batches to split data into. If `None`, returns one
      batch of full size.

  Returns:
    List or iterator (or dictionary thereof) of parts of data to predict on.

  Raises:
    ValueError: if `batch_size` <= 0.
  """
  # Unwrap known container libraries first.
  if HAS_DASK:
    x = extract_dask_data(x)
  if HAS_PANDAS:
    x = extract_pandas_data(x)

  if _is_iterable(x):
    # Streaming input: delegate batching to the generator helper.
    return _batch_data(x, batch_size)

  if len(x.shape) == 1:
    # Promote vectors to single-column matrices.
    x = np.reshape(x, (-1, 1))

  if batch_size is None:
    # No batching requested: predict on everything at once.
    return [x]
  if batch_size <= 0:
    raise ValueError('Invalid batch_size %d.' % batch_size)
  n_batches = int(math.ceil(float(len(x)) / batch_size))
  return [x[b * batch_size:(b + 1) * batch_size] for b in xrange(n_batches)]
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
  """Sets up processor iterable.

  Args:
    x: numpy, pandas or iterable.

  Returns:
    Iterable of data to process.
  """
  # Only pandas inputs need unwrapping; everything else passes through.
  return extract_pandas_matrix(x) if HAS_PANDAS else x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
  """Checks array on dtype and converts it if different.

  Args:
    array: Input array.
    dtype: Expected dtype.

  Returns:
    Original array or converted.
  """
  if not isinstance(array, (np.ndarray, list)):
    # Skip the conversion for other array-like classes (e.g. h5py.Dataset)
    # to avoid copying the array and loading whole data into memory.
    return array
  return np.array(array, dtype=dtype, order=None, copy=False)
def _access(data, iloc):
  """Access element(s) of a collection by integer location.

  Args:
    data: array-like. The collection to access.
    iloc: `int` or `list` of `int`s. Location(s) to access in `data`.

  Returns:
    The element(s) of `data` found at location(s) `iloc`.
  """
  if HAS_PANDAS:
    import pandas as pd  # pylint: disable=g-import-not-at-top
    # pandas objects require positional (.iloc) rather than label indexing.
    if isinstance(data, (pd.Series, pd.DataFrame)):
      return data.iloc[iloc]
  return data[iloc]
def _check_dtype(dtype):
  """Return `dtype` unchanged, warning when it resolves to float64."""
  # float64 usually signals an unintended upcast upstream: most models only
  # support float32, so surface it early.
  if dtypes.as_dtype(dtype) == dtypes.float64:
    logging.warn(
        'float64 is not supported by many models, consider casting to float32.')
  return dtype
class DataFeeder(object):
  """Data feeder is an example class to sample data for TF trainer.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.
  """

  @deprecated(None, 'Please use tensorflow/transform or tf.data.')
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size=None,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DataFeeder instance.

    Args:
      x: One feature sample which can either Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
      y: label vector, either floats for regression or class id for
        classification. If matrix, will consider as a sequence of labels.
        Can be `None` for unsupervised setting. Also supports dictionary of
        labels.
      n_classes: Number of classes, 0 and 1 are considered regression, `None`
        will pass through the input labels without one-hot conversion. Also, if
        `y` is `dict`, then `n_classes` must be `dict` such that
        `n_classes[key] = n_classes for label y[key]`, `None` otherwise.
      batch_size: Mini-batch size to accumulate samples in one mini batch.
      shuffle: Whether to shuffle `x`.
      random_state: Numpy `RandomState` object to reproduce sampling.
      epochs: Number of times to iterate over input data before raising
        `StopIteration` exception.

    Attributes:
      x: Input features (ndarray or dictionary of ndarrays).
      y: Input label (ndarray or dictionary of ndarrays).
      n_classes: Number of classes (if `None`, pass through indices without
        one-hot conversion).
      batch_size: Mini-batch size to accumulate.
      input_shape: Shape of the input (or dictionary of shapes).
      output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of shapes).
      output_dtype: DType of output (or dictionary of shapes).
    """
    x_is_dict, y_is_dict = isinstance(
        x, dict), y is not None and isinstance(y, dict)
    if isinstance(y, list):
      y = np.array(y)

    # Validate/convert every feature and label array (lists become ndarrays).
    self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
                   ]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else (dict(
        [(k, check_array(v, v.dtype)) for k, v in list(y.items())])
                                      if y_is_dict else check_array(y, y.dtype))

    # self.n_classes is not None means we're converting raw target indices
    # to one-hot.
    if n_classes is not None:
      if not y_is_dict:
        # Class ids must be integral for one-hot conversion; single-class /
        # regression targets stay float.
        y_dtype = (
            np.int64 if n_classes is not None and n_classes > 1 else np.float32)
        self._y = (None if y is None else check_array(y, dtype=y_dtype))

    self.n_classes = n_classes
    self.max_epochs = epochs

    x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
                   ]) if x_is_dict else self._x.shape
    y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
                   ]) if y_is_dict else None if y is None else self._y.shape

    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)

    # Input dtype matches dtype of x.
    self._input_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
        if x_is_dict else _check_dtype(self._x.dtype))

    # self._output_dtype == np.float32 when y is None
    self._output_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
        if y_is_dict else (_check_dtype(self._y.dtype)
                           if y is not None else np.float32))

    # self.n_classes is None means we're passing in raw target indices.
    # One-hot encoded dict labels are emitted as float32.
    if n_classes is not None and y_is_dict:
      for key in list(n_classes.keys()):
        if key in self._output_dtype:
          self._output_dtype[key] = np.float32

    self._shuffle = shuffle
    # Fixed default seed (42) keeps sampling reproducible across runs.
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state

    if x_is_dict:
      num_samples = list(self._x.values())[0].shape[0]
    elif tensor_util.is_tensor(self._x):
      num_samples = self._x.shape[
          0].value  # shape will be a Dimension, extract an int
    else:
      num_samples = self._x.shape[0]

    # Precompute the (possibly shuffled) iteration order over samples.
    if self._shuffle:
      self.indices = self.random_state.permutation(num_samples)
    else:
      self.indices = np.array(range(num_samples))
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None

  @property
  def x(self):
    """Input features (ndarray or dict of ndarrays)."""
    return self._x

  @property
  def y(self):
    """Input labels (ndarray, dict of ndarrays, or None)."""
    return self._y

  @property
  def shuffle(self):
    """Whether samples are drawn in shuffled order."""
    return self._shuffle

  @property
  def input_dtype(self):
    """DType of the input (or dict of dtypes)."""
    return self._input_dtype

  @property
  def output_dtype(self):
    """DType of the output (or dict of dtypes)."""
    return self._output_dtype

  @property
  def batch_size(self):
    """Mini-batch size used when sampling."""
    return self._batch_size

  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.

    Returns:
      The epoch placeholder.
    """
    self._epoch_placeholder = array_ops.placeholder(
        dtypes.int32, [1], name='epoch')
    return self._epoch_placeholder

  def input_builder(self):
    """Builds inputs in the graph.

    Returns:
      Two placeholders for inputs and outputs.
    """

    def get_placeholder(shape, dtype, name_prepend):
      if shape is None:
        return None
      if isinstance(shape, dict):
        # One placeholder per feature/label key.
        placeholder = {}
        for key in list(shape.keys()):
          placeholder[key] = array_ops.placeholder(
              dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
              name=name_prepend + '_' + key)
      else:
        # Leading dimension is None so any batch size can be fed.
        placeholder = array_ops.placeholder(
            dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
      return placeholder

    self._input_placeholder = get_placeholder(self.input_shape,
                                              self._input_dtype, 'input')
    self._output_placeholder = get_placeholder(self.output_shape,
                                               self._output_dtype, 'output')
    return self._input_placeholder, self._output_placeholder

  def set_placeholders(self, input_placeholder, output_placeholder):
    """Sets placeholders for this data feeder.

    Args:
      input_placeholder: Placeholder for `x` variable. Should match shape
        of the examples in the x dataset.
      output_placeholder: Placeholder for `y` variable. Should match
        shape of the examples in the y dataset. Can be `None`.
    """
    self._input_placeholder = input_placeholder
    self._output_placeholder = output_placeholder

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {
        'epoch': self.epoch,
        'offset': self.offset,
        'batch_size': self._batch_size
    }

  def get_feed_dict_fn(self):
    """Returns a function that samples data into given placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from `x` and `y`.
    """
    x_is_dict, y_is_dict = isinstance(
        self._x, dict), self._y is not None and isinstance(self._y, dict)

    # Assign input features from random indices.
    def extract(data, indices):
      # 1-D data is reshaped to a column vector to match the feeder's
      # declared [batch, 1] input shape.
      return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
              if len(data.shape) == 1 else _access(data, indices))

    # assign labels from random indices
    def assign_label(data, shape, dtype, n_classes, indices):
      shape[0] = indices.shape[0]
      out = np.zeros(shape, dtype=dtype)
      for i in xrange(out.shape[0]):
        sample = indices[i]
        # self.n_classes is None means we're passing in raw target indices
        if n_classes is None:
          out[i] = _access(data, sample)
        else:
          if n_classes > 1:
            if len(shape) == 2:
              # Single class id per sample: set the one-hot bit.
              out.itemset((i, int(_access(data, sample))), 1.0)
            else:
              # Sequence of class ids: one-hot each position.
              for idx, value in enumerate(_access(data, sample)):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = _access(data, sample)
      return out

    def _feed_dict_fn():
      """Function that samples data into given placeholders."""
      if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
        raise StopIteration
      assert self._input_placeholder is not None
      feed_dict = {}
      if self._epoch_placeholder is not None:
        feed_dict[self._epoch_placeholder.name] = [self.epoch]

      # Take next batch of indices.
      x_len = list(
          self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
      end = min(x_len, self.offset + self._batch_size)
      # The final batch of an epoch may be smaller than batch_size.
      batch_indices = self.indices[self.offset:end]

      # adding input placeholder
      feed_dict.update(
          dict([(self._input_placeholder[k].name, extract(v, batch_indices))
                for k, v in list(self._x.items())]) if x_is_dict else {
                    self._input_placeholder.name:
                        extract(self._x, batch_indices)
                })

      # move offset and reset it if necessary
      self.offset += self._batch_size
      if self.offset >= x_len:
        # Epoch boundary: reshuffle (when enabled) and start over.
        self.indices = self.random_state.permutation(
            x_len) if self._shuffle else np.array(range(x_len))
        self.offset = 0
        self.epoch += 1

      # return early if there are no labels
      if self._output_placeholder is None:
        return feed_dict

      # adding output placeholders
      if y_is_dict:
        for k, v in list(self._y.items()):
          n_classes = (self.n_classes[k] if k in self.n_classes else
                       None) if self.n_classes is not None else None
          shape, dtype = self.output_shape[k], self._output_dtype[k]
          feed_dict.update({
              self._output_placeholder[k].name:
                  assign_label(v, shape, dtype, n_classes, batch_indices)
          })
      else:
        shape, dtype, n_classes = (self.output_shape, self._output_dtype,
                                   self.n_classes)
        feed_dict.update({
            self._output_placeholder.name:
                assign_label(self._y, shape, dtype, n_classes, batch_indices)
        })

      return feed_dict

    return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
  """Data feeder for TF trainer that reads data from iterator.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Streaming data feeder allows to read data as it comes it from disk or
  somewhere else. It's custom to have this iterators rotate infinetly over
  the dataset, to allow control of how much to learn on the trainer side.
  """

  def __init__(self, x, y, n_classes, batch_size):
    """Initializes a StreamingDataFeeder instance.

    Args:
      x: iterator each element of which returns one feature sample. Sample can
        be a Nd numpy matrix or dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
        classes regression values.
      n_classes: indicator of how many classes the corresponding label sample
        has for the purposes of one-hot conversion of label. In case where `y`
        is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
        of how many classes there are in each label in `y`. If key is
        present in `y` and missing in `n_classes`, the value is assumed `None`
        and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        `None`, then assumes that iterator to return already batched element.

    Attributes:
      x: input features (or dictionary of input features).
      y: input label (or dictionary of output features).
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input (can be dictionary depending on `x`).
      output_shape: shape of the output (can be dictionary depending on `y`).
      input_dtype: dtype of input (can be dictionary depending on `x`).
      output_dtype: dtype of output (can be dictionary depending on `y`).
    """
    # pylint: disable=invalid-name,super-init-not-called
    # Peek at the first element to learn shapes/dtypes, then push it back
    # onto the stream so nothing is lost.
    x_first_el = six.next(x)
    self._x = itertools.chain([x_first_el], x)
    if y is not None:
      y_first_el = six.next(y)
      self._y = itertools.chain([y_first_el], y)
    else:
      y_first_el = None
      self._y = None
    self.n_classes = n_classes

    x_is_dict = isinstance(x_first_el, dict)
    y_is_dict = y is not None and isinstance(y_first_el, dict)
    if y_is_dict and n_classes is not None:
      assert isinstance(n_classes, dict)

    # extract shapes for first_elements; a leading 1 stands in for the
    # per-sample dimension.
    if x_is_dict:
      x_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
    else:
      x_first_el_shape = [1] + list(x_first_el.shape)

    if y_is_dict:
      y_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
    elif y is None:
      y_first_el_shape = None
    else:
      y_first_el_shape = (
          [1] + list(y_first_el[0].shape
                     if isinstance(y_first_el, list) else y_first_el.shape))

    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_first_el_shape, y_first_el_shape, n_classes, batch_size)

    # Input dtype of x_first_el.
    if x_is_dict:
      self._input_dtype = dict(
          [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
    else:
      self._input_dtype = _check_dtype(x_first_el.dtype)

    # Output dtype of y_first_el.
    def check_y_dtype(el):
      if isinstance(el, np.ndarray):
        return el.dtype
      elif isinstance(el, list):
        # Recurse into nested lists until a scalar/array is found.
        return check_y_dtype(el[0])
      else:
        return _check_dtype(np.dtype(type(el)))

    # Output types are floats, due to both softmaxes and regression req.
    if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
      self._output_dtype = np.float32
    elif y_is_dict:
      self._output_dtype = dict(
          [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
    elif y is None:
      self._output_dtype = None
    else:
      self._output_dtype = check_y_dtype(y_first_el)

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self):
    """Returns a function, that will sample data and provide it to placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    self.stopped = False

    def _feed_dict_fn():
      """Samples data and provides it to placeholders.

      Returns:
        `dict` of input and output tensors.
      """

      def init_array(shape, dtype):
        """Initialize array of given shape or dict of shapes and dtype."""
        if shape is None:
          return None
        elif isinstance(shape, dict):
          return dict(
              [(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
        else:
          return np.zeros(shape, dtype=dtype)

      def put_data_array(dest, index, source=None, n_classes=None):
        """Puts data array into container."""
        if source is None:
          # Stream exhausted mid-batch: truncate to what was filled.
          dest = dest[:index]
        elif n_classes is not None and n_classes > 1:
          # One-hot encode the class id(s) in place.
          if len(self.output_shape) == 2:
            dest.itemset((index, source), 1.0)
          else:
            for idx, value in enumerate(source):
              dest.itemset(tuple([index, idx, value]), 1.0)
        else:
          if len(dest.shape) > 1:
            dest[index, :] = source
          else:
            dest[index] = source[0] if isinstance(source, list) else source
        return dest

      def put_data_array_or_dict(holder, index, data=None, n_classes=None):
        """Puts data array or data dictionary into container."""
        if holder is None:
          return None
        if isinstance(holder, dict):
          if data is None:
            data = {k: None for k in holder.keys()}
          assert isinstance(data, dict)
          for k in holder.keys():
            num_classes = n_classes[k] if (n_classes is not None and
                                           k in n_classes) else None
            holder[k] = put_data_array(holder[k], index, data[k], num_classes)
        else:
          holder = put_data_array(holder, index, data, n_classes)
        return holder

      if self.stopped:
        raise StopIteration

      inp = init_array(self.input_shape, self._input_dtype)
      out = init_array(self.output_shape, self._output_dtype)

      for i in xrange(self._batch_size):
        # Add handling when queue ends.
        try:
          next_inp = six.next(self._x)
          inp = put_data_array_or_dict(inp, i, next_inp, None)
        except StopIteration:
          # Remember exhaustion so the *next* call raises; re-raise now
          # only if not a single sample was collected.
          self.stopped = True
          if i == 0:
            raise
          inp = put_data_array_or_dict(inp, i, None, None)
          out = put_data_array_or_dict(out, i, None, None)
          break

        if self._y is not None:
          next_out = six.next(self._y)
          out = put_data_array_or_dict(out, i, next_out, self.n_classes)

      # creating feed_dict
      if isinstance(inp, dict):
        feed_dict = dict([(self._input_placeholder[k].name, inp[k])
                          for k in list(self._input_placeholder.keys())])
      else:
        feed_dict = {self._input_placeholder.name: inp}
      if self._y is not None:
        if isinstance(out, dict):
          feed_dict.update(
              dict([(self._output_placeholder[k].name, out[k])
                    for k in list(self._output_placeholder.keys())]))
        else:
          feed_dict.update({self._output_placeholder.name: out})

      return feed_dict

    return _feed_dict_fn
class DaskDataFeeder(object):
  """Data feeder for that reads data from dask.Series and dask.DataFrame.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder will remove requirement to have full dataset in the
  memory and still do random seeks for sampling of batches.
  """

  @deprecated(None, 'Please feed input to tf.data to support dask.')
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DaskDataFeeder instance.

    Args:
      x: iterator that returns for each element, returns features.
      y: iterator that returns for each element, returns 1 or many classes /
        regression values.
      n_classes: indicator of how many classes the label has.
      batch_size: Mini batch size to accumulate.
      shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate so use a
        int value for this if you want consistent sized batches.
      epochs: Number of epochs to run.

    Attributes:
      x: input features.
      y: input label.
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input.
      output_shape: shape of the output.
      input_dtype: dtype of input.
      output_dtype: dtype of output.

    Raises:
      ValueError: if `x` or `y` are `dict`, as they are not supported currently.
    """
    if isinstance(x, dict) or isinstance(y, dict):
      raise ValueError(
          'DaskDataFeeder does not support dictionaries at the moment.')

    # pylint: disable=invalid-name,super-init-not-called
    import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
    # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
    self._x = x
    self._y = y
    # save column names
    self._x_columns = list(x.columns)
    if isinstance(y.columns[0], str):
      self._y_columns = list(y.columns)
    else:
      # deal with cases where two DFs have overlapped default numeric colnames
      self._y_columns = len(self._x_columns) + 1
      self._y = self._y.rename(columns={y.columns[0]: self._y_columns})

    # TODO(terrytangyuan): deal with unsupervised cases
    # combine into a data frame
    self.df = dd.multi.concat([self._x, self._y], axis=1)
    self.n_classes = n_classes

    # NOTE(review): count().compute() forces a pass over the data here.
    x_count = x.count().compute()[0]
    x_shape = (x_count, len(self._x.columns))
    y_shape = (x_count, len(self._y.columns))
    # TODO(terrytangyuan): Add support for shuffle and epochs.
    self._shuffle = shuffle
    self.epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Fraction of rows drawn per batch via random_split in the feed fn.
    self.sample_fraction = self._batch_size / float(x_count)
    self._input_dtype = _check_dtype(self._x.dtypes[0])
    self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
    if random_state is None:
      # Fixed default seed keeps batch sampling reproducible.
      self.random_state = 66
    else:
      self.random_state = random_state

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self, input_placeholder, output_placeholder):
    """Returns a function, that will sample data and provide it to placeholders.

    Args:
      input_placeholder: tf.placeholder for input features mini batch.
      output_placeholder: tf.placeholder for output labels.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """

    def _feed_dict_fn():
      """Samples data and provides it to placeholders."""
      # TODO(ipolosukhin): option for with/without replacement (dev version of
      # dask)
      # Draw approximately batch_size rows as a random fraction of the frame.
      sample = self.df.random_split(
          [self.sample_fraction, 1 - self.sample_fraction],
          random_state=self.random_state)
      inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
      out = extract_pandas_matrix(sample[0][self._y_columns].compute())
      # convert to correct dtype
      inp = np.array(inp, dtype=self._input_dtype)
      # one-hot encode out for each class for cross entropy loss
      if HAS_PANDAS:
        import pandas as pd  # pylint: disable=g-import-not-at-top
        if not isinstance(out, pd.Series):
          out = out.flatten()
      # NOTE(review): max().compute() per batch is expensive; presumably the
      # labels are dense class ids in [0, out_max] — confirm against callers.
      out_max = self._y.max().compute().values[0]
      encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
      encoded_out[np.arange(out.size), out] = 1
      return {input_placeholder.name: inp, output_placeholder.name: encoded_out}

    return _feed_dict_fn
| apache-2.0 |
hslatman/spiderfoot | ext/dns/rdtypes/ANY/NSEC3.py | 42 | 6807 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import base64
import cStringIO
import string
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
# Translation tables between the "base32hex" alphabet used for NSEC3 next
# hashed owner names (RFC 5155) and the standard base32 alphabet understood
# by base64.b32encode/b32decode (Python 2 string.maketrans tables).
b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
                                     'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
                                     '0123456789ABCDEFGHIJKLMNOPQRSTUV')

# hash algorithm constants
SHA1 = 1

# flag constants
OPTOUT = 1
class NSEC3(dns.rdata.Rdata):
"""NSEC3 record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string
@ivar next: the next name hash
@type next: string
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
next, windows):
super(NSEC3, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
self.salt = salt
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = base64.b32encode(self.next).translate(b32_normal_to_hex).lower()
if self.salt == '':
salt = '-'
else:
salt = self.salt.encode('hex-codec')
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%u %u %u %s %s%s' % (self.algorithm, self.flags, self.iterations,
salt, next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == '-':
salt = ''
else:
salt = salt.decode('hex-codec')
next = tok.get_string().upper().translate(b32_hex_to_normal)
next = base64.b32decode(next)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC3 with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
if octets != 0:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
if octets != 0:
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
    """Write this NSEC3 rdata in wire format to *file*.

    Python 2: salt, next and the bitmaps are byte strings, so plain
    write()/chr() interleave safely with struct.pack output.
    """
    l = len(self.salt)
    # Fixed header: algorithm, flags, iterations, then salt length + salt.
    file.write(struct.pack("!BBHB", self.algorithm, self.flags,
                           self.iterations, l))
    file.write(self.salt)
    l = len(self.next)
    # next-hashed-owner is prefixed by a single length octet.
    file.write(struct.pack("!B", l))
    file.write(self.next)
    # Each window is emitted as (window number, bitmap length, bitmap).
    for (window, bitmap) in self.windows:
        file.write(chr(window))
        file.write(chr(len(bitmap)))
        file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
    """Parse NSEC3 rdata from *wire* starting at *current*, *rdlen* bytes long."""
    (algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
                                                         wire[current : current + 5])
    current += 5
    rdlen -= 5
    # .unwrap() detaches the slice from the (possibly shared) wire buffer.
    salt = wire[current : current + slen].unwrap()
    current += slen
    rdlen -= slen
    (nlen, ) = struct.unpack('!B', wire[current])
    current += 1
    rdlen -= 1
    next = wire[current : current + nlen].unwrap()
    current += nlen
    rdlen -= nlen
    windows = []
    # Remaining octets are (window, count, bitmap) triples.
    while rdlen > 0:
        if rdlen < 3:
            raise dns.exception.FormError("NSEC3 too short")
        window = ord(wire[current])
        octets = ord(wire[current + 1])
        # A bitmap must be 1..32 octets per the wire format.
        if octets == 0 or octets > 32:
            raise dns.exception.FormError("bad NSEC3 octets")
        current += 2
        rdlen -= 2
        if rdlen < octets:
            raise dns.exception.FormError("bad NSEC3 bitmap length")
        bitmap = wire[current : current + octets].unwrap()
        current += octets
        rdlen -= octets
        windows.append((window, bitmap))
    return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, windows)

from_wire = classmethod(from_wire)
def _cmp(self, other):
    """Order two NSEC3 rdatas by comparing their wire images (Python 2 cmp)."""
    b1 = cStringIO.StringIO()
    self.to_wire(b1)
    b2 = cStringIO.StringIO()
    other.to_wire(b2)
    return cmp(b1.getvalue(), b2.getvalue())
| gpl-2.0 |
Nexenta/cinder | cinder/wsgi/wsgi.py | 9 | 1282 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cinder OS API WSGI application."""
import sys
import warnings
from cinder import objects
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config
from cinder import rpc
from cinder import version
CONF = cfg.CONF
def initialize_application():
    """Build and return the Cinder OS API WSGI application.

    Order matters here: versioned objects must be registered and CONF
    parsed before logging, middleware defaults and RPC are initialized.
    """
    objects.register_all()
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    config.set_middleware_defaults()
    rpc.init(CONF)
    # Load the 'osapi_volume' pipeline from the paste-deploy config.
    return wsgi.Loader(CONF).load_app(name='osapi_volume')
| apache-2.0 |
louietsai/python-for-android | python3-alpha/python3-src/android-scripts/say_chat.py | 46 | 2159 | __author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import logging
import android
from pyxmpp2.jid import JID
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.interfaces import XMPPFeatureHandler
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT
from pyxmpp2.interfaces import message_stanza_handler
from pyxmpp2.streamevents import DisconnectedEvent
from pyxmpp2.ext.version import VersionProvider
logging.basicConfig(level = logging.INFO)
xmpp_trace = False
class SayChat(EventHandler, XMPPFeatureHandler):
    """XMPP chat client that speaks incoming messages via Android TTS."""

    def __init__(self):
        self.droid = android.Android()
        settings = XMPPSettings({"software_name": "Say Chat"})
        # Credentials are collected interactively via Android dialogs.
        settings["jid"] = self.droid.dialogGetInput("Google Talk Username").result
        settings["password"] = self.droid.dialogGetInput("Google Talk Password").result
        settings["server"] = "talk.google.com"
        settings["starttls"] = True
        self.client = Client(
            JID(settings["jid"]),
            [self, VersionProvider(settings)],
            settings)

    def connect(self):
        """Connect and run the client's main loop (blocks until it stops)."""
        self.client.connect()
        self.client.run()

    def disconnect(self):
        """Disconnect, allowing up to 2 seconds for a clean shutdown."""
        self.client.disconnect()
        self.client.run(timeout = 2)

    @message_stanza_handler()
    def handle_message(self, stanza):
        # Speak "<sender> says <body>" on the device.
        self.droid.ttsSpeak(
            "{!s} says {!s}".format(stanza.from_jid.as_unicode(),
                                    stanza.body))
        return ""

    @event_handler(DisconnectedEvent)
    def handle_disconnected(self, event):
        # Returning QUIT stops the pyxmpp2 event loop.
        return QUIT

    @event_handler()
    def handle_all(self, event):
        """If it's not logged, it didn't happen."""
        logging.info("-- {}".format(event))

    def run(self):
        """Run until interrupted; Ctrl-C triggers a clean disconnect."""
        try:
            self.connect()
        except KeyboardInterrupt:
            self.disconnect()
# Script entry: optionally wire up raw XMPP stream tracing, then start the
# client. Runs at import time (no __main__ guard) — typical for SL4A scripts.
if xmpp_trace:
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
        logger = logging.getLogger(logger)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        # Keep stream traces out of the root logger's handlers.
        logger.propagate = False

saychat = SayChat()
saychat.run()
| apache-2.0 |
dmitry-sobolev/ansible | lib/ansible/modules/database/proxysql/proxysql_mysql_users.py | 12 | 17560 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: proxysql_mysql_users
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Adds or removes mysql users from proxysql admin interface.
description:
- The M(proxysql_mysql_users) module adds or removes mysql users using the
proxysql admin interface.
options:
username:
description:
- Name of the user connecting to the mysqld or ProxySQL instance.
required: True
password:
description:
- Password of the user connecting to the mysqld or ProxySQL instance.
active:
description:
- A user with I(active) set to C(False) will be tracked in the database,
but will be never loaded in the in-memory data structures. If omitted
the proxysql database default for I(active) is C(True).
use_ssl:
description:
- If I(use_ssl) is set to C(True), connections by this user will be made
using SSL connections. If omitted the proxysql database default for
I(use_ssl) is C(False).
default_hostgroup:
description:
- If there is no matching rule for the queries sent by this user, the
traffic it generates is sent to the specified hostgroup.
If omitted the proxysql database default for I(use_ssl) is 0.
default_schema:
description:
- The schema to which the connection should change to by default.
transaction_persistent:
description:
- If this is set for the user with which the MySQL client is connecting
to ProxySQL (thus a "frontend" user), transactions started within a
hostgroup will remain within that hostgroup regardless of any other
rules.
If omitted the proxysql database default for I(transaction_persistent) is
C(False).
fast_forward:
description:
- If I(fast_forward) is set to C(True), I(fast_forward) will bypass the
query processing layer (rewriting, caching) and pass through the query
directly as is to the backend server. If omitted the proxysql database
default for I(fast_forward) is C(False).
backend:
description:
- If I(backend) is set to C(True), this (username, password) pair is
used for authenticating to the ProxySQL instance.
default: True
frontend:
description:
- If I(frontend) is set to C(True), this (username, password) pair is
used for authenticating to the mysqld servers against any hostgroup.
default: True
max_connections:
description:
- The maximum number of connections ProxySQL will open to the backend for
this user. If omitted the proxysql database default for
I(max_connections) is 10000.
state:
description:
- When C(present) - adds the user, when C(absent) - removes the user.
choices: [ "present", "absent" ]
default: present
save_to_disk:
description:
- Save mysql host config to sqlite db on disk to persist the
configuration.
default: True
load_to_runtime:
description:
- Dynamically load mysql host config to runtime memory.
default: True
login_user:
description:
- The username used to authenticate to ProxySQL admin interface.
default: None
login_password:
description:
- The password used to authenticate to ProxySQL admin interface.
default: None
login_host:
description:
- The host used to connect to ProxySQL admin interface.
default: '127.0.0.1'
login_port:
description:
- The port used to connect to ProxySQL admin interface.
default: 6032
config_file:
description:
- Specify a config file from which login_user and login_password are to
be read.
default: ''
'''
EXAMPLES = '''
---
# This example adds a user, it saves the mysql user config to disk, but
# avoids loading the mysql user config to runtime (this might be because
# several users are being added and the user wants to push the config to
# runtime in a single batch using the M(proxysql_manage_config) module). It
# uses supplied credentials to connect to the proxysql admin interface.
- proxysql_mysql_users:
login_user: 'admin'
login_password: 'admin'
username: 'productiondba'
state: present
load_to_runtime: False
# This example removes a user, saves the mysql user config to disk, and
# dynamically loads the mysql user config to runtime. It uses credentials
# in a supplied config file to connect to the proxysql admin interface.
- proxysql_mysql_users:
config_file: '~/proxysql.cnf'
username: 'mysqlboy'
state: absent
'''
RETURN = '''
stdout:
description: The mysql user modified or removed from proxysql
returned: On create/update will return the newly modified user, on delete
it will return the deleted record.
type: dict
sample: {
"changed": true,
"msg": "Added user to mysql_users",
"state": "present",
"user": {
"active": "1",
"backend": "1",
"default_hostgroup": "1",
"default_schema": null,
"fast_forward": "0",
"frontend": "1",
"max_connections": "10000",
"password": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"schema_locked": "0",
"transaction_persistent": "0",
"use_ssl": "0",
"username": "guest_ro"
},
"username": "guest_ro"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
try:
import MySQLdb
import MySQLdb.cursors
except ImportError:
MYSQLDB_FOUND = False
else:
MYSQLDB_FOUND = True
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
    """Validate module preconditions, failing the Ansible run on violation.

    Verifies login_port lies in the valid TCP port range and that the
    MySQLdb client library was importable.
    """
    port = module.params["login_port"]
    if not 0 <= port <= 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)")
    if not MYSQLDB_FOUND:
        module.fail_json(msg="the python mysqldb module is required")
def save_config_to_disk(cursor):
    """Persist the in-memory mysql users config to ProxySQL's on-disk DB."""
    command = "SAVE MYSQL USERS TO DISK"
    cursor.execute(command)
    return True
def load_config_to_runtime(cursor):
    """Push the mysql users config into ProxySQL's runtime data structures."""
    command = "LOAD MYSQL USERS TO RUNTIME"
    cursor.execute(command)
    return True
class ProxySQLUser(object):
    """Manages a single row of ProxySQL's mysql_users admin table.

    The natural key of a user is (username, backend, frontend); the
    remaining columns are optional and only touched when supplied.
    """

    def __init__(self, module):
        self.state = module.params["state"]
        self.save_to_disk = module.params["save_to_disk"]
        self.load_to_runtime = module.params["load_to_runtime"]
        self.username = module.params["username"]
        self.backend = module.params["backend"]
        self.frontend = module.params["frontend"]
        # Optional columns; None means "not specified" and the column is
        # skipped when building queries below.
        config_data_keys = ["password",
                            "active",
                            "use_ssl",
                            "default_hostgroup",
                            "default_schema",
                            "transaction_persistent",
                            "fast_forward",
                            "max_connections"]
        self.config_data = dict((k, module.params[k])
                                for k in config_data_keys)

    def check_user_config_exists(self, cursor):
        """Return True if a row matching the user's natural key exists."""
        query_string = \
            """SELECT count(*) AS `user_count`
               FROM mysql_users
               WHERE username = %s
                 AND backend = %s
                 AND frontend = %s"""
        query_data = \
            [self.username,
             self.backend,
             self.frontend]
        cursor.execute(query_string, query_data)
        check_count = cursor.fetchone()
        return (int(check_count['user_count']) > 0)

    def check_user_privs(self, cursor):
        """Return True if a row already matches every supplied optional
        column (i.e. no update would be needed)."""
        query_string = \
            """SELECT count(*) AS `user_count`
               FROM mysql_users
               WHERE username = %s
                 AND backend = %s
                 AND frontend = %s"""
        query_data = \
            [self.username,
             self.backend,
             self.frontend]
        # Append an equality predicate for each explicitly-set option.
        for col, val in iteritems(self.config_data):
            if val is not None:
                query_data.append(val)
                query_string += "\n AND " + col + " = %s"
        cursor.execute(query_string, query_data)
        check_count = cursor.fetchone()
        return (int(check_count['user_count']) > 0)

    def get_user_config(self, cursor):
        """Fetch and return the user's full mysql_users row (dict or None)."""
        query_string = \
            """SELECT *
               FROM mysql_users
               WHERE username = %s
                 AND backend = %s
                 AND frontend = %s"""
        query_data = \
            [self.username,
             self.backend,
             self.frontend]
        cursor.execute(query_string, query_data)
        user = cursor.fetchone()
        return user

    def create_user_config(self, cursor):
        """INSERT the user with whichever optional columns were supplied."""
        query_string = \
            """INSERT INTO mysql_users (
               username,
               backend,
               frontend"""
        cols = 3
        query_data = \
            [self.username,
             self.backend,
             self.frontend]
        # Grow the column list and values together for each supplied option.
        for col, val in iteritems(self.config_data):
            if val is not None:
                cols += 1
                query_data.append(val)
                query_string += ",\n" + col
        # One placeholder per column; the trailing ", " is trimmed before
        # closing the VALUES parenthesis.
        query_string += \
            (")\n" +
             "VALUES (" +
             "%s ," * cols)
        query_string = query_string[:-2]
        query_string += ")"
        cursor.execute(query_string, query_data)
        return True

    def update_user_config(self, cursor):
        """UPDATE the supplied optional columns for the user's key row."""
        query_string = """UPDATE mysql_users"""
        cols = 0
        query_data = []
        for col, val in iteritems(self.config_data):
            if val is not None:
                cols += 1
                query_data.append(val)
                # First assignment opens the SET clause; later ones continue it.
                if cols == 1:
                    query_string += "\nSET " + col + "= %s,"
                else:
                    query_string += "\n " + col + " = %s,"
        # Drop the trailing comma, then constrain to the natural key.
        query_string = query_string[:-1]
        query_string += ("\nWHERE username = %s\n AND backend = %s" +
                         "\n AND frontend = %s")
        query_data.append(self.username)
        query_data.append(self.backend)
        query_data.append(self.frontend)
        cursor.execute(query_string, query_data)
        return True

    def delete_user_config(self, cursor):
        """DELETE the user's row from mysql_users."""
        query_string = \
            """DELETE FROM mysql_users
               WHERE username = %s
                 AND backend = %s
                 AND frontend = %s"""
        query_data = \
            [self.username,
             self.backend,
             self.frontend]
        cursor.execute(query_string, query_data)
        return True

    def manage_config(self, cursor, state):
        """Persist and/or load the config if a change happened and the
        module's save_to_disk / load_to_runtime flags allow it."""
        if state:
            if self.save_to_disk:
                save_config_to_disk(cursor)
            if self.load_to_runtime:
                load_config_to_runtime(cursor)

    def create_user(self, check_mode, result, cursor):
        """Create the user, honouring Ansible check mode."""
        if not check_mode:
            result['changed'] = \
                self.create_user_config(cursor)
            result['msg'] = "Added user to mysql_users"
            result['user'] = \
                self.get_user_config(cursor)
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("User would have been added to" +
                             " mysql_users, however check_mode" +
                             " is enabled.")

    def update_user(self, check_mode, result, cursor):
        """Update the user's row, honouring Ansible check mode."""
        if not check_mode:
            result['changed'] = \
                self.update_user_config(cursor)
            result['msg'] = "Updated user in mysql_users"
            result['user'] = \
                self.get_user_config(cursor)
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("User would have been updated in" +
                             " mysql_users, however check_mode" +
                             " is enabled.")

    def delete_user(self, check_mode, result, cursor):
        """Delete the user's row, honouring Ansible check mode."""
        if not check_mode:
            # Capture the row before deleting so it can be reported back.
            result['user'] = \
                self.get_user_config(cursor)
            result['changed'] = \
                self.delete_user_config(cursor)
            result['msg'] = "Deleted user from mysql_users"
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("User would have been deleted from" +
                             " mysql_users, however check_mode is" +
                             " enabled.")
# ===========================================
# Module execution.
#
def main():
    """Ansible module entry point: reconcile desired vs. actual user state."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None, type='str'),
            login_password=dict(default=None, no_log=True, type='str'),
            login_host=dict(default="127.0.0.1"),
            login_unix_socket=dict(default=None),
            login_port=dict(default=6032, type='int'),
            config_file=dict(default='', type='path'),
            username=dict(required=True, type='str'),
            password=dict(no_log=True, type='str'),
            active=dict(type='bool'),
            use_ssl=dict(type='bool'),
            default_hostgroup=dict(type='int'),
            default_schema=dict(type='str'),
            transaction_persistent=dict(type='bool'),
            fast_forward=dict(type='bool'),
            backend=dict(default=True, type='bool'),
            frontend=dict(default=True, type='bool'),
            max_connections=dict(type='int'),
            state=dict(default='present', choices=['present',
                                                   'absent']),
            save_to_disk=dict(default=True, type='bool'),
            load_to_runtime=dict(default=True, type='bool')
        ),
        supports_check_mode=True
    )

    perform_checks(module)

    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    config_file = module.params["config_file"]

    cursor = None
    try:
        # DictCursor so rows come back keyed by column name.
        cursor = mysql_connect(module,
                               login_user,
                               login_password,
                               config_file,
                               cursor_class=MySQLdb.cursors.DictCursor)
    except MySQLdb.Error:
        e = get_exception()
        module.fail_json(
            msg="unable to connect to ProxySQL Admin Module.. %s" % e
        )

    proxysql_user = ProxySQLUser(module)
    result = {}

    result['state'] = proxysql_user.state
    if proxysql_user.username:
        result['username'] = proxysql_user.username

    if proxysql_user.state == "present":
        try:
            # Row missing or differing from the requested config?
            if not proxysql_user.check_user_privs(cursor):
                if not proxysql_user.check_user_config_exists(cursor):
                    proxysql_user.create_user(module.check_mode,
                                              result,
                                              cursor)
                else:
                    proxysql_user.update_user(module.check_mode,
                                              result,
                                              cursor)
            else:
                result['changed'] = False
                result['msg'] = ("The user already exists in mysql_users" +
                                 " and doesn't need to be updated.")
                result['user'] = \
                    proxysql_user.get_user_config(cursor)
        except MySQLdb.Error:
            e = get_exception()
            module.fail_json(
                msg="unable to modify user.. %s" % e
            )

    elif proxysql_user.state == "absent":
        try:
            if proxysql_user.check_user_config_exists(cursor):
                proxysql_user.delete_user(module.check_mode,
                                          result,
                                          cursor)
            else:
                result['changed'] = False
                result['msg'] = ("The user is already absent from the" +
                                 " mysql_users memory configuration")
        except MySQLdb.Error:
            e = get_exception()
            module.fail_json(
                msg="unable to remove user.. %s" % e
            )

    module.exit_json(**result)

if __name__ == '__main__':
    main()
| gpl-3.0 |
jwlawson/tensorflow | tensorflow/python/grappler/layout_optimizer_test.py | 2 | 47275 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
  """Create a deterministic truncated-normal weight tensor of `shape`."""
  w = random_ops.truncated_normal(shape, seed=0, stddev=0.1)
  return w
def _bias(shape):
  """Create a constant bias tensor of `shape`, filled with 0.1."""
  fill_value = 0.1
  return constant_op.constant(fill_value, shape=shape)
def _conv2d(x, w):
  """2-D convolution of `x` with filters `w`: stride 1, SAME padding."""
  unit_strides = [1, 1, 1, 1]
  return nn.conv2d(x, w, strides=unit_strides, padding='SAME')
def _max_pool_2x2(x):
  """2x2 max pooling with stride 2 — halves the spatial dimensions."""
  window = [1, 2, 2, 1]
  return nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
  """Two conv/relu/maxpool layers over a flattened 28x28x1 image batch."""
  image = array_ops.reshape(x, [-1, 28, 28, 1])
  # Layer 1: 5x5 conv, 1 -> 32 channels.
  filters1 = _weight([5, 5, 1, 32])
  bias1 = _bias([32])
  act1 = nn.relu(_conv2d(image, filters1) + bias1)
  pooled1 = _max_pool_2x2(act1)
  # Layer 2: 5x5 conv, 32 -> 64 channels.
  filters2 = _weight([5, 5, 32, 64])
  bias2 = _bias([64])
  act2 = nn.relu(_conv2d(pooled1, filters2) + bias2)
  return _max_pool_2x2(act2)
def _model_with_second_port():
  """Graph whose Add consumes fused_batch_norm's second output (mean)."""
  random_seed.set_random_seed(0)
  inp = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
  gamma = constant_op.constant(0.1, shape=[4])
  beta = constant_op.constant(0.3, shape=[4])
  normed, batch_mean, _ = nn.fused_batch_norm(inp, gamma, beta)
  combined = math_ops.add(normed, batch_mean)
  return array_ops.identity(combined)
def _model_with_branch(x):
  """Two parallel convolutions of the same input, summed together."""
  image = array_ops.reshape(x, [-1, 28, 28, 1])
  filters_a = _weight([5, 5, 1, 32])
  filters_b = _weight([5, 5, 1, 32])
  branch_a = _conv2d(image, filters_a)
  branch_b = _conv2d(image, filters_b)
  return math_ops.add(branch_a, branch_b)
def _model_with_vec_and_4d(x):
  """A 4-D convolution output added to a 1-D constant vector."""
  image = array_ops.reshape(x, [-1, 28, 28, 1])
  filters = _weight([5, 5, 1, 32])
  conv_out = _conv2d(image, filters)
  bias_vec = constant_op.constant(6.4, shape=[32])
  return math_ops.add(conv_out, bias_vec)
def _loop():
  """map_fn over four random inputs through the two-layer model."""
  random_seed.set_random_seed(0)
  # Four identically-seeded random inputs, built in order.
  elems = tuple(
      random_ops.truncated_normal([1, 784], seed=0) for _ in range(4))
  return functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
def _loop_with_branch():
  """map_fn over four random inputs through the branching model."""
  random_seed.set_random_seed(0)
  elems = tuple(
      random_ops.truncated_normal([1, 784], seed=0) for _ in range(4))
  return functional_ops.map_fn(
      _model_with_branch, elems, dtype=dtypes.float32)
def _loop_with_vec_and_4d():
  """map_fn over four random inputs through the vec+4d model."""
  random_seed.set_random_seed(0)
  elems = tuple(
      random_ops.truncated_normal([1, 784], seed=0) for _ in range(4))
  return functional_ops.map_fn(
      _model_with_vec_and_4d, elems, dtype=dtypes.float32)
def _get_config(layout_optimizer=True):
  """Build a ConfigProto with the layout optimizer toggled on or off."""
  toggle = (rewriter_config_pb2.RewriterConfig.ON
            if layout_optimizer else rewriter_config_pb2.RewriterConfig.OFF)
  rewrite_options = rewriter_config_pb2.RewriterConfig(layout_optimizer=toggle)
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  return config_pb2.ConfigProto(graph_options=graph_options)
def _simple_metagraph(depthwise=False):
  """Export a MetaGraph for a small two-conv training graph."""
  random_seed.set_random_seed(0)
  x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
  if depthwise:
    conv = conv_layers.separable_conv2d
  else:
    conv = conv_layers.conv2d
  y = conv(x, 32, [3, 3])
  z = conv(y, 32, [3, 3])
  optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
  loss = math_ops.reduce_mean(z)
  train_op = optimizer.minimize(loss)
  graph = ops.get_default_graph()
  graph.add_to_collection('train_op', train_op)
  return saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
def _get_cluster():
  """Create a grappler Cluster describing one virtual GPU device."""
  gpu = device_properties_pb2.NamedDevice()
  gpu.name = '/GPU:0'
  gpu.properties.type = 'GPU'
  gpu.properties.environment['architecture'] = '4'
  return gcluster.Cluster(devices=[gpu])
def _is_transpose(node):
return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith(
'TransposeNCHWToNHWC-LayoutOptimizer')
def _is_permute(node):
return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith(
'VecPermuteNCHWToNHWC-LayoutOptimizer')
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
  """Assert an NCHW->NHWC transpose was inserted for `name`."""
  self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
  """Assert an NHWC->NCHW transpose was inserted for `name`."""
  self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
  """Assert a dimension-map (axis remapping) node was inserted for `name`."""
  self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
  """Assert an NCHW->NHWC vector permutation was inserted for `name`."""
  self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
  """Assert an NHWC->NCHW vector permutation was inserted for `name`."""
  self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
  """Train a tiny two-conv net for 2 steps; save or restore a checkpoint.

  When restore=True, restores from `checkpoint_path` and returns the
  final values of all global variables; otherwise initializes fresh
  variables and saves a checkpoint after training.
  """
  ops.reset_default_graph()
  graph = ops.get_default_graph()
  with session.Session(
      config=_get_config(layout_optimizer), graph=graph) as sess:
    batch = 2
    height = 6
    width = 7
    input_channels = 3
    shape = [batch, height, width, input_channels]
    image = array_ops.placeholder(dtype='float32', shape=shape)
    conv1 = conv_layers.conv2d(image, 32, [3, 3])
    conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
    optimizer = gradient_descent.GradientDescentOptimizer(0.01)
    loss = math_ops.reduce_mean(conv2)
    train_op = optimizer.minimize(loss)
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
    if restore:
      saver.restore(sess, checkpoint_path)
    else:
      sess.run(variables.global_variables_initializer())
    # Fixed numpy seed so both training runs see identical input batches.
    np.random.seed(0)
    for _ in range(2):
      image_val = np.random.rand(*shape).astype(np.float32)
      sess.run([loss, train_op], feed_dict={image: image_val})
    if restore:
      all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      all_vars_values = [var.eval(session=sess) for var in all_vars]
      return all_vars_values
    else:
      saver.save(sess, checkpoint_path)
def testTwoConvLayers(self):
  """Optimized and unoptimized runs agree; exactly 2 transposes remain."""
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    output = _two_layer_model(x)
    # Reference run without the layout optimizer.
    with session.Session() as sess:
      output_val_ref = sess.run(output)
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)
    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitWithNonConstAxis(self):
  """Split along a feed-supplied axis: axis input must be dim-mapped."""
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    # Axis comes in through a placeholder, so it is not a compile-time const.
    dim = array_ops.placeholder(dtype='int32')
    split = array_ops.split(conv, 2, axis=dim)
    scale = constant_op.constant(0.1, shape=[32])
    offset = constant_op.constant(0.3, shape=[32])
    bn0 = nn.fused_batch_norm(split[0], scale, offset)
    bn1 = nn.fused_batch_norm(split[1], scale, offset)
    add = bn0[0] + bn1[0]
    output = array_ops.identity(add)
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict={dim: 3})
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
    # The runtime split axis is remapped NHWC->NCHW by a DimMap node.
    self._assert_map_nhwc_to_nchw('split-0', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSplitVWithNonConstAxis(self):
  """SplitV with a feed-supplied axis: axis input must be dim-mapped."""
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    dim = array_ops.placeholder(dtype='int32')
    sizes = constant_op.constant([50, 10, 4], shape=[3])
    split = gen_array_ops._split_v(
        value=conv, size_splits=sizes, axis=dim, num_split=3)
    output = math_ops.reduce_sum(split[0])
    with session.Session() as sess:
      output_val_ref = sess.run(output, feed_dict={dim: 3})
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)
    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
    self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithConstPaddings(self):
  """Pad with constant paddings: paddings get a permuted const copy."""
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
    paddings = constant_op.constant(
        paddings_val, dtype='int32', name='PaddingsConst')
    pad = array_ops.pad(conv, paddings)
    output = array_ops.identity(pad)
    with session.Session() as sess:
      output_val_ref = sess.run(output)
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)
    # Four transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 2
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
    # Constant paddings are rewritten into a layout-permuted constant.
    self.assertIn('Pad-PaddingsConst-LayoutOptimizer', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSum(self):
  """Full reduce_sum is layout-agnostic: only one transpose survives."""
  if test.is_gpu_available(cuda_only=True):
    random_seed.set_random_seed(0)
    x = random_ops.truncated_normal([1, 784], seed=0)
    conv = _two_layer_model(x)
    reduce_sum = math_ops.reduce_sum(conv)
    output = array_ops.identity(reduce_sum)
    with session.Session() as sess:
      output_val_ref = sess.run(output)
    with session.Session(config=_get_config()) as sess:
      metadata = config_pb2.RunMetadata()
      output_val = sess.run(output, run_metadata=metadata)
    nodes = []
    num_transposes = 0
    for node in metadata.cost_graph.node:
      if _is_transpose(node.name):
        num_transposes += 1
      nodes.append(node.name)
    # Three transposes were initially added in the Expand phase of
    # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
    expected_num_transposes = 1
    self.assertEqual(expected_num_transposes, num_transposes)
    self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
    self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testCast(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
cast = math_ops.cast(conv, dtype='bool')
output = array_ops.identity(cast)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSqueeze(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
squeeze = array_ops.squeeze(reduce_sum)
output = array_ops.identity(squeeze)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongHWC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReduceSumAlongC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3])
output = array_ops.identity(reduce_sum)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testConcatWithControlDependency(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
axis = constant_op.constant(3)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([conv, conv], axis)
output = array_ops.identity(concat)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
self.assertIn('concat-Const_2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testFill(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shape = array_ops.shape(conv)
scalar = array_ops.constant(5.7)
fill = array_ops.fill(shape, scalar)
output = array_ops.identity(fill)
x_val = [3.4] * 784
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
num_vec_permute = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
if _is_permute(node.name):
num_vec_permute += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
# Two vector permute nodes were initially added in the Expand phase of
# LayoutOptimizer; they cancelled out each other in the Collapse phase.
expected_vec_permute = 0
self.assertEqual(expected_vec_permute, num_vec_permute)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testTile(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
multiple = array_ops.placeholder(dtype='int32')
tile = array_ops.tile(conv, multiple)
output = array_ops.identity(tile)
multiple_val = [2, 3, 4, 1]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
multiple: multiple_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = constant_op.constant([3, 1], name='DimsConst')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self.assertIn('ReverseV2-DimsConst-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testReverseWithNonConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = array_ops.placeholder(dtype='int32')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
dims_val = [2, 3]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={dims: dims_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
dims: dims_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOp(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
mean = math_ops.reduce_mean(conv)
condition = math_ops.less(conv, mean)
select = gen_math_ops._select(condition, conv, add)
output = array_ops.identity(select)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSelectOpScalarCondition(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = constant_op.constant(True)
select = gen_math_ops._select(condition, conv, add)
output = array_ops.identity(select)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testPadWithNonConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings = array_ops.placeholder(dtype='int32')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
paddings: paddings_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool = gen_nn_ops._max_pool_v2(conv, ksize, strides, 'VALID')
output = array_ops.identity(max_pool)
strides_val = [1, 3, 2, 1]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
self.assertIn('MaxPoolV2-Const_2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testMaxPoolGradV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
strides, 'VALID')
output = array_ops.identity(max_pool_grad)
strides_val = [1, 3, 2, 1]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
self.assertIn('MaxPoolGradV2-Const_2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(conv, [0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 2, 3, 4]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
size: size_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
output = array_ops.identity(s)
end_val = [1, 2, 3, 4]
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
self.assertIn('StridedSlice-StridedSlice/begin-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-StridedSlice/strides-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceWithMask(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and end mask.
s = conv[:, :, 1:-1, :]
output = array_ops.identity(s)
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-strided_slice/stack-LayoutOptimizer', nodes)
self.assertIn('strided_slice-strided_slice/stack_1-LayoutOptimizer',
nodes)
self.assertIn('strided_slice-strided_slice/stack_2-LayoutOptimizer',
nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testStridedSliceGradWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
shape = array_ops.shape(conv)
end_val = [1, 2, 3, 4]
s = array_ops.strided_slice(
conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
[1, 2, 3, 1], s)
output = array_ops.identity(s_grad)
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
self.assertIn('StridedSlice-StridedSliceGrad/begin-LayoutOptimizer',
nodes)
self.assertIn('StridedSlice-StridedSliceGrad/strides-LayoutOptimizer',
nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testShapeN(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shapen = array_ops.shape_n([conv, conv])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session() as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
self.assertAllEqual(output_val_ref, output_val)
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithBranch(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_branch()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoopWithVecAnd4D(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_vec_and_4d()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testBinaryOpSecondPort(self):
if test.is_gpu_available(cuda_only=True):
output = _model_with_second_port()
with session.Session() as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('FusedBatchNorm-0', nodes)
self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testGradient(self):
meta_graph = _simple_metagraph()
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
optimized_graph = tf_optimizer.OptimizeGraph(
rewrite_options, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
rewrite_options = rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
optimized_graph = tf_optimizer.OptimizeGraph(
rewrite_options, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
# Run the layout-optimizer test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
Tomtomgo/phantomjs | src/breakpad/src/tools/gyp/test/variables/gyptest-commands-repeated.py | 138 | 1476 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Test variable expansion of '<!()' syntax commands where they are evaluated
more than once.
"""

import os

import TestGyp

test = TestGyp.TestGyp(format='gypd')

expect = test.read('commands-repeated.gyp.stdout')

# Set $HOME so that gyp doesn't read the user's actual
# ~/.gyp/include.gypi file, which may contain variables
# and other settings that would change the output.
os.environ['HOME'] = test.workpath()

test.run_gyp('commands-repeated.gyp',
             '--debug', 'variables', '--debug', 'general',
             stdout=expect)

# Verify the commands-repeated.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system.  Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands-repeated.gypd').replace('\r\n', '\n')
expect = test.read('commands-repeated.gypd.golden')
if not test.match(contents, expect):
  # Fix: this block previously called `self.diff(...)`, but there is no
  # `self` at script scope (NameError); the diff helper lives on `test`.
  # print(...) with a single argument works on both Python 2 and 3.
  print("Unexpected contents of `commands-repeated.gypd'")
  test.diff(expect, contents, 'commands-repeated.gypd ')
  test.fail_test()

test.pass_test()
| bsd-3-clause |
navicore/pollenc | pollen/login_preparer.py | 2 | 1040 | # Copyright Amaret, Inc 2011-2015. All rights reserved.
# pylint: disable=missing-docstring
# pylint: disable=bad-whitespace
''' Pollen Cloud Compiler Client '''
import hashlib
import time
import os
import random
from pollen.scrlogger import ScrLogger
from pollen import utils
LOGGER = ScrLogger()
class LoginPreparer(object):
    """Builds the JSON payload for an anonymous login request."""

    def __init__(self, args_):
        self.args = args_
        # Per-process agent id; the random suffix reduces collisions when
        # several clients start at the same time.
        self.aid = str(os.getpid()) + '_' + str(random.randint(1, 10000))
        self.jsonobj = {}

    def _prep_request(self):
        """Populate self.jsonobj with a fresh login request envelope."""
        token = utils.anon_token()
        # Fix: hashlib.sha1 requires bytes on Python 3; encoding explicitly
        # keeps the same digest on Python 2 (ASCII input) and works on 3.
        tid = hashlib.sha1((str(time.time()) + '-' +
                            token).encode('utf-8')).hexdigest()
        jsonobj = {'tid': tid,
                   'aid': self.aid,
                   'type': 'request',
                   'service': 'login',
                   'user': {'token': token,
                            'id': 0,
                            'name': 'None'}}
        self.jsonobj = jsonobj

    def prepare(self):
        """Return the login request dict (also cached on self.jsonobj)."""
        self._prep_request()
        return self.jsonobj
| gpl-2.0 |
LPM-HMS/COSMOS2 | cosmos/api.py | 2 | 6632 | # from .core.cmd_fxn.io import find, out_dir, forward
import contextlib
import importlib
import inspect
import json
import pprint
from functools import wraps
import funcsigs
import os
import re
from decorator import decorator
import sys
from cosmos import (
WorkflowStatus,
StageStatus,
TaskStatus,
NOOP,
signal_workflow_status_change,
signal_stage_status_change,
signal_task_status_change,
Dependency,
)
from cosmos.core.cmd_fxn.signature import default_cmd_fxn_wrapper
from cosmos.graph.draw import draw_task_graph, draw_stage_graph, pygraphviz_available
from cosmos.models.Cosmos import Cosmos, default_get_submit_args
from cosmos.models.Stage import Stage
from cosmos.models.Task import Task
from cosmos.models.Workflow import Workflow, default_task_log_output_dir
from cosmos.util.args import add_workflow_args
from cosmos.util.helpers import make_dict, isinstance_namedtuple
from cosmos.util.iterstuff import only_one
from cosmos.util.signal_handlers import SGESignalHandler, handle_sge_signals
def load_input(out_file):
    # Intentional no-op placeholder; kept so importers of this symbol keep
    # working.  NOTE(review): `out_file` is unused — confirm callers before
    # removing.
    pass
def arg_to_str(name, value):
    """Render a single command-line flag/value pair.

    None and False render as "" (flag omitted), True renders as the bare
    flag, and any other value renders as "<name> <value>".
    """
    if value is None or value is False:
        return ""
    if value is True:
        return name
    return "%s %s" % (name, value)
def args_to_str(*args):
    """
    Turn a set of arguments into a string to be passed to a command line tool
    If value is None or False it will be ignored.
    If value is True, emit --{arg_flag} without specifing a value.
    Otherwise emit --{arg_flag} {value}.
    :param args: An iterable of (str arg_flag, value) tuples.
    >>> x = 'bar'
    >>> y = None
    >>> z = 123
    >>> f = True
    >>> args_to_str(('--foo', x))
    '--foo bar'
    >>> args_to_str(('--flag', f))
    '--flag'
    >>> args_to_str(('--skip-me', y), ('--use-me', z))
    '--use-me 123'
    """
    # Render each pair once, then keep only the non-empty fragments.
    rendered = (arg_to_str(flag, value) for flag, value in args)
    return " \\\n".join(fragment for fragment in rendered if fragment)
@contextlib.contextmanager
def cd(path):
    """
    A context manager which changes the working directory to the given
    path, and then changes it back to its previous value on exit.
    """
    prev_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # Fix: restore the cwd even when the body raises; previously an
        # exception skipped this chdir and left the process in `path`.
        os.chdir(prev_cwd)
@decorator
def bash_call(func, *args, **kwargs):
    """
    A function decorator which provides a way to avoid writing boilerplate argparse code when defining a Task.
    It converts the decorated function to produce a bash script that uses python to import the function and
    call it with the same arguments.
    Current Limitations:
    * Function must be importable
    * No partials
    >>> def echo(arg1, out_file='out.txt'):
    ...     with open(out_file) as fp:
    ...         print(arg1, file=fp)
    >>> print(bash_call(echo)(arg1='hello world'))
    <BLANKLINE>
    python - <<EOF
    <BLANKLINE>
    try:
        from cosmos.api import echo
    except ImportError:
        import imp
        echo = imp.load_source('echo', '<doctest cosmos.api.bash_call[0]>').echo
    <BLANKLINE>
    echo(**
    {'arg1': 'hello '
             'world',
     'out_file': 'out.txt'}
    )
    <BLANKLINE>
    EOF
    """
    # The `decorator` package passes every argument positionally, so map the
    # positional values back onto the function's parameter names.
    # NOTE(review): this rebinds the `kwargs` parameter — presumably
    # `decorator` has already folded keyword args/defaults into `args`;
    # confirm before relying on explicit **kwargs here.
    sig = funcsigs.signature(func)
    kwargs = dict(list(zip(list(sig.parameters.keys()), args)))

    # Emit a heredoc that re-imports the function (falling back to loading
    # the source file directly when it is not importable by module path) and
    # calls it with the pretty-printed keyword dict.  The pprint formatting
    # is load-bearing: the doctest above pins its exact output.
    return r"""
python - <<EOF

try:
    from {func.__module__} import {func.__name__}
except ImportError:
    import imp
    {func.__name__} = imp.load_source('{module_name}', '{source_file}').{func.__name__}

{func.__name__}(**
{param_str}
)

EOF""".format(
        func=func,
        module_name=func.__name__,
        source_file=inspect.getsourcefile(func),
        param_str=pprint.pformat(kwargs, width=1, indent=1),
    )
def get_module_path_from_fname(fname):
    """
    Gets a dotted module path from a filename if it is available by searching
    through sys.path.

    >>> get_module_path_from_fname(__file__)
    'cosmos.api'

    :param fname: path to a python source file.
    :return: the importable dotted module path, e.g. ``'cosmos.api'``.
    :raises FileNotFoundError: if *fname* does not exist.
    :raises ModuleNotFoundError: if no sys.path entry yields an importable
        module for *fname*.
    """
    fname = os.path.normpath(os.path.abspath(fname))
    if not os.path.exists(fname):
        # Bug fix: the original raised FileExistsError, which means the
        # opposite ("file already exists"); a missing file is FileNotFoundError.
        raise FileNotFoundError("{} does not exist".format(fname))

    # The first element in sys.path is the working directory of the script;
    # try it *last* so installed locations take precedence.
    # Bug fix: the original appended sys.path[-1:] (retrying the last entry)
    # instead of sys.path[:1] (deferring the first entry).
    for path in sys.path[1:] + sys.path[:1]:
        path = os.path.normpath(path)
        if fname.startswith(path):
            module_rel_path = fname.replace(path + os.sep, "")
            module_path = module_rel_path.replace(".py", "").replace(os.sep, ".")
            try:
                # Only accept the candidate if it actually imports.
                importlib.import_module(module_path)
                return module_path
            except ModuleNotFoundError:
                pass

    raise ModuleNotFoundError(f"could not find module for {fname}")
def _get_import_code_for_func(func):
filename = inspect.getfile(func)
if func.__module__ == "__main__":
source_file = os.path.abspath(filename)
assert os.path.exists(source_file)
try:
path = get_module_path_from_fname(filename)
parts = path.split("/")
return "from %s import %s" % (".".join(parts), func.__name__)
except ModuleNotFoundError:
# resort to importing the absolute path
source_file = os.path.abspath(filename)
return (
f"from importlib import machinery\n"
f'loader = machinery.SourceFileLoader("module", "{source_file}")\n'
f"mod = loader.load_module()\n"
f'{func.__name__} = getattr(mod, "{func.__name__}")'
)
else:
return "from %s import %s" % (func.__module__, func.__name__)
def py_call(func):
    """
    A function decorator: calling the wrapped function returns the text of a
    standalone python script that imports *func* and invokes it with the
    given keyword arguments.

    NOTE(review): positional arguments are not supported -- the wrapper
    raises NotImplementedError when any are passed; callers must use
    keyword arguments only.
    """
    # Marker attribute; presumably read by the Cosmos Task machinery to skip
    # further wrapping -- TODO confirm.
    func.skip_wrap = True

    @wraps(func)
    def wrapped(*args, **kwargs):
        # These local names are interpolated into the template below via
        # .format(**locals()) -- do not rename them.
        class_imports = ""
        args_str = ""
        if len(args):
            args_str += "*%s,\n" % args
            raise NotImplementedError()
        if len(kwargs):
            args_str += "**%s" % pprint.pformat(kwargs, indent=2)
        func_import_code = _get_import_code_for_func(func)
        return r"""#!/usr/bin/env python
import logging
DEFAULT_LOG_FORMAT = "[%(name)s : %(asctime)-15s %(filename)s - %(funcName)s() ] %(message)s"
logging.basicConfig(format=DEFAULT_LOG_FORMAT, datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
{class_imports}{func_import_code}
# To use ipdb, uncomment the next two lines and tab over the function call
#import ipdb
#with ipdb.launch_ipdb_on_exception():
{func.__name__}(
{args_str}
)
""".format(
            **locals()
        )

    return wrapped
# def py_call_cmd_wrapper(task):
# return py_call
| gpl-3.0 |
andythigpen/home-assistant | tests/test_component_api.py | 8 | 11764 | """
tests.test_component_http
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests Home Assistant HTTP component does what it should do.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
import json
import requests
import homeassistant as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.remote as remote
import homeassistant.components.http as http
from homeassistant.const import HTTP_HEADER_HA_AUTH
# Password the api component is configured with; sent on authenticated requests.
API_PASSWORD = "test1234"

# Somehow the socket that holds the default port does not get released
# when we close down HA in a different test case. Until I have figured
# out what is going on, let's run this test on a different port.
SERVER_PORT = 8120

# Base URL of the test server started by setUpModule().
HTTP_BASE_URL = "http://127.0.0.1:{}".format(SERVER_PORT)

# Auth header attached to every authenticated request in this module.
HA_HEADERS = {HTTP_HEADER_HA_AUTH: API_PASSWORD}

# Module-level HomeAssistant instance; assigned in setUpModule().
hass = None
def _url(path=""):
    """Build an absolute URL for *path* on the test server."""
    return "{}{}".format(HTTP_BASE_URL, path)
def setUpModule():   # pylint: disable=invalid-name
    """ Initializes a Home Assistant server for the whole module. """
    global hass

    hass = ha.HomeAssistant()

    # Register a dummy listener and a known state so the API tests below
    # have something to query.
    hass.bus.listen('test_event', lambda _: _)
    hass.states.set('test.test', 'a_state')

    # Start the http component on the dedicated test port, then the api
    # component on top of it.
    bootstrap.setup_component(
        hass, http.DOMAIN,
        {http.DOMAIN: {http.CONF_API_PASSWORD: API_PASSWORD,
                       http.CONF_SERVER_PORT: SERVER_PORT}})

    bootstrap.setup_component(hass, 'api')

    hass.start()
def tearDownModule():   # pylint: disable=invalid-name
    """ Stops the Home Assistant server started in setUpModule(). """
    hass.stop()
class TestAPI(unittest.TestCase):
    """ Test the HTTP API against the live server started in setUpModule(). """

    # TODO move back to http component and test with use_auth.
    def test_access_denied_without_password(self):
        """ A request with no auth header is rejected with 401. """
        req = requests.get(
            _url(remote.URL_API_STATES_ENTITY.format("test")))

        self.assertEqual(401, req.status_code)

    def test_access_denied_with_wrong_password(self):
        """ A request with a wrong password is rejected with 401. """
        req = requests.get(
            _url(remote.URL_API_STATES_ENTITY.format("test")),
            headers={HTTP_HEADER_HA_AUTH: 'wrongpassword'})

        self.assertEqual(401, req.status_code)

    def test_api_list_state_entities(self):
        """ Test if the debug interface allows us to list state entities. """
        req = requests.get(_url(remote.URL_API_STATES),
                           headers=HA_HEADERS)

        remote_data = [ha.State.from_dict(item) for item in req.json()]

        self.assertEqual(hass.states.all(), remote_data)

    def test_api_get_state(self):
        """ Test if the debug interface allows us to get a state. """
        req = requests.get(
            _url(remote.URL_API_STATES_ENTITY.format("test.test")),
            headers=HA_HEADERS)

        data = ha.State.from_dict(req.json())

        state = hass.states.get("test.test")

        # state, last_changed and attributes must all survive the JSON
        # round-trip through the API.
        self.assertEqual(state.state, data.state)
        self.assertEqual(state.last_changed, data.last_changed)
        self.assertEqual(state.attributes, data.attributes)

    def test_api_get_non_existing_state(self):
        """ Fetching an unknown entity returns 404. """
        req = requests.get(
            _url(remote.URL_API_STATES_ENTITY.format("does_not_exist")),
            headers=HA_HEADERS)

        self.assertEqual(404, req.status_code)

    def test_api_state_change(self):
        """ Test if we can change the state of an entity that exists. """
        hass.states.set("test.test", "not_to_be_set")

        requests.post(_url(remote.URL_API_STATES_ENTITY.format("test.test")),
                      data=json.dumps({"state": "debug_state_change2"}),
                      headers=HA_HEADERS)

        self.assertEqual("debug_state_change2",
                         hass.states.get("test.test").state)

    # pylint: disable=invalid-name
    def test_api_state_change_of_non_existing_entity(self):
        """ Test if the API allows us to change a state of
            a non existing entity. """
        new_state = "debug_state_change"

        req = requests.post(
            _url(remote.URL_API_STATES_ENTITY.format(
                "test_entity.that_does_not_exist")),
            data=json.dumps({'state': new_state}),
            headers=HA_HEADERS)

        cur_state = (hass.states.
                     get("test_entity.that_does_not_exist").state)

        # 201 Created: the entity did not exist before this call.
        self.assertEqual(201, req.status_code)
        self.assertEqual(cur_state, new_state)

    # pylint: disable=invalid-name
    def test_api_state_change_with_bad_data(self):
        """ Test if API sends appropriate error if we omit state. """
        req = requests.post(
            _url(remote.URL_API_STATES_ENTITY.format(
                "test_entity.that_does_not_exist")),
            data=json.dumps({}),
            headers=HA_HEADERS)

        self.assertEqual(400, req.status_code)

    # pylint: disable=invalid-name
    def test_api_fire_event_with_no_data(self):
        """ Test if the API allows us to fire an event. """
        test_value = []

        def listener(event):
            """ Helper method that will verify our event got called. """
            test_value.append(1)

        hass.bus.listen_once("test.event_no_data", listener)

        requests.post(
            _url(remote.URL_API_EVENTS_EVENT.format("test.event_no_data")),
            headers=HA_HEADERS)

        # Wait until the event has been processed before asserting.
        hass.pool.block_till_done()

        self.assertEqual(1, len(test_value))

    # pylint: disable=invalid-name
    def test_api_fire_event_with_data(self):
        """ Test if the API allows us to fire an event. """
        test_value = []

        def listener(event):
            """ Helper method that will verify that our event got called and
                that test if our data came through. """
            if "test" in event.data:
                test_value.append(1)

        hass.bus.listen_once("test_event_with_data", listener)

        requests.post(
            _url(remote.URL_API_EVENTS_EVENT.format("test_event_with_data")),
            data=json.dumps({"test": 1}),
            headers=HA_HEADERS)

        hass.pool.block_till_done()

        self.assertEqual(1, len(test_value))

    # pylint: disable=invalid-name
    def test_api_fire_event_with_invalid_json(self):
        """ Malformed event data is rejected and no event fires. """
        test_value = []

        def listener(event):
            """ Helper method that will verify our event got called. """
            test_value.append(1)

        hass.bus.listen_once("test_event_bad_data", listener)

        req = requests.post(
            _url(remote.URL_API_EVENTS_EVENT.format("test_event_bad_data")),
            data=json.dumps('not an object'),
            headers=HA_HEADERS)

        hass.pool.block_till_done()

        # 422 Unprocessable Entity: valid JSON, but not an object of
        # event data; the listener must not have fired.
        self.assertEqual(422, req.status_code)
        self.assertEqual(0, len(test_value))

        # Try now with valid but unusable JSON
        req = requests.post(
            _url(remote.URL_API_EVENTS_EVENT.format("test_event_bad_data")),
            data=json.dumps([1, 2, 3]),
            headers=HA_HEADERS)

        hass.pool.block_till_done()

        self.assertEqual(422, req.status_code)
        self.assertEqual(0, len(test_value))

    def test_api_get_event_listeners(self):
        """ Test if we can get the list of events being listened for. """
        req = requests.get(_url(remote.URL_API_EVENTS),
                           headers=HA_HEADERS)

        local = hass.bus.listeners

        # Remove each reported event from the local view; afterwards
        # nothing may remain unaccounted for.
        for event in req.json():
            self.assertEqual(event["listener_count"],
                             local.pop(event["event"]))

        self.assertEqual(0, len(local))

    def test_api_get_services(self):
        """ Test if we can get a dict describing current services. """
        req = requests.get(_url(remote.URL_API_SERVICES),
                           headers=HA_HEADERS)

        local_services = hass.services.services

        for serv_domain in req.json():
            local = local_services.pop(serv_domain["domain"])

            self.assertEqual(local, serv_domain["services"])

    def test_api_call_service_no_data(self):
        """ Test if the API allows us to call a service. """
        test_value = []

        def listener(service_call):
            """ Helper method that will verify that our service got called. """
            test_value.append(1)

        hass.services.register("test_domain", "test_service", listener)

        requests.post(
            _url(remote.URL_API_SERVICES_SERVICE.format(
                "test_domain", "test_service")),
            headers=HA_HEADERS)

        hass.pool.block_till_done()

        self.assertEqual(1, len(test_value))

    def test_api_call_service_with_data(self):
        """ Test if the API allows us to call a service. """
        test_value = []

        def listener(service_call):
            """ Helper method that will verify that our service got called and
                that test if our data came through. """
            if "test" in service_call.data:
                test_value.append(1)

        hass.services.register("test_domain", "test_service", listener)

        requests.post(
            _url(remote.URL_API_SERVICES_SERVICE.format(
                "test_domain", "test_service")),
            data=json.dumps({"test": 1}),
            headers=HA_HEADERS)

        hass.pool.block_till_done()

        self.assertEqual(1, len(test_value))

    def test_api_event_forward(self):
        """ Test setting up event forwarding. """

        # No payload at all -> 400 Bad Request.
        req = requests.post(
            _url(remote.URL_API_EVENT_FORWARD),
            headers=HA_HEADERS)
        self.assertEqual(400, req.status_code)

        # Missing api_password -> 400.
        req = requests.post(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({'host': '127.0.0.1'}),
            headers=HA_HEADERS)
        self.assertEqual(400, req.status_code)

        # Missing host -> 400.
        req = requests.post(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({'api_password': 'bla-di-bla'}),
            headers=HA_HEADERS)
        self.assertEqual(400, req.status_code)

        # Non-numeric port -> 422.
        req = requests.post(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({
                'api_password': 'bla-di-bla',
                'host': '127.0.0.1',
                'port': 'abcd'
            }),
            headers=HA_HEADERS)
        self.assertEqual(422, req.status_code)

        # NOTE(review): presumably the wrong password fails validation
        # against the target instance -> 422.
        req = requests.post(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({
                'api_password': 'bla-di-bla',
                'host': '127.0.0.1',
                'port': '8125'
            }),
            headers=HA_HEADERS)
        self.assertEqual(422, req.status_code)

        # Setup a real one
        req = requests.post(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({
                'api_password': API_PASSWORD,
                'host': '127.0.0.1',
                'port': SERVER_PORT
            }),
            headers=HA_HEADERS)
        self.assertEqual(200, req.status_code)

        # Delete it again..
        req = requests.delete(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({}),
            headers=HA_HEADERS)
        self.assertEqual(400, req.status_code)

        req = requests.delete(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({
                'host': '127.0.0.1',
                'port': 'abcd'
            }),
            headers=HA_HEADERS)
        self.assertEqual(422, req.status_code)

        req = requests.delete(
            _url(remote.URL_API_EVENT_FORWARD),
            data=json.dumps({
                'host': '127.0.0.1',
                'port': SERVER_PORT
            }),
            headers=HA_HEADERS)
        self.assertEqual(200, req.status_code)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.