repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Rasmus7700/Smarthome | plugins/cli/__init__.py | 8 | 7232 | #!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Copyright 2012-2013 Marcus Popp marcus@popp.mx
#########################################################################
# This file is part of SmartHome.py. http://mknx.github.io/smarthome/
#
# SmartHome.py is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHome.py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHome.py. If not, see <http://www.gnu.org/licenses/>.
#########################################################################
import logging
import threading
import lib.connection
logger = logging.getLogger('')
class CLIHandler(lib.connection.Stream):
    """Line-oriented command handler for one connected CLI client.

    Parses commands (ls, la, lo/ll, lt, cl, update/up, tr, rl, rr, help,
    quit) and writes the results back over the socket via ``push``.
    """
    terminator = '\n'.encode()

    def __init__(self, smarthome, sock, source, updates):
        """
        smarthome -- the SmartHome.py core object
        sock      -- connected client socket
        source    -- client address; used as the source tag for item updates
        updates   -- True if this client may change items / trigger logics
        """
        lib.connection.Stream.__init__(self, sock, source)
        self.source = source
        self.updates_allowed = updates
        self.sh = smarthome
        self.push("SmartHome.py v{0}\n".format(self.sh.version))
        self.push("Enter 'help' for a list of available commands.\n")
        self.push("> ")

    def push(self, data):
        # Encode outgoing text and hand it to the connection layer.
        self.send(data.encode())

    def found_terminator(self, data):
        """Dispatch one complete command line received from the client.

        Arguments are extracted by slicing off the command prefix.  The
        previous implementation used str.lstrip(<command>), which strips a
        *character set* rather than a prefix and could eat leading
        characters of the argument (e.g. 'tr trunk_logic' typed without a
        separating space).
        """
        cmd = data.decode().strip()
        if cmd.startswith('ls'):
            self.push("Items:\n======\n")
            self.ls(cmd[2:].strip())
        elif cmd == 'la':
            self.la()
        elif cmd in ('lo', 'll'):  # 'll' is an alias for 'lo'
            self.lo()
        elif cmd == 'lt':
            self.lt()
        elif cmd == 'cl':
            self.cl()
        elif cmd.startswith('update ') or cmd.startswith('up '):
            # Everything after the first space is the "item = value" payload.
            self.update(cmd.partition(' ')[2].strip())
        elif cmd.startswith('tr'):
            self.tr(cmd[2:].strip())
        elif cmd.startswith('rl'):
            self.rl(cmd[2:].strip())
        elif cmd.startswith('rr'):
            self.rr(cmd[2:].strip())
        elif cmd == 'help' or cmd == 'h':
            self.usage()
        elif cmd in ('quit', 'q', 'exit', 'x'):
            self.push('bye\n')
            self.close()
            return
        self.push("> ")

    def cl(self):
        """Clean the (memory) log up to the current time."""
        self.sh.log.clean(self.sh.now())

    def ls(self, path):
        """List the top-level items, or the item at *path* and all children."""
        if not path:
            for item in self.sh:
                self.push("{0}\n".format(item.id()))
        else:
            item = self.sh.return_item(path)
            if hasattr(item, 'id'):
                if item._type:
                    self.push("{0} = {1}\n".format(item.id(), item()))
                else:
                    self.push("{}\n".format(item.id()))
                for child in item:
                    self.ls(child.id())
            else:
                self.push("Could not find path: {}\n".format(path))

    def la(self):
        """List every item; items that carry a type are shown with value."""
        self.push("Items:\n======\n")
        for item in self.sh.return_items():
            if item._type:
                self.push("{0} = {1}\n".format(item.id(), item()))
            else:
                self.push("{0}\n".format(item.id()))

    def update(self, data):
        """Assign a new value to an item.  *data* is "item = value"."""
        if not self.updates_allowed:
            self.push("Updating items is not allowed.\n")
            return
        path, sep, value = data.partition('=')
        path = path.strip()
        value = value.strip()
        if not value:
            self.push("You have to specify an item value. Syntax: up item = value\n")
            return
        item = self.sh.return_item(path)
        if not hasattr(item, '_type'):
            self.push("Could not find item with a valid type specified: '{0}'\n".format(path))
            return
        item(value, 'CLI', self.source)

    def tr(self, logic):
        """Trigger the named logic."""
        if not self.updates_allowed:
            self.push("Logic triggering is not allowed.\n")
            return
        if logic in self.sh.return_logics():
            self.sh.trigger(logic, by='CLI')
        else:
            self.push("Logic '{0}' not found.\n".format(logic))

    def rl(self, name):
        """Reload (re-compile) the named logic without running it."""
        if not self.updates_allowed:
            self.push("Logic triggering is not allowed.\n")
            return
        if name in self.sh.return_logics():
            logic = self.sh.return_logic(name)
            logic.generate_bytecode()
        else:
            self.push("Logic '{0}' not found.\n".format(name))

    def rr(self, name):
        """Reload the named logic and trigger it."""
        if not self.updates_allowed:
            self.push("Logic triggering is not allowed.\n")
            return
        if name in self.sh.return_logics():
            logic = self.sh.return_logic(name)
            logic.generate_bytecode()
            logic.trigger(by='CLI')
        else:
            self.push("Logic '{0}' not found.\n".format(name))

    def lo(self):
        """List all logics with their next scheduled execution time."""
        self.push("Logics:\n")
        for logic in self.sh.return_logics():
            nt = self.sh.scheduler.return_next(logic)
            if nt is not None:
                self.push("{0} (scheduled for {1})\n".format(logic, nt.strftime('%Y-%m-%d %H:%M:%S%z')))
            else:
                self.push("{0}\n".format(logic))

    def lt(self):
        # list all threads with names
        self.push("{0} Threads:\n".format(threading.activeCount()))
        for t in threading.enumerate():
            self.push("{0}\n".format(t.name))

    def usage(self):
        """Print the command reference."""
        self.push('cl: clean (memory) log\n')
        self.push('ls: list the first level items\n')
        self.push('ls item: list item and every child item (with values)\n')
        self.push('la: list all items (with values)\n')
        self.push('lo: list all logics and next execution time\n')
        self.push('lt: list current thread names\n')
        self.push('update item = value: update the specified item with the specified value\n')
        self.push('up: alias for update\n')
        self.push('tr logic: trigger logic\n')
        self.push('rl logic: reload logic\n')
        self.push('rr logic: reload and run logic\n')
        self.push('quit: quit the session\n')
        self.push('q: alias for quit\n')
class CLI(lib.connection.Server):
    """TCP server plugin that spawns one CLIHandler per incoming connection."""

    def __init__(self, smarthome, update='False', ip='127.0.0.1', port=2323):
        lib.connection.Server.__init__(self, ip, port)
        self.sh = smarthome
        # 'update' arrives from the config as a string ("True"/"False").
        self.updates_allowed = smarthome.string2bool(update)

    def handle_connection(self):
        """Accept a pending connection and hand it off to a CLIHandler."""
        sock, address = self.accept()
        if sock is not None:
            logger.debug("{}: incoming connection from {} to {}".format(self._name, address, self.address))
            CLIHandler(self.sh, sock, address, self.updates_allowed)

    def run(self):
        # Called by the scheduler/core to start the plugin.
        self.alive = True

    def stop(self):
        # Called by the core on shutdown; close the listening socket.
        self.alive = False
        self.close()
| gpl-3.0 |
jupierce/openshift-tools | scripts/monitoring/cron-send-docker-oc-versions.py | 7 | 3819 | #!/usr/bin/python
'''
Send openshift and docker versions with miq_metric tag to metric_sender
Example:
./cron-send-docker-oc-versions.py
'''
# Disabling invalid-name because pylint doesn't like the naming conention we have.
# pylint: disable=invalid-name,import-error
import json
import os
import sys
import subprocess
import argparse
from openshift_tools.monitoring.metric_sender import MetricSender
def parse_args():
    '''Parse the arguments for this script'''
    parser = argparse.ArgumentParser(description="Tool to send docker and openshift versions")
    parser.add_argument('-d', '--debug', default=False, action="store_true", help="debug mode")
    parser.add_argument('-v', '--verbose', default=False, action="store_true", help="Verbose?")
    # Evaluate sys.argv and return the populated namespace directly.
    return parser.parse_args()
def add_specific_rpm_version(package_name, rpm_db_path, keys, mts, key_prefix=""):
    '''get rpm package version and add to metric sender and keys dictionary

    Queries the RPM database at rpm_db_path for package_name.  On success the
    metric "<key_prefix><package_name>.version" (tagged miq_metric=true) is
    added to mts and to the keys dict, and (True, None) is returned.  If rpm
    exits non-zero (e.g. package not installed), (False, err) is returned,
    where err is the CalledProcessError (captured output on err.output).
    '''
    try:
        # "rpm -q <pkg>" prints "<pkg>-<version>-<release>...\n" on success;
        # stderr is folded into stdout so err.output is complete on failure.
        return_value = subprocess.check_output("rpm --dbpath {rpm_db_path} -q {package}".format(rpm_db_path=rpm_db_path,
                                                                                               package=package_name),
                                               stderr=subprocess.STDOUT, shell=True)
        if return_value.startswith(package_name):
            # Slice out everything between "<pkg>-" and the trailing newline.
            # NOTE(review): assumes Python 2 str output from check_output;
            # under Python 3 this would be bytes and startswith would fail.
            package_version = return_value[len(package_name)+1:len(return_value)-1]
            key = "{prefix}{name}.version".format(prefix=key_prefix, name=package_name)
            tags = {"descriptor_name": "{name}.version".format(name=package_name),
                    "miq_metric": "true"}
            mts.add_metric({key: package_version}, key_tags=tags)
            keys[key] = package_version
        return True, None
    except subprocess.CalledProcessError as err:
        return False, err
def main():
'''get docker and openshift versions and send to metric sender
'''
args = parse_args()
mts = MetricSender(verbose=args.verbose, debug=args.debug)
# Check if host rpm db is mounted. Otherwise check againts container db
rpm_db_path = "/host/var/lib/rpm"
if not os.path.exists(rpm_db_path):
rpm_db_path = "/var/lib/rpm"
keys = {}
# Accumulate failures
failures = 0
# Get docker version
success, err = add_specific_rpm_version("docker", rpm_db_path, keys, mts)
if not success:
failures += 1
print "Failed to get docker rpm version. " + err.output
openshift_package_name = "origin"
# Get openshift node version (attempt upstream)
success, err = add_specific_rpm_version("{}-node".format(openshift_package_name), rpm_db_path, keys, mts,
"openshift.node.")
if not success:
# Get openshift version (attempt downstream)
openshift_package_name = "atomic-openshift"
success, err2 = add_specific_rpm_version("{}-node".format(openshift_package_name), rpm_db_path, keys, mts,
"openshift.node.")
if not success:
failures += 1
print "Failed to get openshift rpm version:\n" + err.output + + err2.output
# Get openshift master version (upstream or downstream) - only if node rpm found
if success:
success, err = add_specific_rpm_version("{}-master".format(openshift_package_name), rpm_db_path, keys, mts,
"openshift.master.")
if not success:
# Print notification but don't count this as failure
print "Note: " + err.output
print "Sending these metrics:"
print json.dumps(keys, indent=4)
mts.send_metrics()
print "\nDone.\n"
sys.exit(failures)
# Script entry point; exit status is the number of failed version lookups.
if __name__ == '__main__':
    main()
| apache-2.0 |
ClearCorp/odoo-clearcorp | TODO-8.0/account_aged_partner_balance_report/wizard/account_aged_partner_balance_report_wizard.py | 3 | 7106 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import osv, fields
from openerp.tools.translate import _
class accountAgedpartnerBalanceWizard(osv.osv_memory):
    _inherit = "account.report.wiz"
    _name = 'aged.partner.balance.wiz'
    _description = 'Account Aged Partner Balance Report'

    # Wizard-specific options on top of the inherited report wizard fields.
    _columns = {
        'period_length':fields.integer('Period Length (days)'),
        'direction_selection': fields.selection([('past','Past'),('future','Future')], 'Analysis Direction'),
        'account_type': fields.selection([('customer','Receivable Accounts'),
                                          ('supplier','Payable Accounts'),
                                          ('customer_supplier','Receivable and Payable Accounts')],"Account type",),
    }
    _defaults = {
        'period_length': 30,
        'date_from': lambda *a: time.strftime('%Y-%m-%d'),
        'direction_selection': 'past',
        'filter': 'filter_date',
    }

    def pre_print_report(self, cr, uid, ids, data, context=None):
        """Copy the wizard-only fields into data['form'] before printing."""
        if context is None:
            context = {}
        # read period_length and direction_selection, because those fields don't belongs to the account.report.wiz
        vals = self.read(cr, uid, ids,['period_length','direction_selection','account_type'], context=context)[0] #this method read the field and included it in the form (account.common.report has this method)
        data['form'].update(vals)
        return data

    def _print_report(self, cr, uid, ids, data, context=None):
        """Build the 5 ageing buckets and dispatch the matching aeroo report.

        Returns the ir.actions.report.xml action dict that launches either
        the ODT (PDF) or ODS (spreadsheet) template.
        """
        res = {}
        mimetype = self.pool.get('report.mimetypes')
        report_obj = self.pool.get('ir.actions.report.xml')
        report_name = ''
        if context is None:
            context = {}
        # we update form with display account value
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        period_length = data['form']['period_length']
        if period_length <= 0:
            raise osv.except_osv(_('UserError'), _('You must enter a period length that cannot be 0 or below !'))
        #In this section, create interval times, depends of period length parameter
        start = datetime.strptime(data['form']['date_from'], "%Y-%m-%d")
        if data['form']['direction_selection'] == 'past':
            # Buckets are built newest-to-oldest; index 4 is the most recent
            # interval, index 0 is open-ended ("+4*period_length" and older).
            for i in range(5)[::-1]:
                stop = start - relativedelta(days=period_length)
                res[str(i)] = {
                    'name': (i!=0 and (str((5-(i+1)) * period_length) + '-' + str((5-i) * period_length)) or ('+'+str(4 * period_length))),
                    'stop': start.strftime('%Y-%m-%d'),
                    'start': (i!=0 and stop.strftime('%Y-%m-%d') or False),
                }
                start = stop - relativedelta(days=1)
        else:
            # 'future' direction: buckets run forward from date_from; the
            # last one is open-ended (start set, stop False).
            for i in range(5):
                stop = start + relativedelta(days=period_length)
                res[str(5-(i+1))] = {
                    'name': (i!=4 and str((i) * period_length)+'-' + str((i+1) * period_length) or ('+'+str(4 * period_length))),
                    'start': start.strftime('%Y-%m-%d'),
                    'stop': (i!=4 and stop.strftime('%Y-%m-%d') or False),
                }
                start = stop + relativedelta(days=1)
        data['form'].update(res)
        #=======================================================================
        # The output mimetype chosen in the wizard decides which aeroo
        # template record is used: PDF output renders the ODT template,
        # while xls/ods output renders the editable ODS template.
        # If no matching template record exists, raise an error.
        #=======================================================================
        #1. Find out_format selected
        out_format_obj = mimetype.browse(cr, uid, [int(data['form']['out_format'])], context)[0]
        #2. Check out_format and set report_name for each format
        if out_format_obj.code == 'oo-pdf':
            report_name = 'account_aged_partner_balance_odt'
        elif out_format_obj.code == 'oo-xls' or out_format_obj.code == 'oo-ods':
            report_name = 'account_aged_partner_balance_ods'
        # If there not exist name, it's because not exist a record for this format
        if report_name == '':
            raise osv.except_osv(_('Error !'), _('There is no template defined for the selected format. Check if aeroo report exist.'))
        else:
            #Search record that match with the name, and get some extra information
            report_xml_id = report_obj.search(cr, uid, [('report_name','=', report_name)],context=context)
            report_xml = report_obj.browse(cr, uid, report_xml_id, context=context)[0]
            data.update({'model': report_xml.model, 'report_type':'aeroo', 'id': report_xml.id})
            #Write out_format choosed in wizard
            report_xml.write({'out_format': out_format_obj.id}, context=context)
        return {
            'type': 'ir.actions.report.xml',
            'report_name': report_name,
            'datas': data,
            'context':context
        }
| agpl-3.0 |
nateberman/Python_Koans | python2/koans/about_string_manipulation.py | 73 | 2590 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
    """Koan exercises about string formatting and manipulation.

    The ``__`` placeholders are intentionally unsolved: the student replaces
    each one with the value that makes the assertion pass.  Do not "fix"
    them here.
    """

    def test_use_format_to_interpolate_variables(self):
        value1 = 'one'
        value2 = 2
        string = "The values are {0} and {1}".format(value1, value2)
        self.assertEqual(__, string)

    def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
        value1 = 'doh'
        value2 = 'DOH'
        string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
        self.assertEqual(__, string)

    def test_any_python_expression_may_be_interpolated(self):
        import math  # import a standard python module with math functions

        decimal_places = 4
        string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5), \
            decimal_places)
        self.assertEqual(__, string)

    def test_you_can_get_a_substring_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual(__, string[7:10])

    def test_you_can_get_a_single_character_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual(__, string[1])

    def test_single_characters_can_be_represented_by_integers(self):
        self.assertEqual(__, ord('a'))
        self.assertEqual(__, ord('b') == (ord('a') + 1))

    def test_strings_can_be_split(self):
        string = "Sausage Egg Cheese"
        words = string.split()
        self.assertEqual([__, __, __], words)

    def test_strings_can_be_split_with_different_patterns(self):
        import re  # import python regular expression library

        string = "the,rain;in,spain"
        pattern = re.compile(',|;')
        words = pattern.split(string)
        self.assertEqual([__, __, __, __], words)

        # `pattern` is a Python regular expression pattern which matches
        # ',' or ';'

    def test_raw_strings_do_not_interpret_escape_characters(self):
        string = r'\n'
        self.assertNotEqual('\n', string)
        self.assertEqual(__, string)
        self.assertEqual(__, len(string))

        # Useful in regular expressions, file paths, URLs, etc.

    def test_strings_can_be_joined(self):
        words = ["Now", "is", "the", "time"]
        self.assertEqual(__, ' '.join(words))

    def test_strings_can_change_case(self):
        self.assertEqual(__, 'guido'.capitalize())
        self.assertEqual(__, 'guido'.upper())
        self.assertEqual(__, 'TimBot'.lower())
        self.assertEqual(__, 'guido van rossum'.title())
        self.assertEqual(__, 'ToTaLlY aWeSoMe'.swapcase())
| mit |
danfmsouza/yipservicedesk | includes/fckeditor/editor/filemanager/connectors/py/fckoutput.py | 25 | 3923 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
    """
    Converts the special characters '<', '>', '&' and '"' into the HTML
    entities &lt;, &gt;, &amp; and &quot; respectively, as specified by
    RFC 1866 for text embedded in markup.

    BUG FIX: the replacement targets had degraded into identity no-ops
    (e.g. replacing '&' with '&'), so nothing was escaped; the proper
    entities are restored here.  The *replace* parameter keeps the
    historical signature; its old default, ``string.replace``, exists
    only on Python 2, so it now defaults to ``str.replace`` semantics.
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text
def convertToXmlAttribute(value):
    """Return *value* escaped for use in an XML attribute; None becomes ""."""
    return escape("" if value is None else value)
class BaseHttpMixin(object):
    """Mixin that emits the standard connector response headers.

    Relies on a ``setHeader(name, value)`` method being provided by the
    concrete connector class this is mixed into.
    """
    def setHttpHeaders(self, content_type='text/xml'):
        "Purpose: to prepare the headers for the xml to return"
        # Prevent the browser from caching the result.
        # Date in the past
        self.setHeader('Expires','Mon, 26 Jul 1997 05:00:00 GMT')
        # always modified
        self.setHeader('Last-Modified',strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
        # HTTP/1.1
        self.setHeader('Cache-Control','no-store, no-cache, must-revalidate')
        self.setHeader('Cache-Control','post-check=0, pre-check=0')
        # HTTP/1.0
        self.setHeader('Pragma','no-cache')
        # Set the response format.
        self.setHeader( 'Content-Type', content_type + '; charset=utf-8' )
        return
class BaseXmlMixin(object):
    """Mixin that renders connector responses as XML fragments."""

    def createXmlHeader(self, command, resourceType, currentFolder, url):
        """Emit HTTP headers and return the XML prolog, the opening
        <Connector> element and the <CurrentFolder> node."""
        self.setHttpHeaders()
        fragments = ['<?xml version="1.0" encoding="utf-8" ?>']
        fragments.append('<Connector command="{0}" resourceType="{1}">'.format(
            command, resourceType))
        fragments.append('<CurrentFolder path="{0}" url="{1}" />'.format(
            convertToXmlAttribute(currentFolder),
            convertToXmlAttribute(url)))
        return "".join(fragments)

    def createXmlFooter(self):
        """Return the closing </Connector> tag."""
        return "</Connector>"

    def sendError(self, number, text):
        """Emit HTTP headers and return a complete error document."""
        self.setHttpHeaders()
        error_node = self.sendErrorNode(number, text)
        return ('<?xml version="1.0" encoding="utf-8" ?>'
                '<Connector>' + error_node + '</Connector>')

    def sendErrorNode(self, number, text):
        """Render one <Error> node; the message text is included only for
        error number 1."""
        if number == 1:
            return '<Error number="{0}" text="{1}" />'.format(
                number, convertToXmlAttribute(text))
        return '<Error number="{0}" />'.format(number)
class BaseHtmlMixin(object):
    """Mixin that reports upload results back to the editor as HTML/JS."""
    def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
        """Return the <script> snippet that calls OnUploadCompleted() in the
        parent frame with the given status, URL, name and message."""
        self.setHttpHeaders("text/html")
        # The three bare strings below are legacy pseudo-comments (no-op
        # expression statements); kept verbatim.
        "This is the function that sends the results of the uploading process"
        "Minified version of the document.domain automatic fix script (#1919)."
        "The original script can be found at _dev/domain_fix_template.js"
        # Double quotes in the interpolated values are backslash-escaped so
        # they survive inside the JS string literals.
        return """<script type="text/javascript">
    (function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
    window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
        'errorNumber': errorNo,
        'fileUrl': fileUrl.replace ('"', '\\"'),
        'fileName': fileName.replace ( '"', '\\"' ) ,
        'customMsg': customMsg.replace ( '"', '\\"' ),
        }
| gpl-2.0 |
mtholder/peyotl | peyotl/api/treemachine.py | 2 | 14599 | #!/usr/bin/env python
from peyotl.utility import get_config_object, get_logger
from peyotl.api.wrapper import _WSWrapper, APIWrapper
from peyotl.api.study_ref import StudyRef
from peyotl.api.taxon import TaxonWrapper, TaxonHolder
import anyjson
_LOG = get_logger(__name__)
_EMPTY_TUPLE = tuple()
def _treemachine_tax_source2dict(tax_source):
d = {}
tax_source = tax_source.strip()
if not tax_source:
return d
r = [i.strip() for i in tax_source.split(',')]
for el in r:
k, v = el.split(':')
d[k] = v
return d
class GoLNode(TaxonHolder):
    """Wrapper around one node of treemachine's Graph of Life.

    Built from a treemachine response dict; lazily fetches full node info
    (synth sources, tip counts, ...) on first access and lazily fetches the
    subtree newick.  If the node is itself a taxon, ``nearest_taxon`` is
    that same taxon; otherwise it is the closest taxon-bearing ancestor
    reported by the service.
    """
    def __init__(self,
                 prop_dict,
                 treemachine_wrapper=None,
                 graph_of_life=None,
                 taxon=None,
                 nearest_taxon=None,
                 node_id=None):
        self._treemachine_wrapper = treemachine_wrapper
        self._graph_of_life = graph_of_life
        # Node id may be passed explicitly or taken from an MRCA response.
        if node_id is None:
            self._node_id = prop_dict['mrca_node_id']
        else:
            self._node_id = node_id
        if taxon is None:
            # The service encodes "no taxon" as the string 'null'.
            oi = prop_dict.get('ott_id')
            if oi == 'null':
                oi = None
            if oi is not None:
                taxon_dict = {'ot:ottId': oi,
                              'rank': prop_dict.get('mrca_rank'),
                              'ot:ottTaxonName': prop_dict.get('mrca_name'),
                              'unique_name': prop_dict.get('mrca_unique_name'),
                              'treemachine_node_id': self.node_id
                              }
                # TODO should write wrappers for getting the taxomachine wrapper from treemachine wrapper...
                taxon = TaxonWrapper(prop_dict=taxon_dict)
        if nearest_taxon is None:
            # NOTE(review): this branch requires the nearest_taxon_mrca_*
            # keys to be present in prop_dict (KeyError otherwise).
            taxon_dict = {'ot:ottId': prop_dict['nearest_taxon_mrca_ott_id'],
                          'rank': prop_dict.get('nearest_taxon_mrca_rank'),
                          'ot:ottTaxonName': prop_dict.get('nearest_taxon_mrca_name'),
                          'unique_name': prop_dict.get('nearest_taxon_mrca_unique_name'),
                          'treemachine_node_id': prop_dict.get('nearest_taxon_mrca_node_id')
                          }
            assert prop_dict['nearest_taxon_mrca_ott_id'] != 'null'
            # TODO should write wrappers for getting the taxomachine wrapper from treemachine wrapper...
            self._nearest_taxon = TaxonWrapper(prop_dict=taxon_dict)
        else:
            self._nearest_taxon = nearest_taxon
        TaxonHolder.__init__(self, taxon)
        if self._taxon is not None:
            # If this node IS a taxon, the nearest taxon is itself.
            assert (nearest_taxon is None) or (nearest_taxon is self._taxon)
            self._nearest_taxon = self._taxon
        self._subtree_newick = None
        self._synth_sources = prop_dict.get('synth_sources')
        self._in_synth_tree = prop_dict.get('in_synth_tree')
        self._tax_source = prop_dict.get('tax_source')
        self._in_graph = prop_dict.get('in_graph')
        self._num_tips = prop_dict.get('num_tips')
        self._num_synth_children = prop_dict.get('num_synth_children')

    @property
    def node_info_fetched(self):
        # _in_graph is only ever set (non-None) by fetch_node_info or a
        # response that already carried it.
        return self._in_graph is not None

    def fetch_node_info(self):
        """Call treemachine's node_info service and cache the details."""
        prop_dict = self._treemachine_wrapper.node_info(node_id=self.node_id)
        self._synth_sources = [StudyRef(i) for i in prop_dict.get('synth_sources', [])]
        self._in_synth_tree = prop_dict.get('in_synth_tree')
        self._tax_source = _treemachine_tax_source2dict(prop_dict.get('tax_source', ''))
        self._in_graph = bool(prop_dict.get('in_graph'))
        self._num_tips = prop_dict.get('num_tips')
        self._num_synth_children = prop_dict.get('num_synth_children')
        # Sanity checks: the service response must agree with what we hold.
        if prop_dict['ott_id'] not in [None, 'null']:
            assert prop_dict['ott_id'] == self.ott_id
            assert prop_dict['ott_id'] == self.ott_id
        assert prop_dict['rank'] == self.rank
        assert prop_dict['node_id'] == self.node_id

    # The following properties trigger a lazy fetch_node_info on first use.
    @property
    def synth_sources(self):
        if not self.node_info_fetched:
            self.fetch_node_info()
        return self._synth_sources

    @property
    def in_synth_tree(self):
        if not self.node_info_fetched:
            self.fetch_node_info()
        return self._in_synth_tree

    @property
    def tax_source(self):
        if not self.node_info_fetched:
            self.fetch_node_info()
        return self._tax_source

    @property
    def in_graph(self):
        if not self.node_info_fetched:
            self.fetch_node_info()
        return self._in_graph

    @property
    def num_tips(self):
        if not self.node_info_fetched:
            self.fetch_node_info()
        return self._num_tips

    @property
    def num_synth_children(self):
        if not self.node_info_fetched:
            self.fetch_node_info()
        return self._num_synth_children

    @property
    def subtree_newick(self):
        # Lazily fetched and cached; sanity-check the synth tree id when a
        # graph_of_life response dict was supplied.
        if self._subtree_newick is None:
            r = self._treemachine_wrapper.get_synthetic_tree(node_id=self.node_id)
            if self._graph_of_life:
                assert r['tree_id'] == self._graph_of_life['tree_id']
            self._subtree_newick = r['newick']
        return self._subtree_newick

    @property
    def node_id(self):
        return self._node_id

    @property
    def nearest_taxon(self):
        return self._nearest_taxon

    def write_report(self, output):
        # Delegates to the wrapped taxon's report.
        self._taxon.write_report(output)

    @property
    def is_taxon(self):
        return self._taxon is not None

    @property
    def treemachine_node_id(self):
        return self._node_id
class MRCAGoLNode(GoLNode):
    """GoLNode produced by the MRCA service; additionally records which of
    the queried IDs were invalid or absent from the synthetic tree."""

    def __init__(self, prop_dict, treemachine_wrapper=None, graph_of_life=None):
        GoLNode.__init__(self, prop_dict, treemachine_wrapper=treemachine_wrapper, graph_of_life=graph_of_life)

        def _as_tuple(key):
            # Missing or empty lists collapse onto the shared empty tuple.
            raw = prop_dict.get(key)
            return tuple(raw) if raw else _EMPTY_TUPLE

        self._invalid_node_ids = _as_tuple('invalid_node_ids')
        self._invalid_ott_ids = _as_tuple('invalid_ott_ids')
        self._node_ids_not_in_tree = _as_tuple('node_ids_not_in_tree')
        self._ott_ids_not_in_tree = _as_tuple('ott_ids_not_in_tree')

    @property
    def invalid_node_ids(self):
        return self._invalid_node_ids

    @property
    def invalid_ott_ids(self):
        return self._invalid_ott_ids

    @property
    def node_ids_not_in_tree(self):
        return self._node_ids_not_in_tree

    @property
    def ott_ids_not_in_tree(self):
        return self._ott_ids_not_in_tree
class _TreemachineAPIWrapper(_WSWrapper):
    """Web-service wrapper for the Open Tree "treemachine" (tree_of_life /
    graph) API.  Supports both the v1 and v2 URL schemes; the version and
    raw-URL mode come from the peyotl configuration."""

    def __init__(self, domain, **kwargs):
        self._config = kwargs.get('config')
        if self._config is None:
            self._config = get_config_object()
        self._current_synth_info = None
        self._current_synth_id = None
        self.prefix = None
        r = self._config.get_from_config_setting_cascade([('apis', 'treemachine_raw_urls'),
                                                          ('apis', 'raw_urls')],
                                                         "FALSE")
        self._raw_urls = (r.lower() == 'true')
        self._api_vers = self._config.get_from_config_setting_cascade([('apis', 'treemachine_api_version'),
                                                                       ('apis', 'api_version')],
                                                                      "2")
        self.use_v1 = (self._api_vers == "1")
        _WSWrapper.__init__(self, domain, **kwargs)
        self.domain = domain

    @property
    def domain(self):
        return self._domain

    @domain.setter
    def domain(self, d):  # pylint: disable=W0221
        """Changing the domain invalidates the cached synth-tree info and
        recomputes the URL prefixes for the configured API version."""
        self._current_synth_info = None
        self._current_synth_id = None
        self._domain = d
        if self._raw_urls:
            self.prefix = '{d}/treemachine/ext/GoLS/graphdb'.format(d=d)
        elif self.use_v1:
            self.prefix = '{d}/treemachine/v1'.format(d=d)
        else:
            self.prefix = '{d}/v2/tree_of_life'.format(d=d)
            self.graph_prefix = '{d}/v2/graph'.format(d=d)

    @property
    def current_synth_tree_id(self):
        # Cached after the first lookup; the id key differs between v1/v2.
        if self._current_synth_info is None:
            self._current_synth_info = self.synthetic_tree_info
            if self.use_v1:
                self._current_synth_id = self._current_synth_info['draftTreeName']
            else:
                self._current_synth_id = self._current_synth_info['tree_id']
        return self._current_synth_id

    @property
    def synthetic_tree_info(self):
        if self.use_v1:
            uri = '{p}/getDraftTreeID'.format(p=self.prefix)
        else:
            uri = '{p}/about'.format(p=self.prefix)
        return self.json_http_post_raise(uri)

    @property
    def synthetic_tree_id_list(self):
        if self.use_v1:
            uri = '{p}/getSourceTreeIDs'.format(p=self.prefix)
            return self.json_http_post_raise(uri)
        r = self.synthetic_tree_info
        raw_study_list = r['study_list']
        return raw_study_list

    @property
    def synthetic_source_list(self):
        uri = '{p}/getSynthesisSourceList'.format(p=self.prefix)
        return self.json_http_post_raise(uri)

    # deprecated due to https://github.com/OpenTreeOfLife/treemachine/issues/170
    # format is redefined to match API
    # pylint: disable=W0622
    # def get_source_tree(self, tree_id=None, format='newick', node_id=None, max_depth=None, **kwargs):
    #    if self.use_v1:
    #        uri = '{p}/getSourceTree'.format(p=self.prefix)
    #        return self._get_tree(uri, tree_id, format=format, node_id=node_id, max_depth=max_depth)
    #    else:
    #        uri = '{p}/source_tree'.format(p=self.graph_prefix)
    #        study_id = kwargs.get('study_id', '')
    #        if len(study_id) < 3 or study_id[2] != '_':
    #            study_id = 'pg_' + study_id
    #        data = {'git_sha': kwargs.get('git_sha', ''),
    #                'study_id': study_id,
    #                'tree_id': tree_id}
    #        return self.json_http_post_raise(uri, data=anyjson.dumps(data))

    # noinspection PyShadowingBuiltins
    def get_synthetic_tree(self, tree_id=None, format='newick', node_id=None, max_depth=None,
                           ott_id=None):  # pylint: disable=W0622
        """Return the synthetic (sub)tree rooted at node_id or ott_id."""
        if self.use_v1:
            uri = '{p}/getSyntheticTree'.format(p=self.prefix)
        else:
            uri = '{p}/subtree'.format(p=self.prefix)
        return self._get_tree(uri,
                              tree_id=tree_id,
                              format=format,
                              node_id=node_id,
                              max_depth=max_depth,
                              ott_id=ott_id)

    def node_info(self, node_id=None, ott_id=None, include_lineage=False):
        """Return graph/node_info for exactly one of node_id or ott_id."""
        if self.use_v1:
            raise NotImplementedError('node_info was added in v2 of the API')
        uri = '{p}/node_info'.format(p=self.graph_prefix)
        data = {'include_lineage': bool(include_lineage)}
        if node_id and ott_id:
            raise ValueError('You can only specify one of node_id or ott_id')
        if not node_id and not ott_id:
            raise ValueError('You must specify one of node_id or ott_id')
        if node_id:
            data['node_id'] = int(node_id)
        else:
            data['ott_id'] = int(ott_id)
        return self.json_http_post_raise(uri, data=anyjson.dumps(data))

    def mrca(self, ott_ids=None, node_ids=None, wrap_response=False):
        """Return the MRCA of the given ids; optionally wrap as MRCAGoLNode."""
        if not (ott_ids or node_ids):
            raise ValueError('ott_ids or node_ids must be specified')
        assert not self.use_v1
        uri = '{p}/mrca'.format(p=self.prefix)
        data = {'ott_ids': ott_ids, 'node_ids': node_ids}
        resp = self.json_http_post_raise(uri, data=anyjson.dumps(data))
        if wrap_response:
            return MRCAGoLNode(resp, treemachine_wrapper=self)
        return resp

    def get_synth_tree_pruned(self, tree_id=None, node_ids=None, ott_ids=None):
        """Return the synthetic tree induced by the given node/ott ids."""
        if (tree_id is not None) and (tree_id != self.current_synth_tree_id):
            raise NotImplementedError("Treemachine's getDraftTreeSubtreeForNodes does not take a tree ID yet")
        data = {}
        # v1 and v2 use different parameter spellings.
        if self.use_v1:
            if node_ids:
                data['nodeIds'] = node_ids
            if ott_ids:
                data['ottIds'] = ott_ids
        else:
            if node_ids:
                data['node_ids'] = node_ids
            if ott_ids:
                data['ott_ids'] = ott_ids
        if not data:
            raise ValueError('Either "node_ids" or "ott_ids" must be supplied')
        if self.use_v1:
            uri = '{p}/getDraftTreeSubtreeForNodes'.format(p=self.prefix)
        else:
            uri = '{p}/induced_subtree'.format(p=self.prefix)
        return self.json_http_post_raise(uri, data=anyjson.dumps(data))

    induced_subtree = get_synth_tree_pruned

    def _get_tree(self, uri, tree_id, format='newick', node_id=None, max_depth=None,
                  ott_id=None):  # pylint: disable=W0622
        """Shared POST helper for the tree-returning services."""
        if tree_id is None:
            tree_id = self.current_synth_tree_id
        if node_id is None and ott_id is None:
            raise ValueError('"node_id" or "ott_id" must be specified')
        format_list = ['newick', 'arguson']
        if format.lower() not in format_list:
            raise ValueError('Tree "format" must be a value in {}'.format(repr(format_list)))
        if self.use_v1:
            data = {'treeID': tree_id,
                    'format': format}
            if node_id is not None:
                data['subtreeNodeID'] = str(node_id)
            if max_depth is not None:
                data['maxDepth'] = max_depth
        else:
            data = {'tree_id': tree_id, }
            if node_id is not None:
                data['node_id'] = str(node_id)
            else:
                if ott_id is None:
                    # BUG FIX: this used to *return* the ValueError instance
                    # instead of raising it.  (Unreachable given the guard
                    # above, but corrected for safety.)
                    raise ValueError('ott_id or node_id must be specified')
                data['ott_id'] = ott_id
        return self.json_http_post_raise(uri, data=anyjson.dumps(data))

    def get_node_id_for_ott_id(self, ott_id):
        """Map an OTT taxon id to its treemachine graph node id (v1 URL)."""
        uri = '{p}/getNodeIDForottId'.format(p=self.prefix)
        data = {'ottId': str(ott_id)}
        return self.json_http_post_raise(uri, data=anyjson.dumps(data))
def Treemachine(domains=None, **kwargs):
    """Convenience factory: build an APIWrapper and return its treemachine
    sub-wrapper. All arguments are forwarded to APIWrapper unchanged.
    """
    return APIWrapper(domains=domains, **kwargs).treemachine
| bsd-2-clause |
samarthmed/emacs-config | .python-environments/default/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py | 442 | 2304 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI escape sequences for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['   # Control Sequence Introducer
OSC = '\033]'   # Operating System Command
BEL = '\007'    # Bell


def code_to_chars(code):
    """Wrap a numeric SGR code in a complete ANSI escape sequence."""
    return '%s%sm' % (CSI, code)
class AnsiCodes(object):
    """Copies every public numeric attribute of `codes`, converting each
    value into its ready-to-print ANSI escape sequence."""

    def __init__(self, codes):
        public_names = [n for n in dir(codes) if not n.startswith('_')]
        for attr_name in public_names:
            numeric_code = getattr(codes, attr_name)
            setattr(self, attr_name, code_to_chars(numeric_code))
class AnsiCursor(object):
    """Builds ANSI escape sequences that move the terminal cursor."""

    def UP(self, n=1):
        """Move the cursor up `n` rows."""
        return '{0}{1}A'.format(CSI, n)

    def DOWN(self, n=1):
        """Move the cursor down `n` rows."""
        return '{0}{1}B'.format(CSI, n)

    def FORWARD(self, n=1):
        """Move the cursor right `n` columns."""
        return '{0}{1}C'.format(CSI, n)

    def BACK(self, n=1):
        """Move the cursor left `n` columns."""
        return '{0}{1}D'.format(CSI, n)

    def POS(self, x=1, y=1):
        """Move the cursor to 1-based column `x`, row `y`."""
        return '{0}{1};{2}H'.format(CSI, y, x)
def set_title(title):
    """Return the OSC sequence that sets the terminal window title."""
    return ''.join((OSC, '2;', title, BEL))
def clear_screen(mode=2):
    """Return the CSI erase-display sequence (mode 2 = whole screen)."""
    return '{0}{1}J'.format(CSI, mode)
def clear_line(mode=2):
    """Return the CSI erase-line sequence (mode 2 = whole line)."""
    return '{0}{1}K'.format(CSI, mode)
class AnsiFore:
    # Standard SGR foreground color codes (30-37); 39 restores the
    # terminal's default foreground.
    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 90
    LIGHTRED_EX = 91
    LIGHTGREEN_EX = 92
    LIGHTYELLOW_EX = 93
    LIGHTBLUE_EX = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX = 96
    LIGHTWHITE_EX = 97
class AnsiBack:
    # Standard SGR background color codes (40-47); 49 restores the
    # terminal's default background.
    BLACK = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX = 100
    LIGHTRED_EX = 101
    LIGHTGREEN_EX = 102
    LIGHTYELLOW_EX = 103
    LIGHTBLUE_EX = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX = 106
    LIGHTWHITE_EX = 107
class AnsiStyle:
    # SGR text-intensity attributes; RESET_ALL (0) clears colors and
    # styles alike.
    BRIGHT = 1
    DIM = 2
    NORMAL = 22
    RESET_ALL = 0
# Public, ready-to-use singletons: attribute access yields complete escape
# sequences, e.g. Fore.RED == '\033[31m'.
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
Cursor = AnsiCursor()
| gpl-2.0 |
fedora-conary/rbuild | rbuild_test/unit_test/handletest.py | 2 | 5120 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rbuild_test import rbuildhelp
from testutils import mock
from rbuild import errors
from rbuild import handle
from rbuild import rbuildcfg
from rbuild import ui
class HandleTest(rbuildhelp.RbuildHelper):
    """Unit tests for rbuild.handle.RbuildHandle (Python 2 test suite)."""

    def preHook(self, *args, **kw):
        # Sample pre-invocation hook used by testInstallApiHook.
        print 'hello world'

    def postHook(self, rv, *args, **kw):
        # Sample post-invocation hook; passes the return value through.
        return rv

    def testInstallApiHook(self):
        """Hooks can only target plugin methods; valid installs delegate to
        the plugin's private _installPrehook/_installPosthook."""
        h = self.getRbuildHandle()
        # Installing on a non-plugin method (this test method) must fail
        # with a MissingPluginError naming this class.
        err = self.assertRaises(errors.MissingPluginError,
                                h.installPrehook, self.testInstallApiHook,
                                self.preHook)
        self.assertEquals(err.pluginName, 'HandleTest')
        err = self.assertRaises(errors.MissingPluginError,
                                h.installPosthook, self.testInstallApiHook,
                                self.postHook)
        self.assertEquals(err.pluginName, 'HandleTest')
        #pylint: disable-msg=E1111
        # Valid installs are forwarded to the plugin's hook machinery.
        mockedPre = mock.mockMethod(h.Config._installPrehook)
        h.installPrehook(h.Config.displayConfig, self.preHook)
        mockedPre._mock.assertCalled('displayConfig', self.preHook)
        mockedPost = mock.mockMethod(h.Config._installPosthook)
        h.installPosthook(h.Config.displayConfig, self.postHook)
        mockedPost._mock.assertCalled('displayConfig', self.postHook)

    def testGetConfig(self):
        # getConfig() returns the config object the handle was created with.
        h = self.getRbuildHandle()
        assert(h.getConfig() is self.rbuildCfg)

    def testGetDefaultConfig(self):
        # Without a cfg, the handle builds one via
        # configClass(readConfigFiles=True).
        mock.mock(handle.RbuildHandle, 'configClass')
        h = handle.RbuildHandle(cfg=None, pluginManager=mock.MockObject())
        handle.RbuildHandle.configClass._mock.assertCalled(readConfigFiles=True)

    def testRbuildConfigPath(self):
        # With a plain mocked product store, the handle reads the rbuildrc
        # file from the test work directory.
        productStore = mock.MockObject()
        cfg = mock.MockObject()
        handle = self.getRbuildHandle(cfg=cfg,
                                      pluginManager=mock.MockObject(),
                                      productStore=productStore)
        cfg.read._mock.assertCalled(self.workDir + '/rbuildrc', exception=False)

    def testNoRbuildConfigPath(self):
        # A product store exposing getRbuildConfigData supplies the config
        # contents directly; the handle loads them via cfg.readObject.
        class mockProductStore(object):
            def __init__(self):
                self.getRbuildConfigData=mock.MockObject()
            def setHandle(self, handle):
                pass
            def getProduct(self):
                return 'product'
        productStore = mockProductStore()
        cfg = mock.MockObject()
        productStore.getRbuildConfigData._mock.setDefaultReturn('rbuildConfigData')
        handle = self.getRbuildHandle(cfg=cfg,
                                      pluginManager=mock.MockObject(),
                                      productStore=productStore)
        self.assertEquals(handle.product, 'product')
        cfg.readObject._mock.assertCalled('INTERNAL', 'rbuildConfigData')

    def testProxyMissingPlugin(self):
        """
        Handle should raise a KeyError when a missing plugin is accessed
        """
        h = self.getRbuildHandle()
        try:
            h.SomePlugin.dostuff()
        except errors.MissingPluginError, e_value:
            self.failUnlessEqual(e_value.pluginName, "SomePlugin")
            self.failUnlessEqual(str(e_value),
                                 "Plugin 'SomePlugin' is not loaded")
        else:
            self.fail("Handle did not raise KeyError for missing plugin")

    def testRepr(self):
        """
        Check repr() values with and without a product loaded
        """
        handle1 = self.getRbuildHandle()
        self.failUnlessEqual(repr(handle1),
                             '<RbuildHandle at %s>' % hex(id(handle1)))
        handle2 = self.getRbuildHandle()
        handle2.product = mock.MockObject()
        handle2.product._mock.set(getProductDefinitionLabel=lambda: 'dummy@label')
        self.failUnlessEqual(repr(handle2),
                             '<RbuildHandle at %s, product dummy@label>' % hex(id(handle2)))
class Command(object):
    # Minimal stand-in command class for CommandManagerTest; it registers
    # under every name listed in `commands`.
    commands = ['foo', 'bar']
class Command2(object):
    # Second stand-in command class, with distinct command names.
    commands = ['bam', 'baz']
class CommandManagerTest(rbuildhelp.RbuildHelper):
    """Unit tests for rbuild.handle.CommandManager."""

    def testCommandManager(self):
        # Every name in a registered class's `commands` list must map back
        # to that class, and getAllCommandClasses returns each class once.
        cm = handle.CommandManager()
        cm.registerCommand(Command)
        cm.registerCommand(Command2)
        self.assertEquals(cm.getCommandClass('foo'), Command)
        self.assertEquals(cm.getCommandClass('bar'), Command)
        self.assertEquals(cm.getCommandClass('bam'), Command2)
        self.assertEquals(cm.getCommandClass('baz'), Command2)
        self.assertEquals(cm.getAllCommandClasses(), set([Command, Command2]))
| apache-2.0 |
AkihikoITOH/capybara | capybara/virtualenv/lib/python2.7/site-packages/requests/packages/chardet/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
# Discount factor applied per multi-byte character when estimating the
# probability that the stream is *not* UTF-8.
ONE_CHAR_PROB = 0.5


class UTF8Prober(CharSetProber):
    """Charset prober that recognises UTF-8 via a coding state machine."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(UTF8SMModel)
        self.reset()

    def reset(self):
        """Reset both the prober state and the UTF-8 state machine."""
        CharSetProber.reset(self)
        self._mCodingSM.reset()
        self._mNumOfMBChar = 0

    def get_charset_name(self):
        return "utf-8"

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the detection state afterwards."""
        for byte in aBuf:
            state = self._mCodingSM.next_state(byte)
            if state == constants.eError:
                # Byte sequence invalid as UTF-8: definitely not this charset.
                self._mState = constants.eNotMe
                break
            if state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if state == constants.eStart:
                # A complete character was consumed; count it if multi-byte.
                if self._mCodingSM.get_current_charlen() >= 2:
                    self._mNumOfMBChar += 1
        if self.get_state() == constants.eDetecting:
            if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        """Confidence grows with the number of multi-byte chars seen,
        capped at 0.99 once six or more have been observed."""
        unlike = 0.99
        if self._mNumOfMBChar >= 6:
            return unlike
        for _ in range(self._mNumOfMBChar):
            unlike *= ONE_CHAR_PROB
        return 1.0 - unlike
| mit |
alexlo03/ansible | lib/ansible/modules/notification/mattermost.py | 43 | 4764 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Benjamin Jolivot <bjolivot@gmail.com>
# Inspired by slack module :
# # (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
# # (c) 2016, René Moser <mail@renemoser.net>
# # (c) 2015, Stefan Berggren <nsg@nsg.cc>
# # (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: mattermost
short_description: Send Mattermost notifications
description:
- Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
version_added: "2.3"
author: "Benjamin Jolivot (@bjolivot)"
options:
url:
description:
- Mattermost url (i.e. http://mattermost.yourcompany.com).
required: true
api_key:
description:
- Mattermost webhook api key. Log into your mattermost site, go to
Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
This will give you full URL. api_key is the last part.
http://mattermost.example.com/hooks/C(API_KEY)
required: true
text:
description:
- Text to send. Note that the module does not handle escaping characters.
required: true
channel:
description:
- Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
username:
description:
      - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin; see the Mattermost documentation).
default: Ansible
icon_url:
description:
- Url for the message sender's icon.
default: https://www.ansible.com/favicon.ico
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: yes
type: bool
"""
EXAMPLES = """
- name: Send notification message via Mattermost
mattermost:
url: http://mattermost.example.com
api_key: my_api_key
text: '{{ inventory_hostname }} completed'
- name: Send notification message via Mattermost all options
mattermost:
url: http://mattermost.example.com
api_key: my_api_key
text: '{{ inventory_hostname }} completed'
channel: notifications
username: 'Ansible on {{ inventory_hostname }}'
icon_url: http://www.example.com/some-image-file.png
"""
RETURN = '''
payload:
description: Mattermost payload
returned: success
type: string
webhook_url:
description: URL the webhook is sent to
returned: success
type: string
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def main():
    """Module entry point: build the webhook URL and JSON payload, then
    POST the notification unless running in check mode."""
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            url=dict(type='str', required=True),
            api_key=dict(type='str', required=True, no_log=True),
            text=dict(type='str', required=True),
            channel=dict(type='str', default=None),
            username=dict(type='str', default='Ansible'),
            icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    result = dict(changed=False, msg="OK")

    # Incoming-webhook endpoint: <base url>/hooks/<api key>.
    webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
    result['webhook_url'] = webhook_url

    # Forward only the optional fields the user actually supplied.
    payload = {}
    for field in ('text', 'channel', 'username', 'icon_url'):
        value = module.params[field]
        if value is not None:
            payload[field] = value
    payload = module.jsonify(payload)
    result['payload'] = payload

    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }

    # Check mode sends nothing and reports success even if the server is
    # down or the token is invalid.
    if module.check_mode is False:
        response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
        if info['status'] != 200:
            result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
            module.fail_json(**result)

    module.exit_json(**result)
# Run the module only when executed directly (not on import).
if __name__ == '__main__':
    main()
| gpl-3.0 |
zengenti/ansible | lib/ansible/modules/monitoring/datadog_monitor.py | 16 | 13293 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# import module snippets
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
author: "Sebastian Kornehl (@skornehl)"
requirements: [datadog]
options:
api_key:
description: ["Your DataDog API key."]
required: true
app_key:
description: ["Your DataDog app key."]
required: true
state:
description: ["The designated state of the monitor."]
required: true
        choices: ['present', 'absent', 'mute', 'unmute']
tags:
description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
required: false
default: None
version_added: "2.2"
type:
description:
- "The type of the monitor."
        - The 'event alert' is available starting at Ansible 2.1
required: false
default: null
choices: ['metric alert', 'service check', 'event alert']
query:
description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
required: false
default: null
name:
description: ["The name of the alert."]
required: true
message:
description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
required: false
default: null
silenced:
description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
required: false
default: ""
notify_no_data:
description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
required: false
default: False
no_data_timeframe:
description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
required: false
default: 2x timeframe for metric, 2 minutes for service
timeout_h:
description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
required: false
default: null
renotify_interval:
description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
required: false
default: null
escalation_message:
description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
required: false
default: null
notify_audit:
description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
required: false
default: False
thresholds:
description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
required: false
default: {'ok': 1, 'critical': 1, 'warning': 1}
locked:
description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
required: false
default: False
version_added: "2.2"
require_full_window:
description: ["A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped."]
required: false
default: null
version_added: "2.3"
id:
description: ["The id of the alert. If set, will be used instead of the name to locate the alert."]
required: false
default: null
version_added: "2.3"
'''
EXAMPLES = '''
# Create a metric monitor
datadog_monitor:
type: "metric alert"
name: "Test monitor"
state: "present"
query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Deletes a monitor
datadog_monitor:
name: "Test monitor"
state: "absent"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Mutes a monitor
datadog_monitor:
name: "Test monitor"
state: "mute"
silenced: '{"*":None}'
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Unmutes a monitor
datadog_monitor:
name: "Test monitor"
state: "unmute"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
# Import Datadog
try:
from datadog import initialize, api
HAS_DATADOG = True
except:
HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
def main():
    """Module entry point: parse arguments and dispatch on `state`."""
    module = AnsibleModule(
        argument_spec=dict(
            api_key=dict(required=True, no_log=True),
            app_key=dict(required=True, no_log=True),
            # BUG FIX: the keyword was misspelled "choises" for the next
            # two options, so AnsibleModule never validated the values.
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
            name=dict(required=True),
            query=dict(required=False),
            message=dict(required=False, default=None),
            silenced=dict(required=False, default=None, type='dict'),
            notify_no_data=dict(required=False, default=False, type='bool'),
            no_data_timeframe=dict(required=False, default=None),
            timeout_h=dict(required=False, default=None),
            renotify_interval=dict(required=False, default=None),
            escalation_message=dict(required=False, default=None),
            notify_audit=dict(required=False, default=False, type='bool'),
            thresholds=dict(required=False, type='dict', default=None),
            tags=dict(required=False, type='list', default=None),
            locked=dict(required=False, default=False, type='bool'),
            require_full_window=dict(required=False, default=None, type='bool'),
            id=dict(required=False)
        )
    )

    # The datadog client library is an external requirement.
    if not HAS_DATADOG:
        module.fail_json(msg='datadogpy required for this module')

    options = {
        'api_key': module.params['api_key'],
        'app_key': module.params['app_key']
    }

    initialize(**options)

    state = module.params['state']
    if state == 'present':
        install_monitor(module)
    elif state == 'absent':
        delete_monitor(module)
    elif state == 'mute':
        mute_monitor(module)
    elif state == 'unmute':
        unmute_monitor(module)
def _fix_template_vars(message):
if message:
return message.replace('[[', '{{').replace(']]', '}}')
return message
def _get_monitor(module):
    """Fetch the monitor by id (preferred) or exact name; {} when absent."""
    monitor_id = module.params['id']
    if monitor_id is not None:
        monitor = api.Monitor.get(monitor_id)
        if 'errors' in monitor:
            module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (monitor_id, str(monitor['errors'])))
        return monitor
    # No id given: scan all monitors for an exact name match.
    for monitor in api.Monitor.get_all():
        if monitor['name'] == module.params['name']:
            return monitor
    return {}
def _post_monitor(module, options):
    """Create a new monitor via the Datadog API and exit the module."""
    try:
        create_kwargs = dict(type=module.params['type'], query=module.params['query'],
                             name=module.params['name'],
                             message=_fix_template_vars(module.params['message']),
                             options=options)
        if module.params['tags'] is not None:
            create_kwargs['tags'] = module.params['tags']
        msg = api.Monitor.create(**create_kwargs)
        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception:
        err = get_exception()
        module.fail_json(msg=str(err))
def _equal_dicts(a, b, ignore_keys):
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_monitor(module, monitor, options):
    """Update an existing monitor; report changed=False when the server's
    echo of the monitor matches what we already had."""
    try:
        update_kwargs = dict(id=monitor['id'], query=module.params['query'],
                             name=module.params['name'],
                             message=_fix_template_vars(module.params['message']),
                             options=options)
        if module.params['tags'] is not None:
            update_kwargs['tags'] = module.params['tags']
        msg = api.Monitor.update(**update_kwargs)
        if 'errors' in msg:
            module.fail_json(msg=str(msg['errors']))
        elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified']):
            # Ignore server-managed fields when deciding whether the
            # update actually changed anything.
            module.exit_json(changed=False, msg=msg)
        else:
            module.exit_json(changed=True, msg=msg)
    except Exception:
        err = get_exception()
        module.fail_json(msg=str(err))
def install_monitor(module):
    """Create the monitor if it does not exist, otherwise update it."""
    options = {
        "silenced": module.params['silenced'],
        "notify_no_data": module.boolean(module.params['notify_no_data']),
        "no_data_timeframe": module.params['no_data_timeframe'],
        "timeout_h": module.params['timeout_h'],
        "renotify_interval": module.params['renotify_interval'],
        "escalation_message": module.params['escalation_message'],
        "notify_audit": module.boolean(module.params['notify_audit']),
        "locked": module.boolean(module.params['locked']),
        "require_full_window": module.params['require_full_window'],
    }

    # Thresholds only apply to service checks (with defaults) and to
    # metric alerts (only when explicitly provided).
    monitor_type = module.params['type']
    thresholds = module.params['thresholds']
    if monitor_type == "service check":
        options["thresholds"] = thresholds or {'ok': 1, 'critical': 1, 'warning': 1}
    elif monitor_type == "metric alert" and thresholds is not None:
        options["thresholds"] = thresholds

    existing = _get_monitor(module)
    if existing:
        _update_monitor(module, existing, options)
    else:
        _post_monitor(module, options)
def delete_monitor(module):
    """Delete the monitor if present; report changed=False when absent."""
    monitor = _get_monitor(module)
    if not monitor:
        module.exit_json(changed=False)
    try:
        response = api.Monitor.delete(monitor['id'])
        module.exit_json(changed=True, msg=response)
    except Exception:
        err = get_exception()
        module.fail_json(msg=str(err))
def mute_monitor(module):
    """Mute a monitor, optionally scoped by the `silenced` dict."""
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif monitor['options']['silenced']:
        # Datadog does not allow editing an active mute.
        module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
    elif (module.params['silenced'] is not None
          and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0):
        # Requested scopes are already covered: nothing to do.
        module.exit_json(changed=False)
    try:
        silenced = module.params['silenced']
        if silenced is None or silenced == "":
            msg = api.Monitor.mute(id=monitor['id'])
        else:
            msg = api.Monitor.mute(id=monitor['id'], silenced=silenced)
        module.exit_json(changed=True, msg=msg)
    except Exception:
        err = get_exception()
        module.fail_json(msg=str(err))
def unmute_monitor(module):
    """Unmute a monitor; report changed=False when it is not muted."""
    monitor = _get_monitor(module)
    if not monitor:
        module.fail_json(msg="Monitor %s not found!" % module.params['name'])
    elif not monitor['options']['silenced']:
        module.exit_json(changed=False)
    try:
        response = api.Monitor.unmute(monitor['id'])
        module.exit_json(changed=True, msg=response)
    except Exception:
        err = get_exception()
        module.fail_json(msg=str(err))
# Run the module only when executed directly (not on import).
if __name__ == '__main__':
    main()
| gpl-3.0 |
biddisco/VTK | Imaging/Core/Testing/Python/ResliceInterpolationModes.py | 20 | 2894 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this script tests vtkImageReslice with different interpolation modes:
# cubic, linear, and nearest-neighbor magnification of one slice, plus an
# unmagnified linear pass, each shown in one viewport of a 2x2 window.

# Image pipeline: read the 16-bit little-endian "headsq" sample volume,
# masking off the sign bit.
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)

# Reslice 1: cubic interpolation, magnified single z-slice.
reslice1 = vtk.vtkImageReslice()
reslice1.SetInputConnection(reader.GetOutputPort())
reslice1.SetInterpolationModeToCubic()
reslice1.SetOutputSpacing(0.65,0.65,1.5)
reslice1.SetOutputOrigin(80,120,40)
reslice1.SetOutputExtent(0,63,0,63,0,0)

# Reslice 2: identical geometry, linear interpolation.
reslice2 = vtk.vtkImageReslice()
reslice2.SetInputConnection(reader.GetOutputPort())
reslice2.SetInterpolationModeToLinear()
reslice2.SetOutputSpacing(0.65,0.65,1.5)
reslice2.SetOutputOrigin(80,120,40)
reslice2.SetOutputExtent(0,63,0,63,0,0)

# Reslice 3: identical geometry, nearest-neighbor interpolation.
reslice3 = vtk.vtkImageReslice()
reslice3.SetInputConnection(reader.GetOutputPort())
reslice3.SetInterpolationModeToNearestNeighbor()
reslice3.SetOutputSpacing(0.65,0.65,1.5)
reslice3.SetOutputOrigin(80,120,40)
reslice3.SetOutputExtent(0,63,0,63,0,0)

# Reslice 4: linear interpolation at the original spacing (no zoom).
reslice4 = vtk.vtkImageReslice()
reslice4.SetInputConnection(reader.GetOutputPort())
reslice4.SetInterpolationModeToLinear()
reslice4.SetOutputSpacing(3.2,3.2,1.5)
reslice4.SetOutputOrigin(0,0,40)
reslice4.SetOutputExtent(0,63,0,63,0,0)

# One 2D image mapper per reslice, all sharing the same window/level.
mapper1 = vtk.vtkImageMapper()
mapper1.SetInputConnection(reslice1.GetOutputPort())
mapper1.SetColorWindow(2000)
mapper1.SetColorLevel(1000)
mapper1.SetZSlice(0)

mapper2 = vtk.vtkImageMapper()
mapper2.SetInputConnection(reslice2.GetOutputPort())
mapper2.SetColorWindow(2000)
mapper2.SetColorLevel(1000)
mapper2.SetZSlice(0)

mapper3 = vtk.vtkImageMapper()
mapper3.SetInputConnection(reslice3.GetOutputPort())
mapper3.SetColorWindow(2000)
mapper3.SetColorLevel(1000)
mapper3.SetZSlice(0)

mapper4 = vtk.vtkImageMapper()
mapper4.SetInputConnection(reslice4.GetOutputPort())
mapper4.SetColorWindow(2000)
mapper4.SetColorLevel(1000)
mapper4.SetZSlice(0)

# 2D actors wrapping each mapper.
actor1 = vtk.vtkActor2D()
actor1.SetMapper(mapper1)
actor2 = vtk.vtkActor2D()
actor2.SetMapper(mapper2)
actor3 = vtk.vtkActor2D()
actor3.SetMapper(mapper3)
actor4 = vtk.vtkActor2D()
actor4.SetMapper(mapper4)

# Four renderers, one per quadrant of the window:
#   top-left: no zoom (4)  top-right: nearest (3)
#   bottom-left: linear (2)  bottom-right: cubic (1)
imager1 = vtk.vtkRenderer()
imager1.AddActor2D(actor1)
imager1.SetViewport(0.5,0.0,1.0,0.5)
imager2 = vtk.vtkRenderer()
imager2.AddActor2D(actor2)
imager2.SetViewport(0.0,0.0,0.5,0.5)
imager3 = vtk.vtkRenderer()
imager3.AddActor2D(actor3)
imager3.SetViewport(0.5,0.5,1.0,1.0)
imager4 = vtk.vtkRenderer()
imager4.AddActor2D(actor4)
imager4.SetViewport(0.0,0.5,0.5,1.0)

# Composite all four viewports into a single render window.
imgWin = vtk.vtkRenderWindow()
imgWin.AddRenderer(imager1)
imgWin.AddRenderer(imager2)
imgWin.AddRenderer(imager3)
imgWin.AddRenderer(imager4)
imgWin.SetSize(150,128)
imgWin.Render()
# --- end of script --
| bsd-3-clause |
sbmlteam/deviser | generator/base_files/BaseInterfaceFile.py | 1 | 2287 | #!/usr/bin/env python
#
# @file BaseCMakeFile.py
# @brief base class for all interface files to be generated
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2018 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from . import BaseFile
class BaseInterfaceFile(BaseFile.BaseFile):
    """Common base class for all interface files"""

    def __init__(self, name):
        # Delegate to BaseFile with the 'i' extension tag
        # (presumably SWIG *.i interface files -- confirm in BaseFile).
        BaseFile.BaseFile.__init__(self, name, 'i')
########################################################################
| lgpl-2.1 |
ivanamihalek/tcga | tcga/01_somatic_mutations/026_per_gene_stats.py | 1 | 3818 | #!/usr/bin/python -u
# needed the index on hugoSymbol for this to work with any speed:
#
# This source code is part of tcga, a TCGA processing pipeline, written by Ivana Mihalek.
# Copyright (C) 2014-2016 Ivana Mihalek.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see<http://www.gnu.org/licenses/>.
#
# Contact: ivana.mihalek@gmail.com
#
# needed the index on hugoSymbol for this to work with any speed:
# create index hugo_idx on somatic_mutations (hugoSymbol);
import sys, os
import MySQLdb
from tcga_utils.mysql import *
from tcga_utils.utils import *
from tcga_utils.ensembl import *
#########################################
def main():
    """Print per-study somatic-mutation statistics from the TCGA MySQL tables.

    For each TCGA study database: report the total number of rows in the
    somatic_mutations table, the number of distinct patients, and, per gene,
    the silent vs. non-silent mutation counts (only genes with more than 10
    non-silent mutations are printed).

    NOTE: Python 2 code (print statements).
    """
    db = connect_to_mysql()
    cursor = db.cursor()
    # TCGA study abbreviations to scan.
    # NOTE(review): "REA" is not a standard TCGA study code -- rectum
    # adenocarcinoma is normally "READ"; confirm against the database names.
    db_names = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD", "ESCA", "GBM", "HNSC", "KICH", "KIRC",
                "KIRP", "LAML", "LGG", "LIHC", "LUAD", "LUSC", "OV", "PAAD", "PCPG", "PRAD", "REA",
                "SARC", "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM"]
    genes = ["AKAP13", "ESR1", "HAND2", "PRKACA", "PRKAR2A", "PRKAR2B", "PRKCA"]
    # NOTE(review): the next line immediately discards the hand-picked gene
    # list above; with genes empty, the full gene list is loaded per database.
    genes = []
    full_name = read_cancer_names ()
    table = 'somatic_mutations'
    for db_name in db_names:
        print "######################################"
        print db_name, full_name[db_name]
        switch_to_db (cursor, db_name)
        ############################
        # total number of mutation entries in this study
        print "number of entries:",
        qry = "select count(1) from " + table
        rows = search_db(cursor, qry)
        print rows[0][0]
        ############################
        ############################
        # distinct patients, identified by short sample barcode
        print "number of patients:",
        qry = "select distinct(sample_barcode_short) from somatic_mutations"
        rows = search_db(cursor, qry)
        patients = [row[0] for row in rows]
        total_patients = len(patients)
        print total_patients
        ############################
        # NOTE(review): genes is only filled on the FIRST database that
        # reaches this branch and is then reused for all later databases.
        if not genes: # go for all of them
            print "number of different genes:"
            qry = "select distinct(hugo_symbol) from somatic_mutations"
            rows = search_db(cursor, qry)
            genes = [row[0] for row in rows]
            print "\t", len(genes)
        ############################
        print "mutations reported per gene"
        print " %10s  %5s  %5s   %s " % ("gene_name", "silent", "non_silent", "silent/non")
        for gene in genes:
            [silent_ct, non_silent_ct] = silent_proportion(cursor, gene)
            # only report genes with a meaningful number of non-silent hits
            if non_silent_ct > 10:
                print " %10s  %5d  %5d  %4.2f " % (gene, silent_ct,
                                                   non_silent_ct, float(silent_ct)/non_silent_ct)
            #if non_silent_ct:
            #    print " %10s  %5d  %5d  %4.2f " % (gene, silent_ct,
            #        non_silent_ct, float(silent_ct)/non_silent_ct)
            #else:
            #    print " %10s  %5d  %5d   all_silent " % (gene, silent_ct, non_silent_ct)
            #print " %4d   %10s  %5d  %5d  " % ( ct, gene, entries_per_gene[gene], silent_per_gene[gene])
        print
    cursor.close()
    db.close()

#########################################
if __name__ == '__main__':
    main()
| gpl-3.0 |
40223220/cd0504 | static/Brython3.1.1-20150328-091302/Lib/imp.py | 637 | 9839 | """This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_builtin, init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import load_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
load_dynamic = None
# Directly exposed by this module
from importlib._bootstrap import new_module
from importlib._bootstrap import cache_from_source, source_from_cache
from importlib import _bootstrap
#fixme brython
#from importlib import machinery
import importlib.machinery as machinery
import os
import sys
import tokenize
import warnings
# DEPRECATED module-type codes from the classic imp API; returned as the
# third element of the (suffix, mode, type) triples used by find_module()
# and consumed by load_module().
SEARCH_ERROR = 0       # historical placeholder; never returned
PY_SOURCE = 1          # .py source file
PY_COMPILED = 2        # .pyc/.pyo bytecode file
C_EXTENSION = 3        # dynamically loadable extension module
PY_RESOURCE = 4        # Mac resource module (obsolete)
PKG_DIRECTORY = 5      # package directory
C_BUILTIN = 6          # built-in module
PY_FROZEN = 7          # frozen module
PY_CODERESOURCE = 8    # Mac code resource module (obsolete)
IMP_HOOK = 9           # PEP 302 import hook
def get_magic():
    """Return the magic number for .pyc or .pyo files."""
    # The magic bytes are kept on importlib's private bootstrap module.
    return _bootstrap._MAGIC_BYTES
def get_tag():
    """Return the magic tag for .pyc or .pyo files."""
    # e.g. 'cpython-34'; defined by the running Python implementation.
    return sys.implementation.cache_tag
def get_suffixes():
    """Return (suffix, mode, type) triples for all importable file kinds.

    Deprecated: the suffix lists now live on importlib.machinery.
    Order is preserved: extensions first, then source, then bytecode.
    """
    warnings.warn('imp.get_suffixes() is deprecated; use the constants '
                  'defined on importlib.machinery instead',
                  DeprecationWarning, 2)
    groups = (
        (machinery.EXTENSION_SUFFIXES, 'rb', C_EXTENSION),
        (machinery.SOURCE_SUFFIXES, 'U', PY_SOURCE),
        (machinery.BYTECODE_SUFFIXES, 'rb', PY_COMPILED),
    )
    triples = []
    for suffix_list, mode, type_code in groups:
        triples.extend((suffix, mode, type_code) for suffix in suffix_list)
    return triples
class NullImporter:
    """Importer that never finds anything.

    Constructing one validates the path: an empty string or an existing
    directory is rejected with ImportError; find_module() always
    answers None.
    """

    def __init__(self, path):
        if path == '':
            raise ImportError('empty pathname', path='')
        if os.path.isdir(path):
            raise ImportError('existing directory', path=path)

    def find_module(self, fullname):
        """Always returns None."""
        return None
class _HackedGetData:
    """Compatibility support for the 'file' arguments of the various
    load_*() functions: mixes a caller-supplied, already-open file object
    into a loader so get_data() reads from it instead of reopening the path.
    """
    def __init__(self, fullname, path, file=None):
        # Cooperative __init__: the concrete loader class is next in the MRO.
        super().__init__(fullname, path)
        self.file = file
    def get_data(self, path):
        """Gross hack to contort loader to deal w/ load_*()'s bad API."""
        if self.file and path == self.path:
            # Reopen if the caller's file object has already been closed.
            if not self.file.closed:
                file = self.file
            else:
                self.file = file = open(self.path, 'r')

            with file:
                # Technically should be returning bytes, but
                # SourceLoader.get_code() just passed what is returned to
                # compile() which can handle str. And converting to bytes would
                # require figuring out the encoding to decode to and
                # tokenize.detect_encoding() only accepts bytes.
                return file.read()
        else:
            # Any other path: defer to the real loader's get_data().
            return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, _bootstrap.SourceFileLoader):
    """Compatibility support for implementing load_source().

    A SourceFileLoader whose get_data() can read from a caller-supplied
    open file object (see _HackedGetData).
    """
    #brython fix me
    pass
def load_source(name, pathname, file=None):
    """Load a Python source file as module *name* and return the module.

    Deprecated shim over importlib.machinery.SourceFileLoader.  If *file*
    is given, the source is read from that open file object instead of
    reopening *pathname*.
    """
    msg = ('imp.load_source() is deprecated; use '
           'importlib.machinery.SourceFileLoader(name, pathname).load_module()'
           ' instead')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadSourceCompatibility(name, pathname, file).load_module(name)
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourceFileLoader(name, pathname)
    return module
class _LoadCompiledCompatibility(_HackedGetData,
                                 _bootstrap.SourcelessFileLoader):
    """Compatibility support for implementing load_compiled().

    A SourcelessFileLoader whose get_data() can read from a
    caller-supplied open file object (see _HackedGetData).
    """
    #brython fix me
    pass
def load_compiled(name, pathname, file=None):
    """Load a compiled (bytecode) file as module *name* and return it.

    Deprecated shim over importlib.machinery.SourcelessFileLoader.  If
    *file* is given, the bytecode is read from that open file object.
    """
    msg = ('imp.load_compiled() is deprecated; use '
           'importlib.machinery.SourcelessFileLoader(name, pathname).'
           'load_module() instead ')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadCompiledCompatibility(name, pathname, file).load_module(name)
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourcelessFileLoader(name, pathname)
    return module
def load_package(name, path):
    """Load a package as module *name* and return the module.

    Deprecated shim over importlib's SourceFileLoader.  *path* may be the
    package directory (its __init__ file is then located) or a direct path
    to the __init__ file itself.  Raises ValueError if *path* is a
    directory with no __init__ file.
    """
    msg = ('imp.load_package() is deprecated; use either '
           'importlib.machinery.SourceFileLoader() or '
           'importlib.machinery.SourcelessFileLoader() instead')
    warnings.warn(msg, DeprecationWarning, 2)
    if os.path.isdir(path):
        extensions = (machinery.SOURCE_SUFFIXES[:] +
                      machinery.BYTECODE_SUFFIXES[:])
        for extension in extensions:
            # Bug fix: build each candidate from the original directory.
            # Previously 'path' was reassigned inside the loop, so every
            # candidate after the first was probed inside the previous
            # (nonexistent) candidate, e.g. pkg/__init__.py/__init__.pyc.
            init_path = os.path.join(path, '__init__' + extension)
            if os.path.exists(init_path):
                path = init_path
                break
        else:
            raise ValueError('{!r} is not a package'.format(path))
    return _bootstrap.SourceFileLoader(name, path).load_module(name)
def load_module(name, file, filename, details):
    """**DEPRECATED**

    Load a module, given information returned by find_module().

    The module name must include the full package name, if any.
    *details* is the (suffix, mode, type) triple from find_module();
    the type code selects which loader helper is dispatched to.
    """
    suffix, mode, type_ = details
    with warnings.catch_warnings():
        # The individual load_*() helpers emit their own deprecation
        # warnings; suppress them since the caller already came through
        # this (equally deprecated) API.
        warnings.simplefilter('ignore')
        if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
            raise ValueError('invalid file open mode {!r}'.format(mode))
        elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
            msg = 'file object required for import (type code {})'.format(type_)
            raise ValueError(msg)
        elif type_ == PY_SOURCE:
            return load_source(name, filename, file)
        elif type_ == PY_COMPILED:
            return load_compiled(name, filename, file)
        elif type_ == C_EXTENSION and load_dynamic is not None:
            # load_dynamic is None when the platform lacks dynamic loading.
            if file is None:
                with open(filename, 'rb') as opened_file:
                    return load_dynamic(name, filename, opened_file)
            else:
                return load_dynamic(name, filename, file)
        elif type_ == PKG_DIRECTORY:
            return load_package(name, filename)
        elif type_ == C_BUILTIN:
            return init_builtin(name)
        elif type_ == PY_FROZEN:
            return init_frozen(name)
        else:
            msg = "Don't know how to import {} (type code {})".format(name, type_)
            raise ImportError(msg, name=name)
def find_module(name, path=None):
    """**DEPRECATED**

    Search for a module.

    If path is omitted or None, search for a built-in, frozen or special
    module and continue search in sys.path. The module name cannot
    contain '.'; to search for a submodule of a package, pass the
    submodule name and the package's __path__.

    Returns a (file, pathname, (suffix, mode, type)) triple per the
    classic imp API; raises ImportError when nothing is found.
    """
    if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
    elif not isinstance(path, (type(None), list)):
        # Backwards-compatibility
        # Bug fix: the message used to read "'list' must be None or a list"
        # and reported type(name) instead of the offending type(path).
        raise RuntimeError("'path' must be None or a list, "
                           "not {}".format(type(path)))

    if path is None:
        # Built-in and frozen modules are found without touching the
        # filesystem; anything else falls back to scanning sys.path.
        if is_builtin(name):
            return None, None, ('', '', C_BUILTIN)
        elif is_frozen(name):
            return None, None, ('', '', PY_FROZEN)
        else:
            path = sys.path

    for entry in path:
        # A directory containing an __init__ file is a package.
        package_directory = os.path.join(entry, name)
        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
            package_file_name = '__init__' + suffix
            file_path = os.path.join(package_directory, package_file_name)
            if os.path.isfile(file_path):
                return None, package_directory, ('', '', PKG_DIRECTORY)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # get_suffixes() is deprecated
            for suffix, mode, type_ in get_suffixes():
                file_name = name + suffix
                file_path = os.path.join(entry, file_name)
                if os.path.isfile(file_path):
                    break
            else:
                continue
        break  # Break out of outer loop when breaking out of inner loop.
    else:
        raise ImportError(_bootstrap._ERR_MSG.format(name), name=name)

    encoding = None
    if mode == 'U':
        # Text mode: honour the source file's declared encoding (PEP 263)
        # when reopening it below.
        with open(file_path, 'rb') as file:
            encoding = tokenize.detect_encoding(file.readline)[0]
    file = open(file_path, mode, encoding=encoding)
    return file, file_path, (suffix, mode, type_)
# Modules currently being reloaded, keyed by name; guards against infinite
# recursion when a reload triggers another reload of the same module.
_RELOADING = {}

def reload(module):
    """Reload the module and return it.

    The module must have been successfully imported before.
    Raises TypeError for non-module arguments and ImportError when the
    module (or its parent package) is not in sys.modules.
    """
    if not module or type(module) != type(sys):
        raise TypeError("reload() argument must be module")
    name = module.__name__
    if name not in sys.modules:
        msg = "module {} not in sys.modules"
        raise ImportError(msg.format(name), name=name)
    if name in _RELOADING:
        # Re-entrant reload of the same module: hand back the in-progress one.
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name and parent_name not in sys.modules:
            msg = "parent {!r} not in sys.modules"
            raise ImportError(msg.format(parent_name), name=parent_name)
        module.__loader__.load_module(name)
        # The module may have replaced itself in sys.modules!
        return sys.modules[module.__name__]
    finally:
        try:
            del _RELOADING[name]
        except KeyError:
            pass
| agpl-3.0 |
camon/Flexget | tests/test_backlog.py | 22 | 1600 | from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestBacklog(FlexGetBase):
    """Exercise the backlog plugin: entries that disappear from the input
    must be re-injected with the field values snapshotted at input time.
    """

    __yaml__ = """
        tasks:
          test:
            mock:
              - {title: 'Test.S01E01.hdtv-FlexGet', description: ''}
            set:
              description: '{{description}}I'
              laterfield: 'something'
            # Change the priority of set plugin so it runs on all entries. TODO: Remove, this is an ugly hack.
            plugin_priority:
              set: -254
            backlog: 10 minutes
    """

    def test_backlog(self):
        """Tests backlog (and snapshot) functionality."""
        # Test entry comes out as expected on first run
        self.execute_task('test')
        entry = self.task.find_entry(title='Test.S01E01.hdtv-FlexGet')
        assert entry['description'] == 'I'
        assert entry['laterfield'] == 'something'
        # Simulate entry leaving the task, make sure backlog injects it
        del(self.manager.config['tasks']['test']['mock'])
        self.execute_task('test')
        entry = self.task.find_entry(title='Test.S01E01.hdtv-FlexGet')
        assert entry['description'] == 'I'
        assert entry['laterfield'] == 'something'
        # This time take away the set plugin too, to make sure data is being restored at it's state from input
        del(self.manager.config['tasks']['test']['set'])
        self.execute_task('test')
        entry = self.task.find_entry(title='Test.S01E01.hdtv-FlexGet')
        assert entry['description'] == ''
        assert 'laterfield' not in entry
| mit |
tom-heimbrodt/oeplatform | api/urls.py | 1 | 4527 | from django.conf.urls import url
from api import actions
from api import views
# Regular-expression fragments available when building the routes below.
pgsql_qualifier = r"[\w\d_]+"
equal_qualifier = r"[\w\d\s\'\=]"
structures = r'table|sequence'

# v0 REST API routes: schema/table/sequence resources first, then the
# "advanced" pass-through endpoints that map almost 1:1 onto api.actions.
urlpatterns = [
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/tables/(?P<table>[\w\d_\s]+)/$', views.Table.as_view()),
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/sequences/(?P<sequence>[\w\d_\s]+)/$', views.Sequence.as_view()),
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/tables/(?P<table>[\w\d_\s]+)/columns/(?P<column>[\w\d_\s]+)?$', views.Column.as_view()),
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/tables/(?P<table>[\w\d_\s]+)/id/(?P<id>[\d]+)/column/(?P<column>[\w\d_\s]+)/$', views.Fields.as_view()),
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/tables/(?P<table>[\w\d_\s]+)/indexes/(?P<index>[\w\d_\s]+)$', views.Index.as_view()),
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/tables/(?P<table>[\w\d_\s]+)/rows/(?P<row_id>[\d]+)?$', views.Rows.as_view()),
    url(r'^v0/schema/(?P<schema>[\w\d_\s]+)/tables/(?P<table>[\w\d_\s]+)/rows/new?$', views.Rows.as_view(),{'action':'new'}),
    url(r'^v0/advanced/search', views.create_ajax_handler(actions.data_search, allow_cors=True)),
    url(r'^v0/advanced/insert', views.create_ajax_handler(actions.data_insert)),
    url(r'^v0/advanced/delete', views.create_ajax_handler(actions.data_delete)),
    url(r'^v0/advanced/update', views.create_ajax_handler(actions.data_update)),
    url(r'^v0/advanced/info', views.create_ajax_handler(actions.data_info)),
    url(r'^v0/advanced/has_schema', views.create_ajax_handler(actions.has_schema)),
    url(r'^v0/advanced/has_table', views.create_ajax_handler(actions.has_table)),
    url(r'^v0/advanced/has_sequence', views.create_ajax_handler(actions.has_sequence)),
    url(r'^v0/advanced/has_type', views.create_ajax_handler(actions.has_type)),
    url(r'^v0/advanced/get_schema_names', views.create_ajax_handler(actions.get_schema_names)),
    url(r'^v0/advanced/get_table_names', views.create_ajax_handler(actions.get_table_names)),
    url(r'^v0/advanced/get_view_names', views.create_ajax_handler(actions.get_view_names)),
    url(r'^v0/advanced/get_view_definition', views.create_ajax_handler(actions.get_view_definition)),
    url(r'^v0/advanced/get_columns', views.create_ajax_handler(actions.get_columns)),
    url(r'^v0/advanced/get_pk_constraint', views.create_ajax_handler(actions.get_pk_constraint)),
    url(r'^v0/advanced/get_foreign_keys', views.create_ajax_handler(actions.get_foreign_keys)),
    url(r'^v0/advanced/get_indexes', views.create_ajax_handler(actions.get_indexes)),
    url(r'^v0/advanced/get_unique_constraints', views.create_ajax_handler(actions.get_unique_constraints)),
    # NOTE(review): 'request_dump' is wired to actions.get_unique_constraints,
    # which looks like a copy/paste slip -- confirm the intended action.
    url(r'^v0/advanced/request_dump', views.create_ajax_handler(actions.get_unique_constraints)),
    url(r'^v0/advanced/connection/open', views.create_ajax_handler(actions.open_raw_connection)),
    url(r'^v0/advanced/connection/close', views.create_ajax_handler(actions.close_raw_connection)),
    url(r'^v0/advanced/connection/commit', views.create_ajax_handler(actions.commit_raw_connection)),
    url(r'^v0/advanced/connection/rollback', views.create_ajax_handler(actions.rollback_raw_connection)),
    url(r'^v0/advanced/cursor/open', views.create_ajax_handler(actions.open_cursor)),
    url(r'^v0/advanced/cursor/close', views.create_ajax_handler(actions.close_cursor)),
    url(r'^v0/advanced/cursor/fetch_one', views.create_ajax_handler(actions.fetchone)),
    # NOTE(review): fetch_many passes fetchtype='all' (identical to fetch_all
    # below); presumably this should be fetchtype='many' -- confirm.
    url(r'^v0/advanced/cursor/fetch_many', views.FetchView.as_view(), dict(fetchtype='all')),
    url(r'^v0/advanced/cursor/fetch_all', views.FetchView.as_view(), dict(fetchtype='all')),
    url(r'^v0/advanced/set_isolation_level', views.create_ajax_handler(actions.set_isolation_level)),
    url(r'^v0/advanced/get_isolation_level', views.create_ajax_handler(actions.get_isolation_level)),
    url(r'^v0/advanced/do_begin_twophase', views.create_ajax_handler(actions.do_begin_twophase)),
    url(r'^v0/advanced/do_prepare_twophase', views.create_ajax_handler(actions.do_prepare_twophase)),
    url(r'^v0/advanced/do_rollback_twophase', views.create_ajax_handler(actions.do_rollback_twophase)),
    url(r'^v0/advanced/do_commit_twophase', views.create_ajax_handler(actions.do_commit_twophase)),
    url(r'^v0/advanced/do_recover_twophase', views.create_ajax_handler(actions.do_recover_twophase)),
    # NOTE(review): 'show_revisions' is also wired to get_unique_constraints;
    # likely another copy/paste slip -- confirm the intended action.
    url(r'^v0/advanced/show_revisions', views.create_ajax_handler(actions.get_unique_constraints)),
    # NOTE(review): the two patterns below lack the '^' anchor used everywhere
    # else, so they match anywhere in the requested path -- confirm intent.
    url(r'usrprop/', views.get_users),
    url(r'grpprop/', views.get_groups),
]
| agpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/pip/_vendor/packaging/version.py | 439 | 11949 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
# Internal parsed representation of a PEP 440 version; each field holds the
# corresponding parsed segment, or None when the segment is absent.
_Version = collections.namedtuple(
    "_Version",
    ["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        # Not PEP 440 -- fall back to the permissive setuptools-style scheme.
        return LegacyVersion(version)
class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.

    Raised by Version() when a string does not match VERSION_PATTERN.
    """
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
    """A non-PEP 440 version string, ordered by setuptools' legacy rules.

    Legacy versions always sort before any valid PEP 440 version (their
    comparison key uses epoch -1) and are never pre- or post-releases.
    """
    def __init__(self, version):
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)
    def __str__(self):
        return self._version
    def __repr__(self):
        return "<LegacyVersion({0})>".format(repr(str(self)))
    @property
    def public(self):
        # Legacy versions have no local-segment concept: all of it is public.
        return self._version
    @property
    def base_version(self):
        return self._version
    @property
    def local(self):
        return None
    @property
    def is_prerelease(self):
        return False
    @property
    def is_postrelease(self):
        return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version setuptools prior to
# it's adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
    """A PEP 440 version: parsed on construction, ordered per the spec."""

    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # post releases have two spellings: "1.0-1" and "1.0.post1".
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        # Reassemble the canonical PEP 440 string from the parsed pieces.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything before the '+' that introduces the local segment.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch + release only: no pre/post/dev/local markers.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        # Dev releases count as pre-releases for installer purposes.
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    """Build the tuple used to order Version objects per PEP 440.

    Absent segments are replaced with Infinity/-Infinity sentinels so they
    sort after/before present ones exactly as the spec prescribes.
    """
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
| mit |
jwjohns/PyMySQL | pymysql/tests/thirdparty/test_MySQLdb/test_MySQLdb_dbapi20.py | 10 | 7789 | #!/usr/bin/env python
from . import dbapi20
import pymysql
from pymysql.tests import base
try:
import unittest2 as unittest
except ImportError:
import unittest
class test_MySQLdb(dbapi20.DatabaseAPI20Test):
    """DB-API 2.0 compliance suite run against PyMySQL.

    Several base-suite tests are overridden because MySQL always returns a
    result set (possibly empty) where the generic suite expects an error.
    """
    driver = pymysql
    connect_args = ()
    connect_kw_args = base.PyMySQLTestCase.databases[0].copy()
    connect_kw_args.update(dict(read_default_file='~/.my.cnf',
                                charset='utf8',
                                sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL"))

    # Not applicable to this driver; disabled from the base suite.
    def test_setoutputsize(self): pass
    def test_setoutputsize_basic(self): pass
    # NOTE(review): this stub is dead code -- it is shadowed by the full
    # test_nextset definition (with @unittest.expectedFailure) further down.
    def test_nextset(self): pass

    """The tests on fetchone and fetchall and rowcount bogusly
    test for an exception if the statement cannot return a
    result set. MySQL always returns a result set; it's just that
    some things return empty result sets."""

    def test_fetchall(self):
        con = self._connect()
        try:
            cur = con.cursor()
            # cursor.fetchall should raise an Error if called
            # without executing a query that may return rows (such
            # as a select)
            self.assertRaises(self.driver.Error, cur.fetchall)

            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)

            # cursor.fetchall should raise an Error if called
            # after executing a a statement that cannot return rows
            ## self.assertRaises(self.driver.Error,cur.fetchall)

            cur.execute('select name from %sbooze' % self.table_prefix)
            rows = cur.fetchall()
            self.assertTrue(cur.rowcount in (-1, len(self.samples)))
            self.assertEqual(len(rows), len(self.samples),
                'cursor.fetchall did not retrieve all rows'
                )
            rows = [r[0] for r in rows]
            rows.sort()
            for i in range(0, len(self.samples)):
                self.assertEqual(rows[i], self.samples[i],
                    'cursor.fetchall retrieved incorrect rows'
                    )
            rows = cur.fetchall()
            self.assertEqual(
                len(rows), 0,
                'cursor.fetchall should return an empty list if called '
                'after the whole result set has been fetched'
                )
            self.assertTrue(cur.rowcount in (-1, len(self.samples)))

            self.executeDDL2(cur)
            cur.execute('select name from %sbarflys' % self.table_prefix)
            rows = cur.fetchall()
            self.assertTrue(cur.rowcount in (-1, 0))
            self.assertEqual(len(rows), 0,
                'cursor.fetchall should return an empty list if '
                'a select query returns no rows'
                )
        finally:
            con.close()

    def test_fetchone(self):
        con = self._connect()
        try:
            cur = con.cursor()

            # cursor.fetchone should raise an Error if called before
            # executing a select-type query
            self.assertRaises(self.driver.Error, cur.fetchone)

            # cursor.fetchone should raise an Error if called after
            # executing a query that cannnot return rows
            self.executeDDL1(cur)
            ## self.assertRaises(self.driver.Error,cur.fetchone)

            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertEqual(cur.fetchone(), None,
                'cursor.fetchone should return None if a query retrieves '
                'no rows'
                )
            self.assertTrue(cur.rowcount in (-1, 0))

            # cursor.fetchone should raise an Error if called after
            # executing a query that cannnot return rows
            cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
            ## self.assertRaises(self.driver.Error,cur.fetchone)

            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchone()
            self.assertEqual(len(r), 1,
                'cursor.fetchone should have retrieved a single row'
                )
            self.assertEqual(r[0], 'Victoria Bitter',
                'cursor.fetchone retrieved incorrect data'
                )
            ## self.assertEqual(cur.fetchone(),None,
            ##     'cursor.fetchone should return None if no more rows available'
            ##     )
            self.assertTrue(cur.rowcount in (-1, 1))
        finally:
            con.close()

    # Same complaint as for fetchall and fetchone
    def test_rowcount(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            ## self.assertEqual(cur.rowcount,-1,
            ##     'cursor.rowcount should be -1 after executing no-result '
            ##     'statements'
            ##     )
            cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
            ## self.assertTrue(cur.rowcount in (-1,1),
            ##     'cursor.rowcount should == number or rows inserted, or '
            ##     'set to -1 after executing an insert statement'
            ##     )
            cur.execute("select name from %sbooze" % self.table_prefix)
            self.assertTrue(cur.rowcount in (-1, 1),
                'cursor.rowcount should == number of rows returned, or '
                'set to -1 after executing a select statement'
                )
            self.executeDDL2(cur)
            ## self.assertEqual(cur.rowcount,-1,
            ##     'cursor.rowcount not being reset to -1 after executing '
            ##     'no-result statements'
            ##     )
        finally:
            con.close()

    def test_callproc(self):
        pass # performed in test_MySQL_capabilities

    def help_nextset_setUp(self, cur):
        ''' Should create a procedure called deleteme
            that returns two result sets, first the
            number of rows in booze then "name from booze"
        '''
        sql = """
            create procedure deleteme()
            begin
                select count(*) from %(tp)sbooze;
                select name from %(tp)sbooze;
            end
        """ % dict(tp=self.table_prefix)
        cur.execute(sql)

    def help_nextset_tearDown(self, cur):
        'If cleaning up is needed after nextSetTest'
        cur.execute("drop procedure deleteme")

    @unittest.expectedFailure
    def test_nextset(self):
        # Expected to fail: MySQL returns an extra (empty) result set for
        # the CALL itself, which the generic DB-API expectations reject.
        from warnings import warn
        con = self._connect()
        try:
            cur = con.cursor()
            if not hasattr(cur, 'nextset'):
                return

            try:
                self.executeDDL1(cur)
                sql = self._populate()
                for sql in self._populate():
                    cur.execute(sql)

                self.help_nextset_setUp(cur)

                cur.callproc('deleteme')
                numberofrows = cur.fetchone()
                assert numberofrows[0] == len(self.samples)
                assert cur.nextset()
                names = cur.fetchall()
                assert len(names) == len(self.samples)
                s = cur.nextset()
                if s:
                    empty = cur.fetchall()
                    self.assertEqual(len(empty), 0,
                                     "non-empty result set after other result sets")
                    #warn("Incompatibility: MySQL returns an empty result set for the CALL itself",
                    #     Warning)
                #assert s == None,'No more return sets, should return None'
            finally:
                self.help_nextset_tearDown(cur)
        finally:
            con.close()
# Allow running this DB-API compliance module directly.
if __name__ == '__main__':
    unittest.main()
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/4NodeANDES/cantilever_different_Poisson/NumberOfDivision4/PoissonRatio0.35/compare_HDF5_ALL.py | 424 | 3382 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
# (everything before 'test_cases' in the cwd, plus 'compare_function')
cur_dir = os.getcwd()
sep = 'test_cases'
test_DIR = cur_dir.split(sep, 1)[0]
scriptDIR = test_DIR + 'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# the real essi hdf5 results: argv[1] = new run, argv[2] = reference
h5_result_new = sys.argv[1]
h5_result_ori = sys.argv[2]
# Displacements are always present and always compared.
disp_pass_or_fail = h5diff_disp(h5_result_ori, h5_result_new)
# Gauss-point and element outputs may be absent from some models (their HDF5
# groups are missing, raising KeyError); treat a missing group as a pass.
Gauss_pass_or_fail = 1
try:
    Gauss_pass_or_fail = h5diff_Gauss_output(h5_result_ori, h5_result_new)
except KeyError:
    pass
Element_Output_pass_or_fail = 1
try:
    Element_Output_pass_or_fail = h5diff_Element_output(h5_result_ori, h5_result_new)
except KeyError:
    pass
# Report the overall verdict (Python 2 print statements).
if disp_pass_or_fail and Gauss_pass_or_fail and Element_Output_pass_or_fail:
    print headOK(), "All hdf5 results are the same."
    print headOKCASE(), "-----------Done this case!-----------------"
else:
    if disp_pass_or_fail == 0:
        print headFailed(), "-----------Displacement has mismatches!-----------------"
    if Gauss_pass_or_fail == 0:
        print headFailed(), "-----------StressStrain has mismatches!-----------------"
    if Element_Output_pass_or_fail == 0:
        print headFailed(), "-----------Element output has mismatches!-----------------"
# # The allowable tolerance between the ori_vals and new_vals values.
# tolerance=1e-5
# machine_epsilon=1e-16
# ori_vals=[]
# new_vals=[]
# ori_vals.append(find_max_disp(h5_result_ori,0))
# new_vals.append(find_max_disp(h5_result_new,0))
# # if multiple steps, compare the max_disp of random steps
# Nstep = find_disp_Nstep(h5_result_ori)
# if Nstep>5 :
# for i in xrange(1,4):
# test_step=random.randint(1,Nstep-1)
# ori_vals.append(find_max_disp(h5_result_ori,test_step))
# new_vals.append(find_max_disp(h5_result_new,test_step))
# # calculate the errors
# errors=[]
# for index, x in enumerate(ori_vals):
# if(abs(x))>machine_epsilon:
# errors.append(abs((new_vals[index]-x)/x))
# else:
# errors.append(machine_epsilon)
# # compare and form the flags
# flags=[]
# for item in errors:
# if abs(item)<tolerance:
# flags.append('pass')
# else:
# flags.append('failed')
# # print the results
# case_flag=1
# print headrun() , "-----------Testing results-----------------"
# print headstep() ,'{0} {1} {2} {3}'.format('back_value ','new_value ','error ','flag')
# for index, x in enumerate(errors):
# if(abs(x)<tolerance):
# print headOK() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# else:
# case_flag=0
# print headFailed() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# if(case_flag==1):
# print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# automatically find the script directory.
# sys.path.append("/home/yuan/Dropbox/3essi_self_verification/test_suite/scripts" )
# script_dir=sys.argv[1]
# print headstart() , "Running test cases..."
# print headlocation(), os.path.dirname(os.path.abspath(__file__))
# file_in=open("ori_vals_values.txt","r")
# Input the 1st line, which is the ori_vals value.
# ori_vals= float(file_in.readline())
# Input the 2nd line, which is the HDF5 output filename.
# new_vals=find_max_disp(file_in.readline());
# file_in.close() | cc0-1.0 |
swanndri/ROS-Healthcare-Simulator | se306/src/package1/scripts/status.py | 1 | 10098 | #!/usr/bin/env python
import roslib; roslib.load_manifest('package1')
import std_msgs.msg
import rospy
import Tkinter as tk
import ttk
import database
class StatusGUI(tk.Tk):
    """Tk window that displays the resident's need levels as progress bars
    and offers a combobox for manually injecting events into the scheduler.
    """

    # Maps combobox labels to task keys in database.Database.EVENTS.
    EVENT_TASKS = {
        'Heart Attack': 'Resident.heart_attack',
        'Eat': 'Resident.eat_snack',
        'Exercise': 'Resident.gym',
        'Bath': 'Resident.bath',
        'Toilet': 'Resident.toilet',
        'Sleep': 'Resident.sleep',
    }

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # Fixed window size/position and title.
        self.geometry("380x380+700+100")
        self.title("Resident status")
        self.build_frames()
        self.combobox_set_up()
        self.cb.pack()
        # Build, lay out, and initialize the GUI components.
        self.build_gui_components()
        self.position_frames_and_components()
        self.initialize_status_bars()

    def build_frames(self):
        """Create the container frames for the bar groups and status areas."""
        self.all_bars_frame = tk.Frame(self)
        self.horizontal_bars_frame = tk.Frame(self.all_bars_frame)
        self.satisfaction_frame = ttk.Labelframe(self.horizontal_bars_frame,text="Satisfaction",padding=(0,0,10,10))
        self.leisure_frame = ttk.Labelframe(self.horizontal_bars_frame,text="Leisure",padding=(0,0,10,10))
        self.cleanliness_frame = ttk.Labelframe(self.horizontal_bars_frame,text="Cleanliness",padding=(0,0,10,10))
        self.vital_frame = ttk.Labelframe(self.all_bars_frame,text="Vitals",padding=(0,10,0,10))
        self.status_frame = ttk.Labelframe(self,text="Status")
        self.combo_frame = ttk.Labelframe(self,text="Generate event")

    def _make_bar(self, parent, text, orient="horizontal", length=150):
        """Create and return a (label, determinate progress bar) pair."""
        label = ttk.Label(parent, text=text)
        bar = ttk.Progressbar(parent, orient=orient, length=length,
                              mode="determinate")
        return label, bar

    def build_gui_components(self):
        """Create every label/progress-bar pair shown in the window."""
        # Satisfaction levels
        self.fullness_label, self.fullness_progress = self._make_bar(self.satisfaction_frame, "Fullness")
        self.hydration_label, self.hydration_progress = self._make_bar(self.satisfaction_frame, "Hydration")
        # Cleanliness levels
        self.hygiene_label, self.hygiene_progress = self._make_bar(self.cleanliness_frame, "Hygiene")
        self.relief_label, self.relief_progress = self._make_bar(self.cleanliness_frame, "Relief")
        # Leisure levels (the "Enjoyment" bar is backed by the
        # Entertainment status name)
        self.entertainment_label, self.entertainment_progress = self._make_bar(self.leisure_frame, "Enjoyment")
        self.fitness_label, self.fitness_progress = self._make_bar(self.leisure_frame, "Fitness")
        # Vital levels are drawn as tall vertical bars
        self.health_label, self.health_progress = self._make_bar(self.vital_frame, "Health", orient="vertical", length=180)
        self.sanity_label, self.sanity_progress = self._make_bar(self.vital_frame, "Sanity", orient="vertical", length=180)
        # Free-text status updates from the scheduler
        self.status_info = ttk.Label(self.status_frame,width="40",wraplength=320)

    def position_frames_and_components(self):
        """Pack the frames and grid each bar into its frame."""
        #set up frames
        self.all_bars_frame.pack()
        self.horizontal_bars_frame.pack(side="left")
        self.satisfaction_frame.pack(padx=(0,10),pady=10)
        self.cleanliness_frame.pack(padx=(0,10),pady=10)
        self.leisure_frame.pack(padx=(0,10),pady=10)
        self.vital_frame.pack(side="left",padx=10,pady=5)
        self.combo_frame.pack()
        self.status_frame.pack()
        #Satisfaction frame
        self.satisfaction_frame.grid_columnconfigure(0,minsize=75)
        self.fullness_label.grid(row=0)
        self.fullness_progress.grid(row=0,column=1)
        self.hydration_label.grid(row=1)
        self.hydration_progress.grid(row=1,column=1)
        #Cleanliness frame
        self.cleanliness_frame.grid_columnconfigure(0,minsize=75)
        self.hygiene_label.grid(row=0)
        self.hygiene_progress.grid(row=0,column=1)
        self.relief_label.grid(row=1)
        self.relief_progress.grid(row=1,column=1)
        #Leisure frame
        self.leisure_frame.grid_columnconfigure(0,minsize=75)
        self.entertainment_label.grid(row=0)
        self.entertainment_progress.grid(row=0,column=1)
        self.fitness_label.grid(row=1)
        self.fitness_progress.grid(row=1,column=1)
        #Vital levels frame
        self.health_label.grid(row=1,padx=(5,1))
        self.health_progress.grid(row=0,padx=(5,1))
        self.sanity_label.grid(row=1,column=1,padx=(1,5))
        self.sanity_progress.grid(row=0,column=1,padx=(1,5))
        #status frame
        self.status_info.grid(row=0,rowspan=2, columnspan=2, padx=10, pady=10)

    def initialize_status_bars(self):
        """Start every bar at 100 (fully satisfied/healthy)."""
        for bar in (self.fullness_progress, self.health_progress,
                    self.entertainment_progress, self.sanity_progress,
                    self.fitness_progress, self.hydration_progress,
                    self.hygiene_progress, self.relief_progress):
            bar["value"] = 100

    def handle_selected(self, event):
        """Publish the scheduler task matching the selected combobox entry.

        The published message format is
        "<priority> <task name> <duration> <destination>".
        """
        index = self.cb.current()
        selected_event = self.events[index]
        event_name = self.EVENT_TASKS.get(selected_event)
        if event_name is None:
            return
        task = database.Database.EVENTS.get(event_name)
        event_pub.publish("%d %s %d %s" % (task.get('priority'), event_name,
                                           task.get('duration'),
                                           task.get('destination')))
        if selected_event == "Heart Attack":
            print("Should publish new event - doctor.doctor.emergency") #example
        else:
            print("Should publish new event - ", selected_event)

    def combobox_set_up(self):
        """Build the read-only event combobox and bind the selection handler."""
        self.events = ('Heart Attack','Eat','Exercise','Sleep','Bath','Toilet')
        self.cb = ttk.Combobox(self.combo_frame, values=self.events, state='readonly')
        self.cb.bind("<<ComboboxSelected>>", self.handle_selected)

    def update_status_level(self,status_type,status_value):
        """Set the bar for *status_type* to *status_value*; unknown types are
        silently ignored (matching the original elif chain's behavior)."""
        bars = {
            "Fullness": self.fullness_progress,
            "Health": self.health_progress,
            "Entertainment": self.entertainment_progress,
            "Sanity": self.sanity_progress,
            "Fitness": self.fitness_progress,
            "Hydration": self.hydration_progress,
            "Hygiene": self.hygiene_progress,
            "Relief": self.relief_progress,
        }
        bar = bars.get(status_type)
        if bar is not None:
            bar["value"] = status_value
# Module-level ROS setup: register this process as the 'status' node.
rospy.init_node('status', anonymous=True)
rate = rospy.Rate(40)  # 40 Hz sleep rate for the shutdown spin loop below
def callback(msg):
    # msg.data is "<status_name> <value>" where value is a float on a
    # 0-100 scale (see the "/100" prints below).
    print(msg.data)
    status_name, status_value = msg.data.split()
    status_value = float(status_value)
    # Republish a coarse severity band on human_status; >80 needs none.
    if (status_value>80):
        pass
    elif(status_value>50):
        stat_pub.publish("%s %s" % (status_name, 'med'))
    elif(status_value>20):
        stat_pub.publish("%s %s" % (status_name, 'low'))
    elif(status_value>0):
        stat_pub.publish("%s %s" % (status_name, 'dan'))
    else:
        # Values <= 0 fall through to here (Python 2 print statement).
        print "Something has gone terribly wrong"
    # Drop the trailing character of the name (presumably an index suffix
    # added by the publisher -- TODO confirm) to get the GUI bar name.
    status_type = status_name[:-1]
    mGui.update_status_level(status_type,status_value)
    if status_value <= 0:
        print ("0/100")
    else:
        print (msg.data + "/100")
def scheduler_callback(msg):
    # Show the human-readable explanation of the scheduled task in the GUI.
    task = msg.data
    print task
    # Published format is "<priority> <task_name> <duration> <destination>"
    # (see StatusGUI.handle_selected); keep only the task name.
    task = task.split()[1]
    status = ''
    #Search the dictionary (resident_statuses) in the Constants file for the correct status
    temp = database.Database.EVENTS.get(task)
    print temp
    status = temp.get('explanation')
    mGui.status_info["text"] = status
sub = rospy.Subscriber("human", std_msgs.msg.String, callback)
# NOTE(review): `sub` is rebound here; the first subscription presumably
# stays alive only because rospy keeps its own reference -- confirm.
sub = rospy.Subscriber("scheduler", std_msgs.msg.String, scheduler_callback)
stat_pub = rospy.Publisher("human_status", std_msgs.msg.String, queue_size = 10)
event_pub = rospy.Publisher('scheduler', std_msgs.msg.String, queue_size=10)
mGui = StatusGUI()
# NOTE(review): mainloop() blocks until the window is closed, so the spin
# loop below only runs afterwards -- looks unintended; verify.
mGui.mainloop()
while not rospy.is_shutdown():
    rate.sleep()
| mit |
gamingrobot/second-sunrise | game/plugins/core/menu/menu.py | 1 | 3648 | from direct.gui.DirectGui import *
import xml.etree.ElementTree as et
class Menu:
    """Menu manager: builds DirectGUI widgets (buttons, sliders) from a menu
    XML definition and wires their callbacks to controller classes located
    under ``menuCodeDir``."""

    def __init__(self, xml):
        self.frames = []
        self.directObjects = []
        self.menuConfigDir = "config/menu/"
        self.menuCodeDir = "plugins.core.menu.controllers"
        self.currentMenu = ""
        self.reload(xml)

    def reload(self, xml):
        """(Re)build all widgets from *xml*.

        If the element carries a ``config`` attribute, the referenced menu
        definition file is parsed and used instead (and becomes the current
        menu).  Parse errors now propagate instead of being silently
        swallowed by the former bare except.
        """
        config = xml.get('config')
        if config is not None:
            xml = et.parse(self.menuConfigDir + config + '.xml')
            self.currentMenu = config
        # Buttons are stacked downwards from near the top of the screen.
        yPos = 0.8
        for but in xml.findall('button'):
            button = DirectButton(text=but.get('text', '-'), pos=(0.0, 0.0, yPos), scale=.065)
            yPos -= 0.1
            action = but.get('action')
            if action == "menu":
                # Switch to another menu definition file.
                button['command'] = self.changeMenu
                button['extraArgs'] = [but.get('target')]
            elif action == "function":
                # Bind directly to the controller method named by target=.
                # (A former `log.info(...)` call here referenced an
                # undefined name `log` and raised NameError; removed.)
                button['command'] = self.getFunc(xml, but, 'target')
            elif action == "config":
                # NOTE(review): `manager` is not defined anywhere in this
                # module; this branch and the "event" branch below raise
                # NameError when taken -- confirm which object was meant
                # to supply `manager`.
                button['command'] = manager.transition
                button['extraArgs'] = [but.get('target')]
            elif action == "event":
                button['command'] = manager.get('events').triggerEvent
                button['extraArgs'] = [but.get('target'), {}]
            button.hide()
            self.directObjects.append(button)
        for sldr in xml.findall('slider'):
            # Element.get() never raises, so the former try/except blocks
            # could not fire and a missing attribute left None behind,
            # crashing float() below.  Use get()'s default argument instead.
            low = sldr.get('min', 0)
            high = sldr.get('max', 100)
            step = sldr.get('step', 0.01)
            # Resolve the controller callback named by the onchange attribute.
            func = self.getFunc(xml, sldr, 'onchange')
            slider = DirectSlider(range=(float(low), float(high)), value=float(sldr.get('value')), pageSize=float(step), command=func)
            slider['extraArgs'] = [slider]
            yPos -= 0.1
            slider.hide()
            self.directObjects.append(slider)
        '''for fld in xml.findall('input'):
            field = DirectEntry(parent)
            field.hide()
            self.directObjects.append(field)'''

    def start(self):
        """Show every widget of the current menu."""
        for obj in self.directObjects:
            obj.show()

    def stop(self):
        """Hide every widget of the current menu."""
        for obj in self.directObjects:
            obj.hide()

    def destroy(self):
        """Destroy every widget of the current menu."""
        for obj in self.directObjects:
            obj.destroy()

    def getFunc(self, xml, el, attr):
        """Import the controller module for the current menu, instantiate its
        controller class, and return the bound method named by *el*'s *attr*.
        """
        base = self.menuCodeDir + '.' + self.currentMenu.lower()
        menu = __import__(base, globals(), locals(), [self.currentMenu.lower()])
        inst = getattr(menu, self.currentMenu)(xml)  # init controller
        return getattr(inst, el.get(attr))  # bound method to call

    def changeMenu(self, target):
        """Tear down the current widgets and rebuild from *target*'s XML."""
        xml = et.parse(self.menuConfigDir + target + '.xml')
        self.currentMenu = target
        self.destroy()
        self.directObjects = []
        self.reload(xml)
        self.start()
| apache-2.0 |
Matt-Deacalion/django | tests/middleware/test_security.py | 291 | 7781 | from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(SimpleTestCase):
    """Tests for django.middleware.security.SecurityMiddleware.

    Each test drives process_request/process_response with the relevant
    SECURE_* settings overridden.  Note: several decorators previously used
    wrong setting names (HSTS_SECONDS, SECURE_CONTENT_TYPE_NO_SNIFF,
    BROWSER_XSS_FILTER), which the middleware never reads, so those tests
    silently exercised default behavior; they now use the SECURE_* names
    consistent with the rest of the class.
    """
    request = RequestFactory()

    @property
    def middleware(self):
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        # Marks a RequestFactory request as having arrived over HTTPS.
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        """Build an HttpResponse, applying any extra headers= mapping."""
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        """Run a request/response pair through the middleware and return
        either the middleware's redirect or the processed response."""
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    def process_request(self, method, *args, **kwargs):
        """Run a bare request through process_request only."""
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=False))

    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        True, the middleware adds a "strict-transport-security" header with the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        False, the middleware adds a "strict-transport-security" header without
        the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With BROWSER_XSS_FILTER set to True, the middleware adds
        "x-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With BROWSER_XSS_FILTER set to False, the middleware does not add an
        "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
| bsd-3-clause |
icdishb/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) does not necessarily be classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
#         Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
                                          Nystroem)
from sklearn.decomposition import PCA

# The digits dataset
digits = datasets.load_digits(n_class=9)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)

# We learn the digits on the first half of the digits.
# Floor division (//) keeps the index an int on both Python 2 and 3;
# the former plain / produced a float index and a TypeError on Python 3.
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]

# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)

# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()

# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
                                        ("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
                                        ("svm", svm.LinearSVC())])

# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time

linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time

# Sweep the number of sampled components for both approximations.
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
    fourier_approx_svm.set_params(feature_map__n_components=D)
    nystroem_approx_svm.set_params(feature_map__n_components=D)
    start = time()
    nystroem_approx_svm.fit(data_train, targets_train)
    nystroem_times.append(time() - start)
    start = time()
    fourier_approx_svm.fit(data_train, targets_train)
    fourier_times.append(time() - start)
    fourier_score = fourier_approx_svm.score(data_test, targets_test)
    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
    nystroem_scores.append(nystroem_score)
    fourier_scores.append(fourier_score)

# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
               label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
               label='Fourier approx. kernel')

# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [kernel_svm_time, kernel_svm_time], '--', label='rbf svm')

# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")

# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')

# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)

# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])

# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))

# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
                         fourier_approx_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(1, 3, i + 1)
    Z = clf.predict(flat_grid)

    # Put the result into a color plot
    Z = Z.reshape(grid.shape[:-1])
    plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
    plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
PhilSk/zulip | zerver/management/commands/export_usermessage_batch.py | 43 | 1977 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
import glob
import logging
import os
import shutil
import tempfile
import ujson
from zerver.lib.export import export_usermessages_batch
class Command(BaseCommand):
    help = """UserMessage fetching helper for export.py"""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('--path',
                            dest='path',
                            action="store",
                            default=None,
                            help='Path to find messages.json archives')
        parser.add_argument('--thread',
                            dest='thread',
                            action="store",
                            default=None,
                            help='Thread ID')

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        # Worker loop: batches progress .json.partial -> .json.locked -> .json.
        logging.info("Starting UserMessage batch thread %s" % (options['thread'],))
        files = set(glob.glob(os.path.join(options['path'], 'messages-*.json.partial')))
        for partial_path in files:
            locked_path = partial_path.replace(".json.partial", ".json.locked")
            output_path = partial_path.replace(".json.partial", ".json")
            # Claim the batch by renaming it; only one worker's rename
            # succeeds, so losers fall into the except and skip it.
            try:
                shutil.move(partial_path, locked_path)
            except Exception:
                # Already claimed by another process
                continue
            logging.info("Thread %s processing %s" % (options['thread'], output_path))
            try:
                export_usermessages_batch(locked_path, output_path)
            except Exception:
                # Put the item back in the free pool when we fail
                shutil.move(locked_path, partial_path)
                raise
| apache-2.0 |
papouso/odoo | addons/account_budget/wizard/account_budget_crossovered_report.py | 375 | 2089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_budget_crossvered_report(osv.osv_memory):
    """Wizard that launches the crossovered-budget (analytic full) report
    for the currently selected budgets over a date range."""

    _name = "account.budget.crossvered.report"
    _description = "Account Budget crossvered report"

    _columns = {
        'date_from': fields.date('Start of period', required=True),
        'date_to': fields.date('End of period', required=True),
    }
    _defaults = {
        # Default period: from January 1st of the current year to today.
        'date_from': lambda *a: time.strftime('%Y-01-01'),
        'date_to': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def check_report(self, cr, uid, ids, context=None):
        """Read the wizard form and return the report action for it."""
        if context is None:
            context = {}
        form_values = self.read(cr, uid, ids, context=context)[0]
        active_ids = context.get('active_ids', [])
        form_values['ids'] = active_ids
        form_values['report'] = 'analytic-full'
        report_data = {
            'ids': active_ids,
            'model': 'crossovered.budget',
            'form': form_values,
        }
        return self.pool['report'].get_action(
            cr, uid, [], 'account_budget.report_crossoveredbudget',
            data=report_data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pdelsante/thug | thug/DOM/W3C/HTML/HTMLSelectElement.py | 1 | 1663 | #!/usr/bin/env python
from thug.DOM.W3C.Core.DOMException import DOMException
from .HTMLElement import HTMLElement
from .HTMLOptionsCollection import HTMLOptionsCollection
from .attr_property import attr_property
from .compatibility import thug_long
class HTMLSelectElement(HTMLElement):
    """DOM HTMLSelectElement: models a <select> form control."""

    # Index of the currently selected option; Thug defaults to the first.
    selectedIndex = 0
    # Current value of the control (not tracked from option state here).
    value = None

    disabled = attr_property("disabled", bool)
    multiple = attr_property("multiple", bool)
    name = attr_property("name")
    size = attr_property("size", thug_long)
    tabIndex = attr_property("tabindex", thug_long)

    def __init__(self, doc, tag):
        HTMLElement.__init__(self, doc, tag)
        # Snapshot of the <option> children, in document order.
        self._options = list(self.tag.find_all("option"))

    @property
    def type(self):
        """Form-control type string, per the DOM spec."""
        return "select-multiple" if self.multiple else "select-one"

    @property
    def length(self):
        """Number of options in the list."""
        return len(self.options)

    @property
    def form(self):
        """Containing form element (not tracked by Thug)."""
        return None

    @property
    def options(self):
        """Collection wrapping the current option elements."""
        return HTMLOptionsCollection(self.doc, self._options)

    def add(self, element, before):
        """Insert `element` before the option matching `before`'s value,
        or append it when `before` is falsy.

        Raises:
            DOMException(NOT_FOUND_ERR): `before` was given but no option
                with a matching value exists.
        """
        if not before:
            self._options.append(element)
            return

        index = None
        for opt in self._options:
            if opt.value == before.value:
                index = self._options.index(opt)
                # Insert before the *first* match; the original kept
                # scanning and inserted before the last duplicate.
                break

        if index is None:
            raise DOMException(DOMException.NOT_FOUND_ERR)

        self._options.insert(index, element)

    def remove(self, index):
        """Remove the option at `index`.

        Per the DOM spec an out-of-range index is a no-op.  The original
        check (`index > len(...)`) let `index == len(...)` raise
        IndexError and silently deleted from the end for negative
        indexes; both are now treated as out of range.
        """
        if not 0 <= index < len(self._options):
            return

        del self._options[index]

    def blur(self):
        # No-op: Thug does not model focus state.
        pass

    def focus(self):
        # No-op: Thug does not model focus state.
        pass
| gpl-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/topology_parameters.py | 6 | 1144 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyParameters(Model):
    """Request body for the Network Watcher "topology" operation.

    :param target_resource_group_name: Name of the resource group whose
     network topology is requested. Required.
    :type target_resource_group_name: str
    """

    # Serialization metadata consumed by msrest: required fields and the
    # mapping from Python attribute names to wire (JSON) keys.
    _validation = {
        'target_resource_group_name': {'required': True},
    }

    _attribute_map = {
        'target_resource_group_name': {'key': 'targetResourceGroupName', 'type': 'str'},
    }

    def __init__(self, target_resource_group_name):
        super(TopologyParameters, self).__init__()
        self.target_resource_group_name = target_resource_group_name
| mit |
connoranderson/Speechables | mechanize-0.2.5/mechanize/_markupbase.py | 134 | 14399 | # Taken from Python 2.6.4 for use by _sgmllib.py
"""Shared support for scanning document type declarations in HTML and XHTML.
This module is used as a foundation for the HTMLParser and sgmllib
modules (indirectly, for htmllib as well). It has no documented
public API and should not be used directly.
"""
import re

# Pre-compiled scanners for declaration syntax.  Binding the bound
# ``.match`` method saves an attribute lookup per call.
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
# A single- or double-quoted string literal plus trailing whitespace.
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
# End of an SGML comment: "--" followed by ">".
_commentclose = re.compile(r'--\s*>')
# End of a standard marked section: "]]>".
_markedsectionclose = re.compile(r']\s*]\s*>')

# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')

# Keep the module namespace clean; the patterns above are already compiled.
del re
class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers."""

    def __init__(self):
        # Abstract base: refuse direct instantiation.
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "markupbase.ParserBase must be subclassed")

    def error(self, message):
        """Report a fatal parse error; subclasses must override."""
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        """Reset line/offset bookkeeping to the start of the input."""
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j) # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j

    # Extra characters tolerated in a declaration; set per-declaration
    # type inside parse_declaration().
    _decl_otherchars = ''

    # Internal -- parse declaration (for use by subclasses).
    def parse_declaration(self, i):
        """Parse a <!...> declaration starting at index ``i``.

        Returns the index just past the declaration, or -1 when the
        buffer ends before the declaration is complete.
        """
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1 # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in ("attlist", "linktype", "link", "element"):
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1 # incomplete

    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        """Parse a <![...]...> marked section; return end index or -1."""
        rawdata= self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name( i+3, i )
        if j < 0:
            return j
        if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
            # look for standard ]]> ending
            match= _markedsectionclose.search(rawdata, i+3)
        elif sectName in ("if", "else", "endif"):
            # look for MS Office ]> ending
            match= _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)

    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in ("attlist", "element", "entity", "notation"):
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1

    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1

    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            #   name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1

    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            # parameter entity declaration: skip '%' and whitespace
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1 # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan a name token and the new position and the token, or
    # return -1 if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1 # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])

    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
| apache-2.0 |
uranusjr/django | tests/file_uploads/tests.py | 45 | 23742 | import base64
import hashlib
import os
import shutil
import sys
import tempfile as sys_tempfile
import unittest
from io import BytesIO, StringIO
from urllib.parse import quote
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import SimpleTestCase, TestCase, client, override_settings
from django.utils.encoding import force_bytes
from . import uploadhandler
from .models import FileModel
# Filename containing Chinese characters and an accented char, used to
# exercise non-ASCII upload handling.
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
# Scratch media root shared by the test classes; removed in tearDownClass.
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE=[])
class FileUploadTests(TestCase):
    """End-to-end tests of multipart file-upload handling via the test client."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Create the scratch MEDIA_ROOT once for the whole class.
        if not os.path.isdir(MEDIA_ROOT):
            os.makedirs(MEDIA_ROOT)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(MEDIA_ROOT)
        super().tearDownClass()

    def test_simple_upload(self):
        """A plain multipart POST with one file succeeds."""
        with open(__file__, 'rb') as fp:
            post_data = {
                'name': 'Ringo',
                'file_field': fp,
            }
            response = self.client.post('/upload/', post_data)
        self.assertEqual(response.status_code, 200)

    def test_large_upload(self):
        """Uploads larger than the in-memory threshold round-trip intact."""
        file = tempfile.NamedTemporaryFile
        with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
            file1.write(b'a' * (2 ** 21))
            file1.seek(0)

            file2.write(b'a' * (10 * 2 ** 20))
            file2.seek(0)

            post_data = {
                'name': 'Ringo',
                'file_field1': file1,
                'file_field2': file2,
            }

            # Record a SHA1 of every value so the view can verify content;
            # non-file values fall back to hashing their byte form.
            for key in list(post_data):
                try:
                    post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
                    post_data[key].seek(0)
                except AttributeError:
                    post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()

            response = self.client.post('/verify/', post_data)

            self.assertEqual(response.status_code, 200)

    def _test_base64_upload(self, content, encode=base64.b64encode):
        """Build a base64 Content-Transfer-Encoding part and verify the
        decoded content is echoed back by the view."""
        payload = client.FakePayload("\r\n".join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file"; filename="test.txt"',
            'Content-Type: application/octet-stream',
            'Content-Transfer-Encoding: base64',
            '']))
        payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
        payload.write('--' + client.BOUNDARY + '--\r\n')
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo_content/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.json()['file'], content)

    def test_base64_upload(self):
        """Small base64-encoded upload decodes correctly."""
        self._test_base64_upload("This data will be transmitted base64-encoded.")

    def test_big_base64_upload(self):
        """Base64 payload spanning multiple read chunks decodes correctly."""
        self._test_base64_upload("Big data" * 68000)  # > 512Kb

    def test_big_base64_newlines_upload(self):
        """encodebytes() inserts newlines; the parser must tolerate them."""
        self._test_base64_upload("Big data" * 68000, encode=base64.encodebytes)

    def test_unicode_file_name(self):
        """A non-ASCII filename survives the upload round trip."""
        with sys_tempfile.TemporaryDirectory() as temp_dir:
            # This file contains Chinese symbols and an accented char in the name.
            with open(os.path.join(temp_dir, UNICODE_FILENAME), 'w+b') as file1:
                file1.write(b'b' * (2 ** 10))
                file1.seek(0)
                response = self.client.post('/unicode_name/', {'file_unicode': file1})
        self.assertEqual(response.status_code, 200)

    def test_unicode_file_name_rfc2231(self):
        """
        Test receiving file upload when filename is encoded with RFC2231
        (#22971).
        """
        payload = client.FakePayload()
        payload.write('\r\n'.join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % quote(UNICODE_FILENAME),
            'Content-Type: application/octet-stream',
            '',
            'You got pwnd.\r\n',
            '\r\n--' + client.BOUNDARY + '--\r\n'
        ]))
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/unicode_name/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)

    def test_unicode_name_rfc2231(self):
        """
        Test receiving file upload when filename is encoded with RFC2231
        (#22971).
        """
        payload = client.FakePayload()
        payload.write(
            '\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % quote(
                    UNICODE_FILENAME
                ),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n',
                '\r\n--' + client.BOUNDARY + '--\r\n'
            ])
        )
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/unicode_name/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)

    def test_blank_filenames(self):
        """
        Receiving file upload when filename is blank (before and after
        sanitization) should be okay.
        """
        # The second value is normalized to an empty name by
        # MultiPartParser.IE_sanitize()
        filenames = ['', 'C:\\Windows\\']

        payload = client.FakePayload()
        for i, name in enumerate(filenames):
            payload.write('\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n'
            ]))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)

        # Empty filenames should be ignored
        received = response.json()
        for i, name in enumerate(filenames):
            self.assertIsNone(received.get('file%s' % i))

    def test_dangerous_file_names(self):
        """Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader We have to do some monkeybusiness here to construct
        # a malicious payload with an invalid file name (containing os.sep or
        # os.pardir). This similar to what an attacker would need to do when
        # trying such an attack.
        scary_file_names = [
            "/tmp/hax0rd.txt",          # Absolute path, *nix-style.
            "C:\\Windows\\hax0rd.txt",  # Absolute path, win-style.
            "C:/Windows/hax0rd.txt",    # Absolute path, broken-style.
            "\\tmp\\hax0rd.txt",        # Absolute path, broken in a different way.
            "/tmp\\hax0rd.txt",         # Absolute path, broken by mixing.
            "subdir/hax0rd.txt",        # Descendant path, *nix-style.
            "subdir\\hax0rd.txt",       # Descendant path, win-style.
            "sub/dir\\hax0rd.txt",      # Descendant path, mixed.
            "../../hax0rd.txt",         # Relative path, *nix-style.
            "..\\..\\hax0rd.txt",       # Relative path, win-style.
            "../..\\hax0rd.txt"         # Relative path, mixed.
        ]

        payload = client.FakePayload()
        for i, name in enumerate(scary_file_names):
            payload.write('\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n'
            ]))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        # The filenames should have been sanitized by the time it got to the view.
        received = response.json()
        for i, name in enumerate(scary_file_names):
            got = received["file%s" % i]
            self.assertEqual(got, "hax0rd.txt")

    def test_filename_overflow(self):
        """File names over 256 characters (dangerous on some platforms) get fixed up."""
        long_str = 'f' * 300
        cases = [
            # field name, filename, expected
            ('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
            ('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
            ('no_extension', long_str, long_str[:255]),
            ('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
            ('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
        ]
        payload = client.FakePayload()
        for name, filename, _ in cases:
            payload.write("\r\n".join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="{}"; filename="{}"',
                'Content-Type: application/octet-stream',
                '',
                'Oops.',
                ''
            ]).format(name, filename))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        result = response.json()
        for name, _, expected in cases:
            got = result[name]
            self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
            self.assertLess(len(got), 256,
                            "Got a long file name (%s characters)." % len(got))

    def test_file_content(self):
        """File-like objects of every supported flavor are readable in the view."""
        file = tempfile.NamedTemporaryFile
        with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
            no_content_type.write(b'no content')
            no_content_type.seek(0)

            simple_file.write(b'text content')
            simple_file.seek(0)
            simple_file.content_type = 'text/plain'

            string_io = StringIO('string content')
            bytes_io = BytesIO(b'binary content')

            response = self.client.post('/echo_content/', {
                'no_content_type': no_content_type,
                'simple_file': simple_file,
                'string': string_io,
                'binary': bytes_io,
            })
            received = response.json()
            self.assertEqual(received['no_content_type'], 'no content')
            self.assertEqual(received['simple_file'], 'text content')
            self.assertEqual(received['string'], 'string content')
            self.assertEqual(received['binary'], 'binary content')

    def test_content_type_extra(self):
        """Uploaded files may have content type parameters available."""
        file = tempfile.NamedTemporaryFile
        with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
            no_content_type.write(b'something')
            no_content_type.seek(0)

            simple_file.write(b'something')
            simple_file.seek(0)
            simple_file.content_type = 'text/plain; test-key=test_value'

            response = self.client.post('/echo_content_type_extra/', {
                'no_content_type': no_content_type,
                'simple_file': simple_file,
            })
            received = response.json()
            self.assertEqual(received['no_content_type'], {})
            self.assertEqual(received['simple_file'], {'test-key': 'test_value'})

    def test_truncated_multipart_handled_gracefully(self):
        """
        If passed an incomplete multipart message, MultiPartParser does not
        attempt to read beyond the end of the stream, and simply will handle
        the part that can be parsed gracefully.
        """
        payload_str = "\r\n".join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file"; filename="foo.txt"',
            'Content-Type: application/octet-stream',
            '',
            'file contents'
            '--' + client.BOUNDARY + '--',
            '',
        ])
        payload = client.FakePayload(payload_str[:-10])
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        self.assertEqual(self.client.request(**r).json(), {})

    def test_empty_multipart_handled_gracefully(self):
        """
        If passed an empty multipart message, MultiPartParser will return
        an empty QueryDict.
        """
        r = {
            'CONTENT_LENGTH': 0,
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': client.FakePayload(b''),
        }
        self.assertEqual(self.client.request(**r).json(), {})

    def test_custom_upload_handler(self):
        """A quota-enforcing handler accepts small files and rejects large ones."""
        file = tempfile.NamedTemporaryFile
        with file() as smallfile, file() as bigfile:
            # A small file (under the 5M quota)
            smallfile.write(b'a' * (2 ** 21))
            smallfile.seek(0)

            # A big file (over the quota)
            bigfile.write(b'a' * (10 * 2 ** 20))
            bigfile.seek(0)

            # Small file posting should work.
            self.assertIn('f', self.client.post('/quota/', {'f': smallfile}).json())

            # Large files don't go through.
            self.assertNotIn('f', self.client.post("/quota/", {'f': bigfile}).json())

    def test_broken_custom_upload_handler(self):
        """Altering upload handlers mid-upload raises AttributeError."""
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'a' * (2 ** 21))
            file.seek(0)

            # AttributeError: You cannot alter upload handlers after the upload has been processed.
            with self.assertRaises(AttributeError):
                self.client.post('/quota/broken/', {'f': file})

    def test_fileupload_getlist(self):
        """request.FILES.getlist() sees every file posted under one field name."""
        file = tempfile.NamedTemporaryFile
        with file() as file1, file() as file2, file() as file2a:
            file1.write(b'a' * (2 ** 23))
            file1.seek(0)

            file2.write(b'a' * (2 * 2 ** 18))
            file2.seek(0)

            file2a.write(b'a' * (5 * 2 ** 20))
            file2a.seek(0)

            response = self.client.post('/getlist_count/', {
                'file1': file1,
                'field1': 'test',
                'field2': 'test3',
                'field3': 'test5',
                'field4': 'test6',
                'field5': 'test7',
                'file2': (file2, file2a)
            })
            got = response.json()
            self.assertEqual(got.get('file1'), 1)
            self.assertEqual(got.get('file2'), 2)

    def test_fileuploads_closed_at_request_end(self):
        """Parsed upload file handles are closed once the request finishes."""
        file = tempfile.NamedTemporaryFile
        with file() as f1, file() as f2a, file() as f2b:
            response = self.client.post('/fd_closing/t/', {
                'file': f1,
                'file2': (f2a, f2b),
            })

        request = response.wsgi_request
        # The files were parsed.
        self.assertTrue(hasattr(request, '_files'))

        file = request._files['file']
        self.assertTrue(file.closed)

        files = request._files.getlist('file2')
        self.assertTrue(files[0].closed)
        self.assertTrue(files[1].closed)

    def test_no_parsing_triggered_by_fd_closing(self):
        """Closing file descriptors alone must not force multipart parsing."""
        file = tempfile.NamedTemporaryFile
        with file() as f1, file() as f2a, file() as f2b:
            response = self.client.post('/fd_closing/f/', {
                'file': f1,
                'file2': (f2a, f2b),
            })

        request = response.wsgi_request
        # The fd closing logic doesn't trigger parsing of the stream
        self.assertFalse(hasattr(request, '_files'))

    def test_file_error_blocking(self):
        """
        The server should not block when there are upload errors (bug #8622).
        This can happen if something -- i.e. an exception handler -- tries to
        access POST while handling an error in parsing POST. This shouldn't
        cause an infinite loop!
        """
        class POSTAccessingHandler(client.ClientHandler):
            """A handler that'll access POST during an exception."""
            def handle_uncaught_exception(self, request, resolver, exc_info):
                ret = super().handle_uncaught_exception(request, resolver, exc_info)
                request.POST  # evaluate
                return ret

        # Maybe this is a little more complicated that it needs to be; but if
        # the django.test.client.FakePayload.read() implementation changes then
        # this test would fail. So we need to know exactly what kind of error
        # it raises when there is an attempt to read more than the available bytes:
        try:
            client.FakePayload(b'a').read(2)
        except Exception as err:
            reference_error = err

        # install the custom handler that tries to access request.POST
        self.client.handler = POSTAccessingHandler()

        with open(__file__, 'rb') as fp:
            post_data = {
                'name': 'Ringo',
                'file_field': fp,
            }
            try:
                self.client.post('/upload_errors/', post_data)
            except reference_error.__class__ as err:
                self.assertFalse(
                    str(err) == str(reference_error),
                    "Caught a repeated exception that'll cause an infinite loop in file uploads."
                )
            except Exception as err:
                # CustomUploadError is the error that should have been raised
                self.assertEqual(err.__class__, uploadhandler.CustomUploadError)

    def test_filename_case_preservation(self):
        """
        The storage backend shouldn't mess with the case of the filenames
        uploaded.
        """
        # Synthesize the contents of a file upload with a mixed case filename
        # so we don't have to carry such a file in the Django tests source code
        # tree.
        vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
        post_data = [
            '--%(boundary)s',
            'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
            'Content-Type: application/octet-stream',
            '',
            'file contents\n'
            '',
            '--%(boundary)s--\r\n',
        ]
        response = self.client.post(
            '/filename_case/',
            '\r\n'.join(post_data) % vars,
            'multipart/form-data; boundary=%(boundary)s' % vars
        )
        self.assertEqual(response.status_code, 200)
        id = int(response.content)
        obj = FileModel.objects.get(pk=id)
        # The name of the file uploaded and the file stored in the server-side
        # shouldn't differ.
        self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
    """
    Tests for error handling during directory creation
    via _save_FIELD_file (ticket #6450)
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Create the scratch MEDIA_ROOT once for the whole class.
        if not os.path.isdir(MEDIA_ROOT):
            os.makedirs(MEDIA_ROOT)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(MEDIA_ROOT)
        super().tearDownClass()

    def setUp(self):
        self.obj = FileModel()

    @unittest.skipIf(sys.platform == 'win32', "Python on Windows doesn't have working os.chmod().")
    def test_readonly_root(self):
        """Permission errors are not swallowed"""
        os.chmod(MEDIA_ROOT, 0o500)
        self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
        with self.assertRaises(PermissionError):
            self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)

    def test_not_a_directory(self):
        """The correct IOError is raised when the upload directory name exists but isn't a directory"""
        # Create a file with the upload directory name
        open(UPLOAD_TO, 'wb').close()
        self.addCleanup(os.remove, UPLOAD_TO)
        with self.assertRaises(IOError) as exc_info:
            with SimpleUploadedFile('foo.txt', b'x') as file:
                self.obj.testfile.save('foo.txt', file, save=False)
        # The test needs to be done on a specific string as IOError
        # is raised even without the patch (just not early enough)
        self.assertEqual(exc_info.exception.args[0], "%s exists and is not a directory." % UPLOAD_TO)
class MultiParserTests(unittest.TestCase):
    """Unit tests for MultiPartParser construction and header parsing."""

    def test_empty_upload_handlers(self):
        # We're not actually parsing here; just checking if the parser properly
        # instantiates with empty upload handlers.
        MultiPartParser({
            'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
            'CONTENT_LENGTH': '1'
        }, StringIO('x'), [], 'utf-8')

    def test_rfc2231_parsing(self):
        """Well-formed RFC 2231 extended parameter values are decoded."""
        test_data = (
            (b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
             "This is ***fun***"),
            (b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
             "foo-ä.html"),
            (b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
             "foo-ä.html"),
        )
        for raw_line, expected_title in test_data:
            parsed = parse_header(raw_line)
            self.assertEqual(parsed[1]['title'], expected_title)

    def test_rfc2231_wrong_title(self):
        """
        Test wrongly formatted RFC 2231 headers (missing double single quotes).
        Parsing should not crash (#24209).
        """
        test_data = (
            (b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
             b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
            (b"Content-Type: application/x-stuff; title*='foo.html",
             b"'foo.html"),
            (b"Content-Type: application/x-stuff; title*=bar.html",
             b"bar.html"),
        )
        for raw_line, expected_title in test_data:
            parsed = parse_header(raw_line)
            self.assertEqual(parsed[1]['title'], expected_title)
| bsd-3-clause |
theflofly/tensorflow | tensorflow/contrib/seq2seq/python/ops/basic_decoder.py | 6 | 9286 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import sampler as sampler_py
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import layers
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.util import nest
__all__ = [
    "BasicDecoderOutput",
    "BasicDecoder",
    # NOTE(review): `BasicDecoderV2` is defined below but not exported here —
    # confirm whether it should be added to the public API.
]
class BasicDecoderOutput(
    collections.namedtuple("BasicDecoderOutput", ("rnn_output", "sample_id"))):
  """Outputs of a decoding step.

  Fields:
    rnn_output: The RNN cell output for the step (after the optional
      `output_layer` projection, when the decoder has one).
    sample_id: The ids produced by the helper/sampler for the step.
  """
  # Empty __slots__ keeps instances dict-free, matching the namedtuple base
  # (the previous bare `pass` re-introduced a per-instance __dict__).
  __slots__ = ()
class BasicDecoder(decoder.Decoder):
  """Basic sampling decoder."""
  def __init__(self, cell, helper, initial_state, output_layer=None):
    """Initialize BasicDecoder.
    Args:
      cell: An `RNNCell` instance.
      helper: A `Helper` instance.
      initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
        The initial state of the RNNCell.
      output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`. Optional layer to apply to the RNN output prior
        to storing the result or sampling.
    Raises:
      TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
    """
    rnn_cell_impl.assert_like_rnncell("cell", cell)
    if not isinstance(helper, helper_py.Helper):
      raise TypeError("helper must be a Helper, received: %s" % type(helper))
    if (output_layer is not None
        and not isinstance(output_layer, layers_base.Layer)):
      raise TypeError(
          "output_layer must be a Layer, received: %s" % type(output_layer))
    self._cell = cell
    self._helper = helper
    self._initial_state = initial_state
    self._output_layer = output_layer
  @property
  def batch_size(self):
    # Delegated to the helper, which knows the batch size of its inputs.
    return self._helper.batch_size
  def _rnn_output_size(self):
    """Return the per-step output size, accounting for `output_layer`."""
    size = self._cell.output_size
    if self._output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size. We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = nest.map_structure(
          lambda s: tensor_shape.TensorShape([None]).concatenate(s),
          size)
      layer_output_shape = self._output_layer.compute_output_shape(
          output_shape_with_unknown_batch)
      return nest.map_structure(lambda s: s[1:], layer_output_shape)
  @property
  def output_size(self):
    # Return the cell output and the id
    return BasicDecoderOutput(
        rnn_output=self._rnn_output_size(),
        sample_id=self._helper.sample_ids_shape)
  @property
  def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and the sample_ids_dtype from the helper.
    dtype = nest.flatten(self._initial_state)[0].dtype
    return BasicDecoderOutput(
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        self._helper.sample_ids_dtype)
  def initialize(self, name=None):
    """Initialize the decoder.
    Args:
      name: Name scope for any created operations.
    Returns:
      `(finished, first_inputs, initial_state)`.
    """
    # The helper supplies (finished, first_inputs); the decoder appends its
    # own initial state.
    return self._helper.initialize() + (self._initial_state,)
  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.
    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.
    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
      cell_outputs, cell_state = self._cell(inputs, state)
      # Project outputs before sampling so the helper sees the final logits.
      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)
      sample_ids = self._helper.sample(
          time=time, outputs=cell_outputs, state=cell_state)
      (finished, next_inputs, next_state) = self._helper.next_inputs(
          time=time,
          outputs=cell_outputs,
          state=cell_state,
          sample_ids=sample_ids)
      outputs = BasicDecoderOutput(cell_outputs, sample_ids)
      return (outputs, next_state, next_inputs, finished)
class BasicDecoderV2(decoder.BaseDecoder):
  """Basic sampling decoder."""
  def __init__(self, cell, sampler, output_layer=None, **kwargs):
    """Initialize BasicDecoder.
    Args:
      cell: An `RNNCell` instance.
      sampler: A `Sampler` instance.
      output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`. Optional layer to apply to the RNN output prior to
        storing the result or sampling.
      **kwargs: Other keyward arguments for layer creation.
    Raises:
      TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
    """
    rnn_cell_impl.assert_like_rnncell("cell", cell)
    if not isinstance(sampler, sampler_py.Sampler):
      raise TypeError("sampler must be a Sampler, received: %s" % (sampler,))
    if (output_layer is not None and
        not isinstance(output_layer, layers.Layer)):
      raise TypeError(
          "output_layer must be a Layer, received: %s" % (output_layer,))
    self.cell = cell
    self.sampler = sampler
    self.output_layer = output_layer
    super(BasicDecoderV2, self).__init__(**kwargs)
  def initialize(self, inputs, initial_state=None, **kwargs):
    """Initialize the decoder."""
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # NOTE: `_cell_dtype` is only set here, and `output_dtype` reads it, so
    # `initialize` must be called before `output_dtype` is accessed.
    self._cell_dtype = nest.flatten(initial_state)[0].dtype
    return self.sampler.initialize(inputs, **kwargs) + (initial_state,)
  @property
  def batch_size(self):
    # Delegated to the sampler, which knows the batch size of its inputs.
    return self.sampler.batch_size
  def _rnn_output_size(self):
    """Return the per-step output size, accounting for `output_layer`."""
    size = tensor_shape.TensorShape(self.cell.output_size)
    if self.output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size. We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = nest.map_structure(
          lambda s: tensor_shape.TensorShape([None]).concatenate(s), size)
      layer_output_shape = self.output_layer.compute_output_shape(
          output_shape_with_unknown_batch)
      return nest.map_structure(lambda s: s[1:], layer_output_shape)
  @property
  def output_size(self):
    # Return the cell output and the id
    return BasicDecoderOutput(
        rnn_output=self._rnn_output_size(),
        sample_id=self.sampler.sample_ids_shape)
  @property
  def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and the sample_ids_dtype from the helper.
    dtype = self._cell_dtype
    return BasicDecoderOutput(
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        self.sampler.sample_ids_dtype)
  def step(self, time, inputs, state):
    """Perform a decoding step.
    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    cell_outputs, cell_state = self.cell(inputs, state)
    # Project outputs before sampling so the sampler sees the final logits.
    if self.output_layer is not None:
      cell_outputs = self.output_layer(cell_outputs)
    sample_ids = self.sampler.sample(
        time=time, outputs=cell_outputs, state=cell_state)
    (finished, next_inputs, next_state) = self.sampler.next_inputs(
        time=time,
        outputs=cell_outputs,
        state=cell_state,
        sample_ids=sample_ids)
    outputs = BasicDecoderOutput(cell_outputs, sample_ids)
    return (outputs, next_state, next_inputs, finished)
| apache-2.0 |
KnowNo/reviewboard | docs/releasenotes/conf.py | 3 | 7162 | # -*- coding: utf-8 -*-
#
# Release Notes build configuration file, created by
# sphinx-quickstart on Thu Feb 12 02:10:34 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
import os
import sys
from datetime import datetime
sys.path.append(os.path.abspath('_ext'))
# Set this up to parse Django-driven code.
sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..', '..')))
sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..', '..',
'..', 'djblets')))
sys.path.insert(0, os.path.dirname(__file__))
import reviewboard
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'extralinks', 'retina_images']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Release Notes'
copyright = u'2009-%s, Beanbag, Inc.' % datetime.now().year
bugtracker_url = 'https://www.reviewboard.org/bugs/%s'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join([str(i) for i in reviewboard.__version_info__[:2]])
# The full version, including alpha/beta/rc tags.
release = reviewboard.get_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
html_theme = 'default'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Release Notes"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReleaseNotes'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
# Note: the title strings were previously written as `ur'...'` raw-unicode
# literals.  They contain no backslashes, so plain `u'...'` literals are
# byte-identical under Python 2 while remaining valid syntax under Python 3
# (which removed the `ur` prefix entirely).
latex_documents = [
    ('index', 'ReleaseNotes.tex', u'Release Notes',
     u'Christian Hammond', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    # The Django entry tracks the Django major version Review Board is built
    # against so cross-references resolve to the matching docs release.
    'django': ('https://docs.djangoproject.com/en/%s/'
               % reviewboard.django_major_version,
               'https://docs.djangoproject.com/en/%s/_objects/'
               % reviewboard.django_major_version),
    'python': ('https://docs.python.org/2.7', None),
    'rbtools': ('https://www.reviewboard.org/docs/rbtools/dev/', None),
    'reviewboard': ('https://www.reviewboard.org/docs/manual/dev/', None),
}
| mit |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/utils/exporters/text_exporter.py | 1 | 2962 | """
Downloader for Reddit takes a list of reddit users and subreddits and downloads content posted to reddit either by the
users or on the subreddits.
Copyright (C) 2017, Kyle Hickey
This file is part of the Downloader for Reddit.
Downloader for Reddit is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Downloader for Reddit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Downloader for Reddit. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
logger = logging.getLogger(__name__)
def export_posts_to_text(post_list, file_path):
    """
    Append a human-readable serialization of each supplied post to a file.

    :param post_list: The posts to serialize and write out.
    :param file_path: Location of the text file that is appended to (created
                      if it does not already exist).
    """
    with open(file_path, mode='a', encoding='utf-8') as out_file:
        # Each post is separated from the next by a blank line.
        out_file.writelines(format_post_output(post) + '\n\n' for post in post_list)
    logger.info('Exported posts to text file', extra={'export_count': len(post_list)})
def format_post_output(post):
    """
    Render a post's attributes as a labelled, line-per-field string.

    :param post: The post whose attributes are rendered.
    :return: A multi-line string with one "Label: value" pair per line.
    """
    template = 'Author: %s\nSubreddit: %s\nTitle: %s\nCreated: %s\nUrl: %s\nStatus: %s\nSave Status: %s'
    values = (post.author, post.subreddit, post.title, post.date_posted,
              post.url, post.status, post.save_status)
    return template % values
def export_url_list(url_list, file_path):
    """
    Exports a list of urls to a text file, one url per line.

    :param url_list: A list of urls that are to be exported to a text file.
    :param file_path: The path at which the text file will be created.
    """
    # Encoding is specified explicitly for consistency with the other
    # exporters in this module; the default encoding is platform dependent.
    with open(file_path, mode='a', encoding='utf-8') as file:
        for url in url_list:
            file.write('%s\n' % url)
    logger.info('Exported url list to text file', extra={'export_count': len(url_list)})
def export_reddit_objects_to_text(object_list, file_path):
    """
    Write the name of every reddit object in the supplied list to a text
    file, one name per line.

    :param object_list: Reddit objects whose names should be exported.
    :param file_path: Path of the text file that is appended to.
    """
    with open(file_path, mode='a', encoding='utf-8') as out_file:
        out_file.writelines(ro.name + '\n' for ro in object_list)
    logger.info('Exported reddit objects to text file', extra={'export_count': len(object_list)})
| gpl-3.0 |
huggingface/transformers | examples/research_projects/rag-end2end-retriever/lightning_base.py | 1 | 16400 | import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.plugins.training_type import DDPPlugin
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
# Maps the `mode` constructor argument of BaseTransformer to the Auto* factory
# class used to instantiate the underlying Hugging Face model.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Sorted scheduler names, surfaced to argparse as `choices` and `metavar`
# for the --lr_scheduler option (see add_model_specific_args below).
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    """Generic pytorch-lightning wrapper around a Hugging Face transformer.

    Builds the config, tokenizer and model from `hparams` (unless explicit
    instances are passed in) and provides the optimizer/scheduler plumbing
    shared by the task-specific subclasses.
    """
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        # Copy any dropout/layerdrop overrides from hparams onto the config.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        # Replace the wrapped model with a freshly loaded HF checkpoint.
        self.model = self.model_type.from_pretrained(*args, **kwargs)
    def get_lr_scheduler(self):
        # Build the scheduler selected by --lr_scheduler; it is stepped every
        # optimizer step ("interval": "step"), not once per epoch.
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        # Parameters whose names contain these substrings get no weight decay.
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        # Testing behaves exactly like validation.
        return self.validation_step(batch, batch_nb)
    def test_epoch_end(self, outputs):
        # NOTE(review): delegates to `validation_end`, which is not defined on
        # this base class — subclasses are expected to provide it.
        return self.validation_end(outputs)
    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        # `dataset_size` is needed by total_steps(): taken from the test
        # dataloader for the "test" stage, otherwise from the train dataloader.
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        # Subclass hook: build the dataloader for a given split.
        raise NotImplementedError("You must implement this for your task")
    def train_dataloader(self):
        # Created in setup() for the non-test stages.
        return self.train_loader
    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        # Cache-file path of the form: <data_dir>/cached_<split>_<model>_<max_seq_length>.
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # Save model + tokenizer in HF format alongside the lightning
        # checkpoint; rank-zero only to avoid concurrent writes.
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register model/optimization CLI arguments on `parser`."""
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initializes the RAG retriever exactly once, on the master worker."""
    # this process can also be done with PL ddp plugging.
    # But still it is experimental (check original RAG, I updated that with pluggin (shamanez))
    def on_sanity_check_start(self, trainer, pl_module):
        # NOTE(review): `is_global_zero` should already imply rank 0, so the
        # second check looks redundant — confirm before simplifying.
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """Debug callback: prints RAG parameters that received no gradient."""
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # A `None` grad after backward means the parameter did not take part
        # in the loss computation.
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """Logs learning rates each batch and dumps metrics after val/test."""
    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer parameter group.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    """Register the CLI arguments shared by every task (output dir, fp16, ...)."""
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs
):
    """Build a ``pl.Trainer`` for ``model`` from ``args`` and optionally fit.

    Args:
        model: The lightning module to train.
        args: Parsed CLI namespace (see ``add_generic_args``).
        early_stopping_callback: Optional callback appended to the callbacks.
        logger: Lightning logger flag or instance (e.g. ``WandbLogger()``).
        extra_callbacks: Optional list of additional callbacks.
        checkpoint_callback: Custom ``ModelCheckpoint``; a default one
            monitoring ``val_loss`` is created when omitted.
        logging_callback: Custom logging callback; ``LoggingCallback()`` by
            default.
        **extra_train_kwargs: Currently unused.

    Returns:
        The constructed trainer (after ``fit`` when ``--do_train`` is set).
    """
    pl.seed_everything(args.seed)

    # Bug fix: the previous default `extra_callbacks=[]` was a shared mutable
    # default that the append below mutated across calls; copying also avoids
    # mutating a caller-supplied list.
    extra_callbacks = list(extra_callbacks) if extra_callbacks else []

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    # TODO: remove with PyTorch 1.6 since pl uses native amp
    if args.fp16:
        train_params["precision"] = 16
        train_params["amp_level"] = args.fp16_opt_level

    if args.gpus > 1:
        train_params["accelerator"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None  # extra_train_kwargs.get("profiler", None)

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        plugins=[DDPPlugin(find_unused_parameters=True)],  # needed by newer pytorch-lightning versions
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    return trainer
| apache-2.0 |
rhenanbartels/hrv | tests/test_filters.py | 1 | 4258 | from unittest import TestCase
import numpy as np
from hrv.filters import moving_average, moving_median, quotient, threshold_filter
from hrv.rri import RRi
class Filter(TestCase):
    """Tests for the RRi filtering helpers in ``hrv.filters``.

    Each filter is expected to return an ``RRi`` instance; the expected
    arrays below are exact fixtures for the current implementations.
    """

    def test_moving_average_order_3(self):
        # Interior samples become the mean of a 3-sample window; the edge
        # samples (where the window does not fit) are kept unchanged.
        fake_rri = np.array([810, 830, 860, 790, 804])
        rri_filt = moving_average(fake_rri, order=3)
        expected = [810, 833.33, 826.66, 818, 804]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected, decimal=2)

    def test_moving_average_order_5(self):
        fake_rri = np.array([810, 830, 860, 790, 804, 801, 800])
        rri_filt = moving_average(fake_rri, order=5)
        expected = [810, 830, 818.79, 817.0, 811.0, 801, 800]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected, decimal=2)

    def test_moving_median_oder_3(self):
        # NOTE(review): "oder" looks like a typo for "order" in the test name;
        # kept as-is since renaming tests can affect CI test selection.
        fake_rri = np.array([810, 830, 860, 790, 804])
        rri_filt = moving_median(fake_rri, order=3)
        expected = [810, 830.0, 830.0, 804, 804]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected, decimal=2)

    def test_moving_median_order_5(self):
        fake_rri = np.array([810, 830, 860, 790, 804, 801, 800])
        rri_filt = moving_median(fake_rri, order=5)
        expected = [810, 830, 810.0, 804.0, 801.0, 801, 800]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected, decimal=2)

    def test_quotient_filter(self):
        # The 580 ms outlier and its neighbour are dropped by the filter
        # (presumably because their successive ratio is out of band — confirm
        # against the quotient() implementation).
        fake_rri = [810, 580, 805, 790]
        rri_filt = quotient(fake_rri)
        expected = [805, 790]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected, decimal=2)

    def test_quotient_filter_receiving_and_return_rri_class(self):
        # When given an RRi instance, the time axis of the surviving samples
        # must be preserved as well.
        fake_rri = RRi([810, 580, 805, 790])
        rri_filt = quotient(fake_rri)
        expected = RRi(rri=[805, 790], time=[1.385, 2.175])
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected.values, decimal=2)
        np.testing.assert_almost_equal(rri_filt.time, expected.time, decimal=2)

    def test_movinng_filters_receiving_and_return_rri_class(self):
        # Moving filters accept an RRi instance and keep its time axis intact.
        fake_rri = RRi([810, 830, 860, 790, 804, 801, 800], time=[0, 1, 2, 3, 4, 5, 6])
        rri_filt = moving_median(fake_rri)
        expected_rri = [810, 830, 830, 804, 801, 801, 800]
        expected_time = [0, 1, 2, 3, 4, 5, 6]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected_rri, decimal=2)
        np.testing.assert_almost_equal(rri_filt.time, expected_time, decimal=2)

    def test_threshold_filter(self):
        # The 1100 ms outlier is replaced by a computed value (748.40625)
        # while the time axis stays untouched.
        fake_rri = RRi([810, 830, 860, 865, 804, 1100, 800], time=[0, 1, 2, 3, 4, 5, 6])
        rri_filt = threshold_filter(fake_rri, threshold=250)
        expected_rri = [810, 830, 860, 865, 804, 748.40625, 800]
        expected_time = [0, 1, 2, 3, 4, 5, 6]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected_rri, decimal=2)
        np.testing.assert_almost_equal(rri_filt.time, expected_time, decimal=2)

    def test_threshold_filter_noise_in_the_beginning(self):
        # An early outlier (500 ms) must also be detected and replaced.
        fake_rri = RRi([810, 500, 860, 865, 804, 810, 800], time=[0, 1, 2, 3, 4, 5, 6])
        rri_filt = threshold_filter(fake_rri, threshold=250)
        expected_rri = [810, 814.34375, 860, 865, 804, 810, 800]
        expected_time = [0, 1, 2, 3, 4, 5, 6]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected_rri, decimal=2)
        np.testing.assert_almost_equal(rri_filt.time, expected_time, decimal=2)

    def test_threshold_filter_string_threshold(self):
        # String presets (e.g. "strong") map to numeric thresholds — same
        # expected output as an equivalent numeric threshold here.
        fake_rri = RRi([810, 650, 860, 865, 804, 810, 800], time=[0, 1, 2, 3, 4, 5, 6])
        rri_filt = threshold_filter(fake_rri, threshold="strong")
        expected_rri = [810, 814.34375, 860, 865, 804, 810, 800]
        expected_time = [0, 1, 2, 3, 4, 5, 6]
        assert isinstance(rri_filt, RRi)
        np.testing.assert_almost_equal(rri_filt.values, expected_rri, decimal=2)
        np.testing.assert_almost_equal(rri_filt.time, expected_time, decimal=2)
| bsd-3-clause |
jric/Dumper.py | test.py | 1 | 2916 | from __future__ import print_function
from dumper import dump, dumps, Dumper
import dumper
import io
import sys
# Capture all default-dumper output in an in-memory buffer so the tests can
# inspect what was written instead of reading stdout.
buff = io.StringIO()
dumper.default_dumper = Dumper(output=buff)
# BEGIN TEST CASES
def do_dump_scalars():
    """Dump a few scalar values and return the exact expected buffer content."""
    dump(1)
    dump('a')
    dump("foo")
    dump('''string
with a newline''')
    return "1'a''foo''string\\nwith a newline'"


def test_do_dump_scalars():
    # Run the dumps above and compare the captured buffer to the returned
    # expected string.
    assert_output_as_expected(do_dump_scalars)
def do_dumps_multi_values():
    """Dump a multi-argument ``dumps`` result and return the expected output."""
    s = dumps(1, " is less than ", 10)  # returns unicode string in py2
    # BUG FIX: ``sys.version`` is a *string*; comparing it to a tuple raises
    # TypeError on Python 3.  ``sys.version_info`` is the tuple to compare.
    if sys.version_info < (3, 0):
        s = s.encode('ascii', 'replace')  # convert back to regular string
    dump(s)
    return "\"1' is less than '10\""


def test_dumps_multi_values():
    assert_output_as_expected(do_dumps_multi_values)
def do_dump_json():
    """Dump a small nested dict/list structure and return the expected template.

    ``{WORD}`` placeholders stand for the varying object addresses in the
    dumper output (matched by assert_output_matches_template, which is not
    implemented yet).
    """
    obj = {
        "httpCode": 200,
        "extensionData": [
            {
                "extensionValue": "egg"
            }
        ]
    }
    dump(obj)
    return '''
<dict at {WORD}>:
  httpCode: 200
  extensionData: <list at {WORD}>
    0: <dict at {WORD}>:
      extensionValue: 'egg'
'''


def test_do_dump_json():
    # Template matching is a no-op for now (assert_output_matches_template
    # is a stub), so this only checks that dumping the structure does not raise.
    assert_output_matches_template(do_dump_json)
# END TEST CASES
def text_type(val):
    """Return *val* rendered as the native text type (unicode on py2, str on py3)."""
    # FIX: use sys.version_info instead of a lexicographic comparison of the
    # sys.version *string*, which is fragile (e.g. would misclassify a
    # hypothetical major version >= 10).
    if sys.version_info[0] < 3:
        return unicode(val)  # noqa: F821 - only reachable on Python 2
    else:
        return str(val)
def assertMatching(a, b):
    '''Assert that the lines of string ``a`` match the lines of string ``b``.

    ``a`` is the expected string / pattern; ``b`` is the actual string.
    Leading/trailing whitespace around the whole strings is ignored.
    '''
    expected_lines = a.strip().split("\n")
    actual_lines = b.strip().split("\n")
    if len(expected_lines) != len(actual_lines):
        raise AssertionError(
            "a has " + text_type(len(expected_lines)) + ", but b has "
            + text_type(len(actual_lines))
            + " lines: a={" + a + "}, b={" + b + "}")
    for expected_line, actual_line in zip(expected_lines, actual_lines):
        assert expected_line == actual_line
def assert_output_matches_template(func):
    """Stub: should match the captured output against func()'s template."""
    # TODO: implement this
    pass


def assert_output_as_expected(func):
    """Run *func*, then compare the captured dumper output with its return value."""
    # buffer stdout
    try:
        output = func()
        assertMatching(output, buff.getvalue())
    finally:
        # reset the buffer for the next test
        buff.truncate(0)
        buff.seek(0)
if __name__ == "__main__":
    # Ad-hoc demo of the dumper API (not run by the test suite).
    l1 = [3, 5, 'hello']
    t1 = ('uh', 'oh')
    l2 = ['foo', t1]
    # NOTE(review): 'k2' is assigned twice, so the first ('k2': l1) entry is
    # silently overwritten — probably the second key was meant to be 'k3'.
    d1 = {'k1': 'val1',
          'k2': l1,
          'k2': l2}
    print("a list: ", dumps (l1), "; a tuple: ", dumps (t1))
    print("a complex list: ")
    dump (l2)
    dump (d1)
    print("same dict, printed from dumps(): ")
    print(dumps(d1))
    dump (19)
    dump ("\nMy birth year!\n")
    # Shadows the imported `dumper` module from here on (intentional in this
    # demo, but worth noting).
    dumper = Dumper (max_depth=1)
    l = ['foo', ['bar', 'baz', (1, 2, 3)]]
    dumper.dump (l)
    dumper.max_depth = 2
    dumper.dump (l)
    l[1][2] = tuple (range (11))
    dumper.dump (l)
    dumper.max_depth = None
    print(dumper.max_depth)
    class Foo: pass
    class Bar: pass
| bsd-3-clause |
achang97/YouTunes | lib/python2.7/site-packages/docutils/transforms/peps.py | 10 | 11056 | # $Id: peps.py 7995 2016-12-10 17:50:59Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
class Headers(Transform):

    """
    Process fields in a PEP's initial RFC-2822 header.
    """

    default_priority = 360

    # URL templates; %04d is filled with the PEP number.
    pep_url = 'pep-%04d'
    pep_cvs_url = ('http://hg.python.org'
                   '/peps/file/default/pep-%04d.txt')
    # (pattern, replacement) pairs used to strip RCS keyword wrappers such as
    # "$RCSfile: ... $" from header values.
    rcs_keyword_substitutions = (
        (re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
        (re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)

    def apply(self):
        """Validate the RFC-2822 header and rewrite its fields in place."""
        if not len(self.document):
            # @@@ replace these DataErrors with proper system messages
            raise DataError('Document tree is empty.')
        header = self.document[0]
        if not isinstance(header, nodes.field_list) or \
              'rfc2822' not in header['classes']:
            raise DataError('Document does not begin with an RFC-2822 '
                            'header; it is not a PEP.')
        pep = None
        # First pass: locate and validate the mandatory "PEP" field.
        for field in header:
            if field[0].astext().lower() == 'pep':  # should be the first field
                value = field[1].astext()
                try:
                    pep = int(value)
                    cvs_url = self.pep_cvs_url % pep
                except ValueError:
                    # Non-integer PEP number: emit a warning and mark the
                    # field body as problematic instead of failing outright.
                    pep = value
                    cvs_url = None
                    msg = self.document.reporter.warning(
                        '"PEP" header must contain an integer; "%s" is an '
                        'invalid value.' % pep, base_node=field)
                    msgid = self.document.set_id(msg)
                    prb = nodes.problematic(value, value or '(none)',
                                            refid=msgid)
                    prbid = self.document.set_id(prb)
                    msg.add_backref(prbid)
                    if len(field[1]):
                        field[1][0][:] = [prb]
                    else:
                        field[1] += nodes.paragraph('', '', prb)
                break
        if pep is None:
            raise DataError('Document does not contain an RFC-2822 "PEP" '
                            'header.')
        if pep == 0:
            # Special processing for PEP 0.
            pending = nodes.pending(PEPZero)
            self.document.insert(1, pending)
            self.document.note_pending(pending)
        if len(header) < 2 or header[1][0].astext().lower() != 'title':
            raise DataError('No title!')
        # Second pass: rewrite the individual header fields.
        for field in header:
            name = field[0].astext().lower()
            body = field[1]
            if len(body) > 1:
                raise DataError('PEP header field body contains multiple '
                                'elements:\n%s' % field.pformat(level=1))
            elif len(body) == 1:
                if not isinstance(body[0], nodes.paragraph):
                    raise DataError('PEP header field body may only contain '
                                    'a single paragraph:\n%s'
                                    % field.pformat(level=1))
            elif name == 'last-modified':
                # Empty Last-Modified field: fill it with the source file's
                # mtime (os.stat index 8) and link it to the repository URL.
                date = time.strftime(
                      '%d-%b-%Y',
                      time.localtime(os.stat(self.document['source'])[8]))
                if cvs_url:
                    body += nodes.paragraph(
                        '', '', nodes.reference('', date, refuri=cvs_url))
            else:
                # empty
                continue
            para = body[0]
            if name == 'author':
                # Obscure author email addresses.
                for node in para:
                    if isinstance(node, nodes.reference):
                        node.replace_self(mask_email(node))
            elif name == 'discussions-to':
                # Same masking, plus a default mail subject with the PEP number.
                for node in para:
                    if isinstance(node, nodes.reference):
                        node.replace_self(mask_email(node, pep))
            elif name in ('replaces', 'replaced-by', 'requires'):
                # Turn each referenced PEP number into a hyperlink.
                newbody = []
                space = nodes.Text(' ')
                for refpep in re.split(r',?\s+', body.astext()):
                    pepno = int(refpep)
                    newbody.append(nodes.reference(
                        refpep, refpep,
                        refuri=(self.document.settings.pep_base_url
                                + self.pep_url % pepno)))
                    newbody.append(space)
                para[:] = newbody[:-1]  # drop trailing space
            elif name == 'last-modified':
                utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
                if cvs_url:
                    date = para.astext()
                    para[:] = [nodes.reference('', date, refuri=cvs_url)]
            elif name == 'content-type':
                # Link the content type to PEP 12 (the reST PEP template).
                pep_type = para.astext()
                uri = self.document.settings.pep_base_url + self.pep_url % 12
                para[:] = [nodes.reference('', pep_type, refuri=uri)]
            elif name == 'version' and len(body):
                utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):

    """
    Insert an empty table of contents topic and a transform placeholder into
    the document after the RFC 2822 header.
    """

    default_priority = 380

    def apply(self):
        """Create a "Contents" topic at index 1 and schedule parts.Contents."""
        # Use the localized label (e.g. "Contents") for the topic title.
        language = languages.get_language(self.document.settings.language_code,
                                          self.document.reporter)
        name = language.labels['contents']
        title = nodes.title('', name)
        topic = nodes.topic('', title, classes=['contents'])
        name = nodes.fully_normalize_name(name)
        # Only register the name if it is not already taken elsewhere.
        if not self.document.has_name(name):
            topic['names'].append(name)
        self.document.note_implicit_target(topic)
        # The actual ToC is generated later by the pending parts.Contents
        # transform.
        pending = nodes.pending(parts.Contents)
        topic += pending
        self.document.insert(1, topic)
        self.document.note_pending(pending)
class TargetNotes(Transform):

    """
    Locate the "References" section, insert a placeholder for an external
    target footnote insertion transform at the end, and schedule the
    transform to run immediately.
    """

    default_priority = 520

    def apply(self):
        """Find or create the "References" section and queue the transforms."""
        doc = self.document
        i = len(doc) - 1
        refsect = copyright = None
        # Scan trailing top-level sections from the end of the document.
        while i >= 0 and isinstance(doc[i], nodes.section):
            title_words = doc[i][0].astext().lower().split()
            if 'references' in title_words:
                refsect = doc[i]
                break
            elif 'copyright' in title_words:
                # Remember the index so "References" can be placed before it.
                copyright = i
            i -= 1
        if not refsect:
            refsect = nodes.section()
            refsect += nodes.title('', 'References')
            doc.set_id(refsect)
            if copyright:
                # Put the new "References" section before "Copyright":
                doc.insert(copyright, refsect)
            else:
                # Put the new "References" section at end of doc:
                doc.append(refsect)
        # First: generate the external-target footnotes (priority 0) ...
        pending = nodes.pending(references.TargetNotes)
        refsect.append(pending)
        self.document.note_pending(pending, 0)
        # ... then clean up the section if it ends up empty (priority 1).
        pending = nodes.pending(misc.CallBack,
                                details={'callback': self.cleanup_callback})
        refsect.append(pending)
        self.document.note_pending(pending, 1)

    def cleanup_callback(self, pending):
        """
        Remove an empty "References" section.

        Called after the `references.TargetNotes` transform is complete.
        """
        if len(pending.parent) == 2:  # <title> and <pending>
            pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):

    """
    Special processing for PEP 0.
    """

    default_priority = 760

    def apply(self):
        """Walk the document with the PEP-0 visitor, then drop the trigger node."""
        pep_zero_visitor = PEPZeroSpecial(self.document)
        self.document.walk(pep_zero_visitor)
        self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):

    """
    Perform the special processing needed by PEP 0:

    - Mask email addresses.

    - Link PEP numbers in the second column of 4-column tables to the PEPs
      themselves.
    """

    pep_url = Headers.pep_url

    def unknown_visit(self, node):
        pass

    def visit_reference(self, node):
        # Obscure any email address found in a reference node.
        node.replace_self(mask_email(node))

    def visit_field_list(self, node):
        # Skip the RFC-2822 header entirely; Headers already processed it.
        if 'rfc2822' in node['classes']:
            raise nodes.SkipNode

    def visit_tgroup(self, node):
        # Only 4-column tables get the PEP-number treatment below.
        self.pep_table = node['cols'] == 4
        self.entry = 0

    def visit_colspec(self, node):
        self.entry += 1
        # Tag the second column so CSS can style PEP numbers.
        if self.pep_table and self.entry == 2:
            node['classes'].append('num')

    def visit_row(self, node):
        # Reset the column counter at the start of each row.
        self.entry = 0

    def visit_entry(self, node):
        self.entry += 1
        if self.pep_table and self.entry == 2 and len(node) == 1:
            node['classes'].append('num')
            p = node[0]
            if isinstance(p, nodes.paragraph) and len(p) == 1:
                text = p.astext()
                try:
                    # Replace a bare PEP number with a hyperlink to the PEP.
                    pep = int(text)
                    ref = (self.document.settings.pep_base_url
                           + self.pep_url % pep)
                    p[0] = nodes.reference(text, text, refuri=ref)
                except ValueError:
                    # Not a PEP number; leave the cell untouched.
                    pass
# Well-known list addresses that are left unmasked on purpose.
non_masked_addresses = ('peps@python.org',
                        'python-list@python.org',
                        'python-dev@python.org')


def mask_email(ref, pepno=None):
    """
    Mask the email address in `ref` and return a replacement node.

    `ref` is returned unchanged if it contains no email address.

    For email addresses such as "user@host", mask the address as "user at
    host" (text) to thwart simple email address harvesters (except for those
    listed in `non_masked_addresses`).  If a PEP number (`pepno`) is given,
    return a reference including a default email subject.
    """
    if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        # BUG FIX: 'mailto:' is 7 characters long.  The original sliced the
        # URI at [8:], which dropped the first character of the address, so
        # the whitelist above could never match and even the well-known list
        # addresses ended up masked.
        if ref['refuri'][7:] in non_masked_addresses:
            replacement = ref[0]
        else:
            replacement_text = ref.astext().replace('@', ' at ')
            replacement = nodes.raw('', replacement_text, format='html')
        if pepno is None:
            return replacement
        else:
            # '%%20' yields a literal '%20' (URL-encoded space) after the
            # %-formatting of the PEP number.
            ref['refuri'] += '?subject=PEP%%20%s' % pepno
            ref[:] = [replacement]
            return ref
    else:
        return ref
| mit |
matejcik/weblate | weblate/accounts/tests/test_registration.py | 1 | 10404 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user handling.
"""
import json
import httpretty
from six.moves.urllib.parse import parse_qs, urlparse
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.core import mail
from weblate.accounts.models import VerifiedEmail
from weblate.trans.tests.test_views import RegistrationTestMixin
from weblate.trans.tests import OverrideSettings
# Baseline form payload for the registration view; captcha_id/captcha are the
# stub values accepted by the test captcha backend.
REGISTRATION_DATA = {
    'username': 'username',
    'email': 'noreply@weblate.org',
    'first_name': 'First Last',
    'captcha_id': '00',
    'captcha': '9999'
}
class RegistrationTest(TestCase, RegistrationTestMixin):
    """End-to-end tests for account registration, confirmation and login."""

    # When True, the session cookie is dropped before following the mailed
    # confirmation link (exercised by the NoCookieRegistrationTest subclass).
    clear_cookie = False

    def assert_registration(self, match=None):
        """Follow the mailed confirmation link and expect the password page."""
        url = self.assert_registration_mailbox(match)

        if self.clear_cookie and 'sessionid' in self.client.cookies:
            del self.client.cookies['sessionid']

        # Confirm account
        response = self.client.get(url, follow=True)
        self.assertRedirects(
            response,
            reverse('password')
        )

    @OverrideSettings(REGISTRATION_CAPTCHA=True)
    def test_register_captcha(self):
        # Enable captcha: the stub captcha answer in REGISTRATION_DATA is
        # wrong for a freshly generated challenge, so registration must fail.
        response = self.client.post(
            reverse('register'),
            REGISTRATION_DATA
        )
        self.assertContains(
            response,
            'Please check your math and try again.'
        )

    @OverrideSettings(REGISTRATION_OPEN=False)
    def test_register_closed(self):
        # Disable registration
        response = self.client.post(
            reverse('register'),
            REGISTRATION_DATA
        )
        self.assertContains(
            response,
            'Sorry, but registrations on this site are disabled.'
        )

    @OverrideSettings(REGISTRATION_OPEN=True)
    @OverrideSettings(REGISTRATION_CAPTCHA=False)
    def test_register(self):
        """Happy path: register, confirm, set password, verify the account."""
        # Disable captcha
        response = self.client.post(
            reverse('register'),
            REGISTRATION_DATA
        )
        # Check we did succeed
        self.assertRedirects(response, reverse('email-sent'))

        # Confirm account
        self.assert_registration()

        # Set password
        response = self.client.post(
            reverse('password'),
            {
                'password1': 'password',
                'password2': 'password',
            }
        )
        self.assertRedirects(response, reverse('profile'))

        # Check we can access home (was redirected to password change)
        response = self.client.get(reverse('home'))
        self.assertContains(response, 'First Last')

        user = User.objects.get(username='username')
        # Verify user is active
        self.assertTrue(user.is_active)
        # Verify stored first/last name
        self.assertEqual(user.first_name, 'First Last')

    @OverrideSettings(REGISTRATION_OPEN=True)
    @OverrideSettings(REGISTRATION_CAPTCHA=False)
    def test_double_register(self):
        """Test double registration from single browser"""
        # First registration
        response = self.client.post(
            reverse('register'),
            REGISTRATION_DATA
        )
        first_url = self.assert_registration_mailbox()
        mail.outbox.pop()

        # Second registration
        data = REGISTRATION_DATA.copy()
        data['email'] = 'noreply@example.net'
        data['username'] = 'second'
        response = self.client.post(
            reverse('register'),
            data,
        )
        second_url = self.assert_registration_mailbox()
        mail.outbox.pop()

        # Confirm first account
        response = self.client.get(first_url, follow=True)
        self.assertRedirects(
            response,
            reverse('password')
        )
        self.client.get(reverse('logout'))

        # Confirm second account
        response = self.client.get(second_url, follow=True)
        self.assertRedirects(
            response,
            reverse('password')
        )

    @OverrideSettings(REGISTRATION_OPEN=True)
    @OverrideSettings(REGISTRATION_CAPTCHA=False)
    def test_register_missing(self):
        """A confirmation link stripped of its session id must be rejected."""
        # Disable captcha
        response = self.client.post(
            reverse('register'),
            REGISTRATION_DATA
        )
        # Check we did succeed
        self.assertRedirects(response, reverse('email-sent'))

        # Confirm account
        url = self.assert_registration_mailbox()

        # Remove session ID from URL
        url = url.split('&id=')[0]

        # Confirm account
        response = self.client.get(url, follow=True)
        self.assertRedirects(response, reverse('login'))
        self.assertContains(response, 'Failed to verify your registration')

    def test_reset(self):
        '''
        Test for password reset.
        '''
        User.objects.create_user('testuser', 'test@example.com', 'x')

        response = self.client.get(
            reverse('password_reset'),
        )
        self.assertContains(response, 'Reset my password')
        response = self.client.post(
            reverse('password_reset'),
            {
                'email': 'test@example.com'
            }
        )
        self.assertRedirects(response, reverse('email-sent'))

        self.assert_registration('[Weblate] Password reset on Weblate')

    def test_wrong_username(self):
        # An empty username must be rejected by form validation.
        data = REGISTRATION_DATA.copy()
        data['username'] = ''
        response = self.client.post(
            reverse('register'),
            data
        )
        self.assertContains(
            response,
            'This field is required.',
        )

    def test_wrong_mail(self):
        data = REGISTRATION_DATA.copy()
        data['email'] = 'x'
        response = self.client.post(
            reverse('register'),
            data
        )
        self.assertContains(
            response,
            'Enter a valid email address.'
        )

    def test_spam(self):
        # 'content' acts as a honeypot field: any value marks the request as
        # spam — presumably enforced by the registration form; confirm there.
        data = REGISTRATION_DATA.copy()
        data['content'] = 'x'
        response = self.client.post(
            reverse('register'),
            data
        )
        self.assertContains(
            response,
            'Invalid value'
        )

    def test_add_mail(self):
        """Attach a second, verified email address to an existing account."""
        # Create user
        self.test_register()
        mail.outbox.pop()

        # Check adding email page
        response = self.client.get(
            reverse('email_login')
        )
        self.assertContains(response, 'Register email')

        # Try invalid address first
        response = self.client.post(
            reverse('email_login'),
            {'email': 'invalid'},
        )
        self.assertContains(response, 'has-error')

        # Add email account
        response = self.client.post(
            reverse('email_login'),
            {'email': 'second@example.net'},
            follow=True,
        )
        self.assertRedirects(response, reverse('email-sent'))

        # Verify confirmation mail
        url = self.assert_registration_mailbox()
        response = self.client.get(url, follow=True)
        self.assertRedirects(
            response, '{0}#auth'.format(reverse('profile'))
        )

        # Check database models
        user = User.objects.get(username='username')
        self.assertEqual(
            VerifiedEmail.objects.filter(social__user=user).count(), 2
        )
        self.assertTrue(
            VerifiedEmail.objects.filter(
                social__user=user, email='second@example.net'
            ).exists()
        )

    @httpretty.activate
    def test_github(self):
        """Test GitHub integration"""
        # Stub out the GitHub OAuth endpoints so the social-auth pipeline can
        # run without network access.
        httpretty.register_uri(
            httpretty.POST,
            'https://github.com/login/oauth/access_token',
            body=json.dumps({
                'access_token': '123',
                'token_type': 'bearer',
            })
        )
        httpretty.register_uri(
            httpretty.GET,
            'https://api.github.com/user',
            body=json.dumps({
                'email': 'foo@example.net',
                'login': 'weblate',
                'id': 1,
                'name': 'Weblate',
            }),
        )
        httpretty.register_uri(
            httpretty.GET,
            'https://api.github.com/user/emails',
            body=json.dumps([
                {
                    'email': 'noreply@example.org',
                    'verified': False,
                    'primary': False,
                }, {
                    'email': 'noreply@weblate.org',
                    'verified': True,
                    'primary': True
                }
            ])
        )
        response = self.client.get(reverse('social:begin', args=('github',)))
        self.assertEqual(response.status_code, 302)
        self.assertTrue(
            response['Location'].startswith(
                'https://github.com/login/oauth/authorize'
            )
        )
        # Replay the state/redirect_state parameters back to the completion
        # view, as the real GitHub redirect would.
        query = parse_qs(urlparse(response['Location']).query)
        return_query = parse_qs(urlparse(query['redirect_uri'][0]).query)
        response = self.client.get(
            reverse('social:complete', args=('github',)),
            {
                'state': query['state'][0],
                'redirect_state': return_query['redirect_state'][0],
                'code': 'XXX'
            },
            follow=True
        )
        # The verified primary address from GitHub wins over the profile email.
        user = User.objects.get(username='weblate')
        self.assertEqual(user.first_name, 'Weblate')
        self.assertEqual(user.email, 'noreply@weblate.org')
class NoCookieRegistrationTest(RegistrationTest):
    # Re-run the whole registration suite simulating confirmation from a
    # different browser: the session cookie is dropped before following the
    # mailed link (see RegistrationTest.assert_registration).
    clear_cookie = True
| gpl-3.0 |
partofthething/home-assistant | homeassistant/components/pi4ioe5v9xxxx/binary_sensor.py | 6 | 2354 | """Support for binary sensor using RPi GPIO."""
from pi4ioe5v9xxxx import pi4ioe5v9xxxx
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
# Configuration keys for this platform.
CONF_INVERT_LOGIC = "invert_logic"
CONF_PINS = "pins"
CONF_I2CBUS = "i2c_bus"
CONF_I2CADDR = "i2c_address"
CONF_BITS = "bits"

# Defaults: 24-bit expander at I2C address 0x20 on bus 1, non-inverted logic.
DEFAULT_INVERT_LOGIC = False
DEFAULT_BITS = 24
DEFAULT_BUS = 1
DEFAULT_ADDR = 0x20

# Mapping of pin number -> friendly sensor name.
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PINS): _SENSORS_SCHEMA,
        vol.Optional(CONF_I2CBUS, default=DEFAULT_BUS): cv.positive_int,
        vol.Optional(CONF_I2CADDR, default=DEFAULT_ADDR): cv.positive_int,
        vol.Optional(CONF_BITS, default=DEFAULT_BITS): cv.positive_int,
        vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the IO expander devices."""
    pin_config = config[CONF_PINS]

    # Initialize the expander hardware first: the sensor constructor reads
    # the pin state from the library's memory image.
    pi4ioe5v9xxxx.setup(
        i2c_bus=config[CONF_I2CBUS],
        i2c_addr=config[CONF_I2CADDR],
        bits=config[CONF_BITS],
        read_mode=True,
        invert=False,
    )

    sensors = [
        Pi4ioe5v9BinarySensor(pin_name, pin_num, config[CONF_INVERT_LOGIC])
        for pin_num, pin_name in pin_config.items()
    ]
    add_entities(sensors, True)
class Pi4ioe5v9BinarySensor(BinarySensorEntity):
    """Represent a binary sensor that uses pi4ioe5v9xxxx IO expander in read mode."""

    def __init__(self, name, pin, invert_logic):
        """Initialize the pi4ioe5v9xxxx sensor."""
        self._name = name if name else DEVICE_DEFAULT_NAME
        self._pin = pin
        self._invert_logic = invert_logic
        # Seed the state from the library's current memory image.
        self._state = pi4ioe5v9xxxx.pin_from_memory(self._pin)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def is_on(self):
        """Return the state of the entity."""
        # invert_logic flips the reported state relative to the raw pin level.
        return self._state != self._invert_logic

    def update(self):
        """Refresh the hardware snapshot and re-read this pin's state."""
        pi4ioe5v9xxxx.hw_to_memory()
        self._state = pi4ioe5v9xxxx.pin_from_memory(self._pin)
| mit |
martonw/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/stepsequence.py | 126 | 3584 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool import steps
from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import QueueEngine
_log = logging.getLogger(__name__)
class StepSequenceErrorHandler():
    # Mixin-style interface: commands driving a StepSequence implement these
    # hooks to react to failures.  NOTE: this file uses Python 2 syntax
    # (comma-style raise/except), so it must stay py2-compatible.
    @classmethod
    def handle_script_error(cls, tool, patch, script_error):
        raise NotImplementedError, "subclasses must implement"

    @classmethod
    def handle_checkout_needs_update(cls, tool, state, options, error):
        raise NotImplementedError, "subclasses must implement"
class StepSequence(object):
    """Run a list of webkitpy steps in order, with shared error handling."""

    def __init__(self, steps):
        # Accept None as "no steps".
        self._steps = steps or []

    def options(self):
        """Return the sorted union of option definitions of all steps."""
        collected_options = [
            steps.Options.parent_command,
            steps.Options.quiet,
        ]
        for step in self._steps:
            collected_options = collected_options + step.options()
        # Remove duplicates.
        collected_options = sorted(set(collected_options))
        return collected_options

    def _run(self, tool, options, state):
        # Each step is instantiated with (tool, options) and run with the
        # mutable shared state dict.
        for step in self._steps:
            step(tool, options).run(state)

    def run_and_handle_errors(self, tool, options, state=None):
        """Run all steps, delegating known failures to the parent command.

        Both error branches end with QueueEngine.exit_after_handled_error,
        which does not return.
        """
        if not state:
            state = {}
        try:
            self._run(tool, options, state)
        except CheckoutNeedsUpdate, e:
            _log.info("Commit failed because the checkout is out of date. Please update and try again.")
            if options.parent_command:
                command = tool.command_by_name(options.parent_command)
                command.handle_checkout_needs_update(tool, state, options, e)
            QueueEngine.exit_after_handled_error(e)
        except ScriptError, e:
            if not options.quiet:
                _log.error(e.message_with_output())
            if options.parent_command:
                command = tool.command_by_name(options.parent_command)
                command.handle_script_error(tool, state, e)
            QueueEngine.exit_after_handled_error(e)
tudennis/LeetCode---kamyu104-11-24-2015 | Python/number-of-lines-to-write-string.py | 2 | 1935 | # Time: O(n)
# Space: O(1)
# We are to write the letters of a given string S, from left to right into lines.
# Each line has maximum width 100 units, and if writing a letter would cause the width
# of the line to exceed 100 units, it is written on the next line.
# We are given an array widths, an array where widths[0] is the width of 'a', widths[1]
# is the width of 'b', ..., and widths[25] is the width of 'z'.
#
# Now answer two questions: how many lines have at least one character from S,
# and what is the width used by the last such line? Return your answer as an integer list of length 2.
#
# Example :
# Input:
# widths = [10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
# S = "abcdefghijklmnopqrstuvwxyz"
# Output: [3, 60]
#
# Explanation:
# All letters have the same length of 10. To write all 26 letters,
# we need two full lines and one line with 60 units.
# Example :
# Input:
# widths = [4,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10]
# S = "bbbcccdddaaa"
# Output: [2, 4]
#
# Explanation:
# All letters except 'a' have the same length of 10, and
# "bbbcccdddaa" will cover 9 * 10 + 2 * 4 = 98 units.
# For the last 'a', it is written on the second line because
# there is only 2 units left in the first line.
# So the answer is 2 lines, plus 4 units in the second line.
#
# Note:
# - The length of S will be in the range [1, 1000].
# - S will only contain lowercase letters.
# - widths is an array of length 26.
# - widths[i] will be in the range of [2, 10].
class Solution(object):
    def numberOfLines(self, widths, S):
        """
        :type widths: List[int]
        :type S: str
        :rtype: List[int]

        Greedily place each character; a character that would push the
        current line past 100 units starts a new line instead.
        """
        lines = 1
        line_width = 0
        base = ord('a')
        for ch in S:
            glyph = widths[ord(ch) - base]
            if line_width + glyph > 100:
                lines += 1
                line_width = glyph
            else:
                line_width += glyph
        return [lines, line_width]
| mit |
diogocs1/comps | web/addons/l10n_multilang/l10n_multilang.py | 378 | 8428 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import os
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class wizard_multi_charts_accounts(osv.osv_memory):
    """
    Extend the setup wizard that creates a new account chart for a company:
        * Add option to install languages during the setup
        * Copy translations for COA, Tax, Tax Code and Fiscal Position from templates to target objects.
    """
    _inherit = 'wizard.multi.charts.accounts'

    # FIXME: in trunk, drop the force_write param entirely
    def process_translations(self, cr, uid, langs, in_obj, in_field, in_ids, out_obj, out_ids, force_write=False, context=None):
        """
        This method copies translations values of templates into new Accounts/Taxes/Journals for languages selected

        :param cr: A database cursor
        :param uid: ID of the user currently logged in
        :param langs: List of languages to load for new records
        :param in_field: Name of the translatable field of source templates
        :param in_obj: Name of source object of templates.
        :param in_ids: List of ids of source object
        :param out_obj: Destination object for which translation is to be copied
        :param out_ids: List of ids of destination object, parallel to in_ids
            (out_ids[j] must be the record generated from template in_ids[j])
        :param force_write: Deprecated as of 7.0, do not use
        :param context: usual context information. May contain the key 'lang', which is the language of the user running
                        the wizard, that will be used if force_write is True
        :return: True
        """
        if context is None:
            context = {}
        src = {}
        xlat_obj = self.pool.get('ir.translation')
        # collect the source (untranslated) terms from the template records
        for x in in_obj.browse(cr, uid, in_ids):
            src.update({x.id: x.name})
        for lang in langs:
            # fetch the existing template translations for this language
            value = xlat_obj._get_ids(cr, uid, in_obj._name + ',' + in_field, 'model', lang, in_ids)
            for j in range(len(in_ids)):
                in_id = in_ids[j]
                if value[in_id]:
                    # copy the translation from the template to the new record
                    xlat_obj.create(cr, uid, {
                        'name': out_obj._name + ',' + in_field,
                        'type': 'model',
                        'res_id': out_ids[j],
                        'lang': lang,
                        'src': src[in_id],
                        'value': value[in_id],
                    })
                else:
                    _logger.info('Language: %s. Translation from template: there is no translation available for %s!' %(lang, src[in_id]))
        return True

    def execute(self, cr, uid, ids, context=None):
        """Run the chart creation, then copy template translations for every
        spoken language of the chart template that is already installed."""
        if not context:
            context = {}
        # remove the lang to get the untranslated value
        ctx = dict(context, lang=None)
        res = super(wizard_multi_charts_accounts, self).execute(cr, uid, ids, context=ctx)
        obj_multi = self.browse(cr, uid, ids[0], context=context)
        company_id = obj_multi.company_id.id
        # load languages
        langs = []
        res_lang_obj = self.pool.get('res.lang')
        installed_lang_ids = res_lang_obj.search(cr, uid, [])
        installed_langs = [x.code for x in res_lang_obj.browse(cr, uid, installed_lang_ids, context=context)]
        if obj_multi.chart_template_id.spoken_languages:
            for lang in obj_multi.chart_template_id.spoken_languages.split(';'):
                if lang not in installed_langs:
                    # the language is not installed, so we don't need to load its translations
                    continue
                else:
                    # the language was already installed, so the po files have been loaded at the installation time
                    # and now we need to copy the translations of templates to the right objects
                    langs.append(lang)
            if langs:
                # write account.account translations in the real COA
                self._process_accounts_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
                # copy account.tax.code translations
                self._process_tax_codes_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
                # copy account.tax translations
                self._process_taxes_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
                # copy account.fiscal.position translations
                self._process_fiscal_pos_translations(cr, uid, obj_multi, company_id, langs, 'name', context=context)
        return res

    def _process_accounts_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        """Copy account template translations onto the generated accounts.

        Both trees are searched child_of their root, ordered by id; the [1:]
        slice skips the root record itself. The two id lists are assumed to
        stay parallel (templates and accounts created in the same order).
        """
        obj_acc_template = self.pool.get('account.account.template')
        obj_acc = self.pool.get('account.account')
        acc_template_root_id = obj_multi.chart_template_id.account_root_id.id
        acc_root_id = obj_acc.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', None)])[0]
        in_ids = obj_acc_template.search(cr, uid, [('id', 'child_of', [acc_template_root_id])], order='id')[1:]
        out_ids = obj_acc.search(cr, uid, [('id', 'child_of', [acc_root_id])], order='id')[1:]
        return self.process_translations(cr, uid, langs, obj_acc_template, field, in_ids, obj_acc, out_ids, context=context)

    def _process_tax_codes_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        """Copy tax code template translations onto the generated tax codes
        (same parallel-list convention as _process_accounts_translations)."""
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_tax_code = self.pool.get('account.tax.code')
        tax_code_template_root_id = obj_multi.chart_template_id.tax_code_root_id.id
        tax_code_root_id = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', None)])[0]
        in_ids = obj_tax_code_template.search(cr, uid, [('id', 'child_of', [tax_code_template_root_id])], order='id')[1:]
        out_ids = obj_tax_code.search(cr, uid, [('id', 'child_of', [tax_code_root_id])], order='id')[1:]
        return self.process_translations(cr, uid, langs, obj_tax_code_template, field, in_ids, obj_tax_code, out_ids, context=context)

    def _process_taxes_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        """Copy tax template translations onto the company's generated taxes."""
        obj_tax_template = self.pool.get('account.tax.template')
        obj_tax = self.pool.get('account.tax')
        in_ids = [x.id for x in obj_multi.chart_template_id.tax_template_ids]
        out_ids = obj_tax.search(cr, uid, [('company_id', '=', company_id)], order='id')
        return self.process_translations(cr, uid, langs, obj_tax_template, field, in_ids, obj_tax, out_ids, context=context)

    def _process_fiscal_pos_translations(self, cr, uid, obj_multi, company_id, langs, field, context=None):
        """Copy fiscal position template translations onto the generated ones."""
        obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
        obj_fiscal_position = self.pool.get('account.fiscal.position')
        in_ids = obj_fiscal_position_template.search(cr, uid, [('chart_template_id', '=', obj_multi.chart_template_id.id)], order='id')
        out_ids = obj_fiscal_position.search(cr, uid, [('company_id', '=', company_id)], order='id')
        return self.process_translations(cr, uid, langs, obj_fiscal_position_template, field, in_ids, obj_fiscal_position, out_ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 |
r39132/airflow | airflow/utils/log/s3_task_handler.py | 3 | 7121 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.file_task_handler import FileTaskHandler
class S3TaskHandler(FileTaskHandler, LoggingMixin):
    """
    S3TaskHandler is a python log handler that handles and reads
    task instance logs. It extends airflow FileTaskHandler and
    uploads to and reads from S3 remote storage.
    """

    def __init__(self, base_log_folder, s3_log_folder, filename_template):
        super(S3TaskHandler, self).__init__(base_log_folder, filename_template)
        self.remote_base = s3_log_folder
        self.log_relative_path = ''
        self._hook = None
        self.closed = False
        self.upload_on_close = True

    def _build_hook(self):
        """Create the S3 hook for the configured connection id.

        Returns None (after logging the error) if airflow[aws] is missing or
        the hook cannot be constructed.
        """
        remote_conn_id = configuration.conf.get('core', 'REMOTE_LOG_CONN_ID')
        try:
            from airflow.hooks.S3_hook import S3Hook
            return S3Hook(remote_conn_id)
        except Exception:
            self.log.error(
                'Could not create an S3Hook with connection id "%s". '
                'Please make sure that airflow[aws] is installed and '
                'the S3 connection exists.', remote_conn_id
            )

    @property
    def hook(self):
        # Lazily built; stays None if _build_hook failed.
        if self._hook is None:
            self._hook = self._build_hook()
        return self._hook

    def set_context(self, ti):
        super(S3TaskHandler, self).set_context(ti)
        # Local location and remote location is needed to open and
        # upload local log file to S3 remote storage.
        self.log_relative_path = self._render_filename(ti, ti.try_number)
        # Raw task instances write directly; their logs are not uploaded.
        self.upload_on_close = not ti.raw

    def close(self):
        """
        Close and upload local log file to remote storage S3.
        """
        # When application exit, system shuts down all handlers by
        # calling close method. Here we check if logger is already
        # closed to prevent uploading the log to remote storage multiple
        # times when `logging.shutdown` is called.
        if self.closed:
            return

        super(S3TaskHandler, self).close()

        if not self.upload_on_close:
            return

        local_loc = os.path.join(self.local_base, self.log_relative_path)
        remote_loc = os.path.join(self.remote_base, self.log_relative_path)
        if os.path.exists(local_loc):
            # read log and remove old logs to get just the latest additions
            with open(local_loc, 'r') as logfile:
                log = logfile.read()
            self.s3_write(log, remote_loc)

        # Mark closed so we don't double write if close is called twice
        self.closed = True

    def _read(self, ti, try_number, metadata=None):
        """
        Read logs of given task instance and try_number from S3 remote storage.
        If failed, read the log from task instance host machine.

        :param ti: task instance object
        :param try_number: task instance try_number to read logs from
        :param metadata: log metadata,
                         can be used for streaming log reading and auto-tailing.
        """
        # Explicitly getting log relative path is necessary as the given
        # task instance might be different than task instance passed in
        # in set_context method.
        log_relative_path = self._render_filename(ti, try_number)
        remote_loc = os.path.join(self.remote_base, log_relative_path)

        if self.s3_log_exists(remote_loc):
            # If S3 remote file exists, we do not fetch logs from task instance
            # local machine even if there are errors reading remote logs, as
            # returned remote_log will contain error messages.
            remote_log = self.s3_read(remote_loc, return_error=True)
            log = '*** Reading remote log from {}.\n{}\n'.format(
                remote_loc, remote_log)
            return log, {'end_of_log': True}
        else:
            # BUGFIX: forward `metadata` instead of dropping it so streaming
            # reads / auto-tailing keep working on the local-file fallback.
            return super(S3TaskHandler, self)._read(ti, try_number, metadata)

    def s3_log_exists(self, remote_log_location):
        """
        Check if remote_log_location exists in remote storage

        :param remote_log_location: log's location in remote storage
        :return: True if location exists else False
        """
        try:
            return self.hook.get_key(remote_log_location) is not None
        except Exception:
            pass
        return False

    def s3_read(self, remote_log_location, return_error=False):
        """
        Returns the log found at the remote_log_location. Returns '' if no
        logs are found or there is an error.

        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: str (path)
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise returns '' when an error occurs.
        :type return_error: bool
        """
        try:
            return self.hook.read_key(remote_log_location)
        except Exception:
            msg = 'Could not read logs from {}'.format(remote_log_location)
            self.log.exception(msg)
            # return error if needed
            if return_error:
                return msg
            # BUGFIX: honor the documented contract ('' on error) instead of
            # implicitly returning None.
            return ''

    def s3_write(self, log, remote_log_location, append=True):
        """
        Writes the log to the remote_log_location. Fails silently if no hook
        was created.

        :param log: the log to write to the remote_log_location
        :type log: str
        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: str (path)
        :param append: if False, any existing log file is overwritten. If True,
            the new log is appended to any existing logs.
        :type append: bool
        """
        if append and self.s3_log_exists(remote_log_location):
            old_log = self.s3_read(remote_log_location)
            log = '\n'.join([old_log, log]) if old_log else log

        try:
            self.hook.load_string(
                log,
                key=remote_log_location,
                replace=True,
                encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
            )
        except Exception:
            self.log.exception('Could not write logs to %s', remote_log_location)
| apache-2.0 |
raphaelfruneaux/scrapy | scrapy/utils/iterators.py | 93 | 4569 | import re
import csv
import logging
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
import six
from scrapy.http import TextResponse, Response
from scrapy.selector import Selector
from scrapy.utils.python import re_rsearch, to_unicode
logger = logging.getLogger(__name__)
def xmliter(obj, nodename):
    """Return a iterator of Selector's over all nodes of a XML document,
       given tha name of the node to iterate. Useful for parsing XML feeds.

    obj can be:
    - a Response object
    - a unicode string
    - a string encoded as utf-8
    """
    document = _body_or_str(obj)

    # Everything before the first <nodename> and after the last </nodename>
    # is preserved and wrapped around each extracted node, so namespace
    # declarations on ancestors stay visible.
    start_pat = re.compile(r'^(.*?)<\s*%s(?:\s|>)' % nodename, re.S)
    end_pat = re.compile(r'<\s*/%s\s*>' % nodename, re.S)

    start_match = start_pat.search(document)
    prologue = start_match.group(1).strip() if start_match else ''
    end_match = re_rsearch(end_pat, document)
    epilogue = document[end_match[1]:].strip() if end_match else ''

    node_pat = re.compile(r"<%s[\s>].*?</%s>" % (nodename, nodename), re.DOTALL)
    for node in node_pat.finditer(document):
        wrapped = ''.join((prologue, node.group(), epilogue))
        yield Selector(text=wrapped, type='xml').xpath('//' + nodename)[0]
def xmliter_lxml(obj, nodename, namespace=None, prefix='x'):
    """Iterate <nodename> nodes of an XML document with lxml's streaming
    parser, yielding one Selector per node."""
    from lxml import etree
    stream = _StreamReader(obj)
    lookup_tag = '{%s}%s' % (namespace, nodename) if namespace else nodename
    node_xpath = '//' + ('%s:%s' % (prefix, nodename) if namespace else nodename)
    for _, element in etree.iterparse(stream, tag=lookup_tag,
                                      encoding=stream.encoding):
        serialized = etree.tostring(element)
        # free the parsed subtree to keep memory bounded on large feeds
        element.clear()
        sel = Selector(text=serialized, type='xml')
        if namespace:
            sel.register_namespace(prefix, namespace)
        yield sel.xpath(node_xpath)[0]
class _StreamReader(object):
    """Minimal file-like read() wrapper over a Response body or a string
    (unicode or utf-8 bytes), for feeding lxml.etree.iterparse.

    NOTE(review): Python 2 only as written -- relies on the `unicode` builtin.
    """

    def __init__(self, obj):
        self._ptr = 0  # read cursor into self._text
        if isinstance(obj, Response):
            self._text, self.encoding = obj.body, obj.encoding
        else:
            self._text, self.encoding = obj, 'utf-8'
        self._is_unicode = isinstance(self._text, unicode)

    def read(self, n=65535):
        # First call only: rebind read() to the matching implementation and
        # lstrip just this first chunk (leading whitespace before the XML
        # declaration would upset the parser). Later calls bypass this method.
        self.read = self._read_unicode if self._is_unicode else self._read_string
        return self.read(n).lstrip()

    def _read_string(self, n=65535):
        # Byte input: return the next slice as-is.
        s, e = self._ptr, self._ptr + n
        self._ptr = e
        return self._text[s:e]

    def _read_unicode(self, n=65535):
        # Unicode input: encode each slice to utf-8 for the parser.
        s, e = self._ptr, self._ptr + n
        self._ptr = e
        return self._text[s:e].encode('utf-8')
def csviter(obj, delimiter=None, headers=None, encoding=None, quotechar=None):
    """ Returns an iterator of dictionaries from the given csv object

    obj can be:
    - a Response object
    - a unicode string
    - a string encoded as utf-8

    delimiter is the character used to separate fields on the given obj.

    headers is an iterable that when provided offers the keys
    for the returned dictionaries, if not the first row is used.

    quotechar is the character used to enclosure fields on the given obj.
    """
    encoding = obj.encoding if isinstance(obj, TextResponse) else encoding or 'utf-8'

    def _getrow(csv_r):
        return [to_unicode(field, encoding) for field in next(csv_r)]

    lines = BytesIO(_body_or_str(obj, unicode=False))

    kwargs = {}
    if delimiter: kwargs["delimiter"] = delimiter
    if quotechar: kwargs["quotechar"] = quotechar
    csv_r = csv.reader(lines, **kwargs)

    if not headers:
        try:
            headers = _getrow(csv_r)
        except StopIteration:
            # empty input: nothing to yield
            return
    while True:
        # BUGFIX (PEP 479): on Python 3.7+ a StopIteration escaping a
        # generator is turned into RuntimeError, so end-of-input must be
        # caught explicitly instead of letting next() terminate the loop.
        try:
            row = _getrow(csv_r)
        except StopIteration:
            return
        if len(row) != len(headers):
            logger.warning("ignoring row %(csvlnum)d (length: %(csvrow)d, "
                           "should be: %(csvheader)d)",
                           {'csvlnum': csv_r.line_num, 'csvrow': len(row),
                            'csvheader': len(headers)})
            continue
        else:
            yield dict(zip(headers, row))
def _body_or_str(obj, unicode=True):
    """Return the body of a Response, or the given string, as unicode text
    (default) or utf-8 encoded bytes (unicode=False)."""
    assert isinstance(obj, (Response, six.string_types)), \
        "obj must be Response or basestring, not %s" % type(obj).__name__
    if isinstance(obj, Response):
        if not unicode:
            return obj.body
        if isinstance(obj, TextResponse):
            # TextResponse knows its own encoding
            return obj.body_as_unicode()
        return obj.body.decode('utf-8')
    if isinstance(obj, six.text_type):
        return obj if unicode else obj.encode('utf-8')
    return obj.decode('utf-8') if unicode else obj
| bsd-3-clause |
walshjon/openmc | openmc/data/laboratory.py | 3 | 4466 | from collections.abc import Iterable
from numbers import Real, Integral
import numpy as np
import openmc.checkvalue as cv
from openmc.stats import Tabular, Univariate, Discrete, Mixture
from .angle_energy import AngleEnergy
from .function import INTERPOLATION_SCHEME
from .endf import get_tab2_record, get_tab1_record
class LaboratoryAngleEnergy(AngleEnergy):
    """Laboratory angle-energy distribution

    Parameters
    ----------
    breakpoints : Iterable of int
        Breakpoints defining interpolation regions
    interpolation : Iterable of int
        Interpolation codes
    energy : Iterable of float
        Incoming energies at which distributions exist
    mu : Iterable of openmc.stats.Univariate
        Distribution of scattering cosines for each incoming energy
    energy_out : Iterable of Iterable of openmc.stats.Univariate
        Distribution of outgoing energies for each incoming energy/scattering
        cosine

    Attributes
    ----------
    breakpoints : Iterable of int
        Breakpoints defining interpolation regions
    interpolation : Iterable of int
        Interpolation codes
    energy : Iterable of float
        Incoming energies at which distributions exist
    mu : Iterable of openmc.stats.Univariate
        Distribution of scattering cosines for each incoming energy
    energy_out : Iterable of Iterable of openmc.stats.Univariate
        Distribution of outgoing energies for each incoming energy/scattering
        cosine

    """

    def __init__(self, breakpoints, interpolation, energy, mu, energy_out):
        super().__init__()
        # each assignment goes through the validating setter below
        self.breakpoints = breakpoints
        self.interpolation = interpolation
        self.energy = energy
        self.mu = mu
        self.energy_out = energy_out

    # -- plain accessors; all validation lives in the matching setters --

    @property
    def breakpoints(self):
        return self._breakpoints

    @property
    def interpolation(self):
        return self._interpolation

    @property
    def energy(self):
        return self._energy

    @property
    def mu(self):
        return self._mu

    @property
    def energy_out(self):
        return self._energy_out

    @breakpoints.setter
    def breakpoints(self, breakpoints):
        cv.check_type('laboratory angle-energy breakpoints', breakpoints,
                      Iterable, Integral)
        self._breakpoints = breakpoints

    @interpolation.setter
    def interpolation(self, interpolation):
        cv.check_type('laboratory angle-energy interpolation', interpolation,
                      Iterable, Integral)
        self._interpolation = interpolation

    @energy.setter
    def energy(self, energy):
        cv.check_type('laboratory angle-energy incoming energy', energy,
                      Iterable, Real)
        self._energy = energy

    @mu.setter
    def mu(self, mu):
        cv.check_type('laboratory angle-energy outgoing cosine', mu,
                      Iterable, Univariate)
        self._mu = mu

    @energy_out.setter
    def energy_out(self, energy_out):
        cv.check_iterable_type('laboratory angle-energy outgoing energy',
                               energy_out, Univariate, 2, 2)
        self._energy_out = energy_out

    @classmethod
    def from_endf(cls, file_obj):
        """Generate laboratory angle-energy distribution from an ENDF evaluation

        Parameters
        ----------
        file_obj : file-like object
            ENDF file positioned at the start of a section for a correlated
            angle-energy distribution

        Returns
        -------
        openmc.data.LaboratoryAngleEnergy
            Laboratory angle-energy distribution

        """
        # top-level TAB2: params[5] is the number of incident energies
        params, tab2 = get_tab2_record(file_obj)
        ne = params[5]
        energy = np.zeros(ne)
        mu = []
        energy_out = []
        for i in range(ne):
            # per-energy TAB2: params[1] is the incident energy, params[5]
            # the number of scattering cosines tabulated for it
            params, tab2mu = get_tab2_record(file_obj)
            energy[i] = params[1]
            n_mu = params[5]
            mu_i = np.zeros(n_mu)
            p_mu_i = np.zeros(n_mu)
            energy_out_i = []
            for j in range(n_mu):
                # one TAB1 per cosine: outgoing-energy table (f.x, f.y)
                params, f = get_tab1_record(file_obj)
                mu_i[j] = params[1]
                # weight of this cosine taken as the (unnormalized) sum of
                # its outgoing-energy probabilities
                p_mu_i[j] = sum(f.y)
                energy_out_i.append(Tabular(f.x, f.y))
            mu.append(Tabular(mu_i, p_mu_i))
            energy_out.append(energy_out_i)

        return cls(tab2.breakpoints, tab2.interpolation, energy, mu, energy_out)

    def to_hdf5(self, group):
        # HDF5 serialization is not implemented for this distribution type
        raise NotImplementedError
| mit |
manevant/django-oscar | src/oscar/apps/basket/admin.py | 37 | 1152 | from django.contrib import admin
from oscar.core.loading import get_model
Line = get_model('basket', 'line')
class LineInline(admin.TabularInline):
    """Read-only inline listing of basket lines on the basket change page."""
    model = Line
    readonly_fields = ('line_reference', 'product', 'price_excl_tax',
                       'price_incl_tax', 'price_currency', 'stockrecord')
class LineAdmin(admin.ModelAdmin):
    """Admin for individual basket lines; all fields read-only since lines
    are created through the basket, not by hand."""
    list_display = ('id', 'basket', 'product', 'stockrecord', 'quantity',
                    'price_excl_tax', 'price_currency', 'date_created')
    readonly_fields = ('basket', 'stockrecord', 'line_reference', 'product',
                       'price_currency', 'price_incl_tax', 'price_excl_tax',
                       'quantity')
class BasketAdmin(admin.ModelAdmin):
    """Admin for baskets, with their lines shown inline."""
    list_display = ('id', 'owner', 'status', 'num_lines',
                    'contains_a_voucher', 'date_created', 'date_submitted',
                    'time_before_submit')
    readonly_fields = ('owner', 'date_merged', 'date_submitted')
    inlines = [LineInline]
# Register the basket models; Basket and Line get the custom admin classes
# above, LineAttribute uses the default ModelAdmin.
admin.site.register(get_model('basket', 'basket'), BasketAdmin)
admin.site.register(Line, LineAdmin)
admin.site.register(get_model('basket', 'LineAttribute'))
| bsd-3-clause |
bliz937/kivy | kivy/factory.py | 55 | 5771 | '''
Factory object
==============
The factory can be used to automatically register any class or module
and instantiate classes from it anywhere in your project. It is an
implementation of the
`Factory Pattern <http://en.wikipedia.org/wiki/Factory_pattern>`_.
The class list and available modules are automatically generated by setup.py.
Example for registering a class/module::
>>> from kivy.factory import Factory
>>> Factory.register('Widget', module='kivy.uix.widget')
>>> Factory.register('Vector', module='kivy.vector')
Example of using the Factory::
>>> from kivy.factory import Factory
>>> widget = Factory.Widget(pos=(456,456))
>>> vector = Factory.Vector(9, 2)
Example using a class name::
>>> from kivy.factory import Factory
>>> Factory.register('MyWidget', cls=MyWidget)
By default, the first classname you register via the factory is permanent.
If you wish to change the registered class, you need to unregister the
classname before you re-assign it::
>>> from kivy.factory import Factory
>>> Factory.register('MyWidget', cls=MyWidget)
>>> widget = Factory.MyWidget()
>>> Factory.unregister('MyWidget')
>>> Factory.register('MyWidget', cls=CustomWidget)
>>> customWidget = Factory.MyWidget()
'''
__all__ = ('Factory', 'FactoryException')
from kivy.logger import Logger
class FactoryException(Exception):
    """Raised when a requested class name is unknown to the Factory or
    cannot be constructed from its registration info."""
    pass
class FactoryBase(object):
    """Registry mapping class names to classes, modules or base-class
    recipes; resolved lazily on attribute access (see module docstring)."""

    def __init__(self):
        super(FactoryBase, self).__init__()
        # classname -> {'module', 'cls', 'is_template', 'baseclasses',
        #               'filename'}; 'cls' is filled in lazily by __getattr__
        self.classes = {}

    def is_template(self, classname):
        '''Return True if the classname is a template from the
        :class:`~kivy.lang.Builder`.

        .. versionadded:: 1.0.5
        '''
        if classname in self.classes:
            return self.classes[classname]['is_template']
        else:
            return False

    def register(self, classname, cls=None, module=None, is_template=False,
                 baseclasses=None, filename=None, warn=False):
        '''Register a new classname referring to a real class or
        class definition in a module. Warn, if True will emit a warning message
        when a class is re-declared.

        .. versionchanged:: 1.9.0
            `warn` was added.

        .. versionchanged:: 1.7.0
            :attr:`baseclasses` and :attr:`filename` added

        .. versionchanged:: 1.0.5
            :attr:`is_template` has been added in 1.0.5.
        '''
        if cls is None and module is None and baseclasses is None:
            raise ValueError(
                'You must specify either cls= or module= or baseclasses =')
        if classname in self.classes:
            # first registration wins; re-declarations are ignored
            if warn:
                info = self.classes[classname]
                Logger.warning('Factory: Ignored class "{}" re-declaration. '
                    'Current - module: {}, cls: {}, baseclass: {}, filename: {}. '
                    'Ignored - module: {}, cls: {}, baseclass: {}, filename: {}.'.
                    format(classname, info['module'], info['cls'],
                        info['baseclasses'], info['filename'], module, cls,
                        baseclasses, filename))
            return
        self.classes[classname] = {
            'module': module,
            'cls': cls,
            'is_template': is_template,
            'baseclasses': baseclasses,
            'filename': filename}

    def unregister(self, *classnames):
        '''Unregisters the classnames previously registered via the
        register method. This allows the same classnames to be re-used in
        different contexts.

        .. versionadded:: 1.7.1
        '''
        for classname in classnames:
            if classname in self.classes:
                self.classes.pop(classname)

    def unregister_from_filename(self, filename):
        '''Unregister all the factory objects related to the filename passed in
        the parameter.

        .. versionadded:: 1.7.0
        '''
        to_remove = [x for x in self.classes
                     if self.classes[x]['filename'] == filename]
        for name in to_remove:
            del self.classes[name]

    def __getattr__(self, name):
        # Resolve `name` to its registered class, importing or synthesizing
        # it on first access and caching the result in item['cls'].
        classes = self.classes
        if name not in classes:
            if name[0] == name[0].lower():
                # if trying to access attributes like checking for `bind`
                # then raise AttributeError
                raise AttributeError
            raise FactoryException('Unknown class <%s>' % name)

        item = classes[name]
        cls = item['cls']

        # No class to return, import the module
        if cls is None:
            if item['module']:
                module = __import__(name=item['module'], fromlist='.')
                if not hasattr(module, name):
                    raise FactoryException(
                        'No class named <%s> in module <%s>' % (
                            name, item['module']))
                cls = item['cls'] = getattr(module, name)
            elif item['baseclasses']:
                # build the class from the '+'-separated base class names
                rootwidgets = []
                for basecls in item['baseclasses'].split('+'):
                    rootwidgets.append(Factory.get(basecls))
                cls = item['cls'] = type(str(name), tuple(rootwidgets), {})
            else:
                raise FactoryException('No information to create the class')

        return cls

    get = __getattr__
#: Factory instance to use for getting new classes
Factory = FactoryBase()

# Now import the file with all registers
# automatically generated by build_factory
import kivy.factory_registers  # NOQA
Logger.info('Factory: %d symbols loaded' % len(Factory.classes))

if __name__ == '__main__':
    # smoke test: register a couple of well-known classes by module path
    Factory.register('Vector', module='kivy.vector')
    Factory.register('Widget', module='kivy.uix.widget')
| mit |
wasade/qiita | qiita_pet/handlers/download.py | 2 | 1553 | from tornado.web import authenticated
from os.path import basename
from .base_handlers import BaseHandler
from qiita_pet.exceptions import QiitaPetAuthorizationError
from qiita_db.util import filepath_id_to_rel_path
from qiita_db.meta_util import get_accessible_filepath_ids
class DownloadHandler(BaseHandler):
    """Serve a protected filepath to an authorized user via nginx.

    The written body is only a fallback message; the actual download is
    delegated to nginx through the X-Accel-Redirect header.
    """

    @authenticated
    def get(self, filepath_id):
        # Tornado passes the URL capture group as a string
        filepath_id = int(filepath_id)

        # Check access to file
        accessible_filepaths = get_accessible_filepath_ids(self.current_user)

        if filepath_id not in accessible_filepaths:
            raise QiitaPetAuthorizationError(
                self.current_user, 'filepath id %s' % str(filepath_id))

        relpath = filepath_id_to_rel_path(filepath_id)
        fname = basename(relpath)

        # If we don't have nginx, write a file that indicates this
        self.write("This installation of Qiita was not equipped with nginx, "
                   "so it is incapable of serving files. The file you "
                   "attempted to download is located at %s" % relpath)

        self.set_header('Content-Description', 'File Transfer')
        self.set_header('Content-Type', 'application/octet-stream')
        self.set_header('Content-Transfer-Encoding', 'binary')
        self.set_header('Expires', '0')
        self.set_header('Cache-Control', 'no-cache')
        # nginx intercepts this header and streams the file itself
        self.set_header('X-Accel-Redirect', '/protected/' + relpath)
        self.set_header('Content-Disposition',
                        'attachment; filename=%s' % fname)
        self.finish()
| bsd-3-clause |
MobinRanjbar/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/math.py | 56 | 1068 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import MATHNS
from .element import Element
# ODF 1.0 section 12.5
# Mathematical content is represented by MathML 2.0
# Autogenerated
def Math(**args):
    """Create a <math:math> element, the MathML 2.0 root (ODF 1.0 section 12.5)."""
    return Element(qname = (MATHNS,'math'), **args)
| apache-2.0 |
fengshao0907/vitess | py/vtdb/vtdb_logger.py | 10 | 3117 | # Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import logging
# VtdbLogger's methods are called whenever something worth noting happens.
# The default behavior of the class is to log using the logging module.
# Registering a new implementation allows the client code to report the
# conditions to any custom reporting mechanism.
#
# We use this in the following cases:
# - error reporting (an exception happened)
# - performance logging (calls to other services took that long)
class VtdbLogger(object):
    """Default implementation of the vtdb logging hooks.

    Every method simply logs via the logging module; applications can
    subclass this and install their own via register_vtdb_logger().
    """

    #
    # topology callbacks
    #

    # topo_keyspace_fetch is called when we successfully get a SrvKeyspace object.
    def topo_keyspace_fetch(self, keyspace_name, topo_rtt):
        logging.info('Fetched keyspace %s from topo_client in %f secs',
                     keyspace_name, topo_rtt)

    # topo_empty_keyspace_list is called when we get an empty list of
    # keyspaces from topo server.
    def topo_empty_keyspace_list(self):
        logging.warning('topo_empty_keyspace_list')

    # topo_bad_keyspace_data is called if we generated an exception
    # when reading a keyspace. This is within an exception handler.
    def topo_bad_keyspace_data(self, keyspace_name):
        logging.exception('error getting or parsing keyspace data for %s',
                          keyspace_name)

    # topo_zkocc_error is called whenever we get a zkocc.ZkOccError
    # when trying to resolve an endpoint.
    def topo_zkocc_error(self, message, db_key, e):
        logging.warning('topo_zkocc_error: %s for %s: %s', message, db_key, e)

    # topo_exception is called whenever we get an exception when trying
    # to resolve an endpoint (that is not a zkocc.ZkOccError, these get
    # handled by topo_zkocc_error).
    def topo_exception(self, message, db_key, e):
        logging.warning('topo_exception: %s for %s: %s', message, db_key, e)

    #
    # vtclient callbacks
    #

    # Integrity Error is called when mysql throws an IntegrityError on a query.
    # This is thrown by both vtclient and vtgatev2.
    def integrity_error(self, e):
        logging.warning('integrity_error: %s', e)

    # vtclient_exception is called when a FatalError is raised by
    # vtclient (that error is sent back to the application, the retries
    # happen at a lower level). e can be one of
    # dbexceptions.{RetryError, FatalError, TxPoolFull}
    # or a more generic dbexceptions.OperationalError
    def vtclient_exception(self, keyspace_name, shard_name, db_type, e):
        logging.warning('vtclient_exception for %s.%s.%s: %s', keyspace_name,
                        shard_name, db_type, e)

    #
    # vtgatev2 callbacks
    #

    # vtgatev2_exception is called when we get an exception talking to vtgate.
    def vtgatev2_exception(self, e):
        logging.warning('vtgatev2_exception: %s', e)

    def log_private_data(self, private_data):
        logging.info('Additional exception data %s', private_data)
# registration mechanism for VtdbLogger
# (process-wide singleton; register once at startup, not thread-safe)
__vtdb_logger = VtdbLogger()


def register_vtdb_logger(logger):
    """Install a custom VtdbLogger implementation process-wide."""
    global __vtdb_logger
    __vtdb_logger = logger


def get_logger():
    """Return the currently registered VtdbLogger."""
    return __vtdb_logger
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/sandbox/regression/ols_anova_original.py | 33 | 10488 | ''' convenience functions for ANOVA type analysis with OLS
Note: statistical results of ANOVA are not checked, OLS is
checked but not whether the reported results are the ones used
in ANOVA
'''
from __future__ import print_function
import numpy as np
#from scipy import stats
from statsmodels.compat.python import lmap
import statsmodels.api as sm
# record layout of the pig/litter example dataset (expects dftest3.data in cwd)
dt_b = np.dtype([('breed', int), ('sex', int), ('litter', int),
                 ('pen', int), ('pig', int), ('age', float),
                 ('bage', float), ('y', float)])

''' too much work using structured masked arrays
dta = np.mafromtxt('dftest3.data', dtype=dt_b)
dta_use = np.ma.column_stack[[dta[col] for col in 'y sex age'.split()]]
'''

dta = np.genfromtxt('dftest3.data')
print(dta.shape)
mask = np.isnan(dta)
print("rows with missing values", mask.any(1).sum())

# map column name -> (column index, dtype)
vars = dict((v[0], (idx, v[1])) for idx, v in enumerate((
    ('breed', int), ('sex', int), ('litter', int),
    ('pen', int), ('pig', int), ('age', float),
    ('bage', float), ('y', float))))

datavarnames = 'y sex age'.split()
#possible to avoid temporary array ?
dta_use = dta[:, [vars[col][0] for col in datavarnames]]
# drop rows with any missing value (complete-case analysis)
keeprows = ~np.isnan(dta_use).any(1)
print('number of complete observations', keeprows.sum())
dta_used = dta_use[keeprows,:]

# name -> [data column, position, dtype] for the retained variables
varsused = dict((k, [dta_used[:,idx], idx, vars[k][1]]) for idx, k in enumerate(datavarnames))

# use function for dummy
#sexgroups = np.unique(dta_used[:,1])
#sexdummy = (dta_used[:,1][:, None] == sexgroups).astype(int)
def data2dummy(x, returnall=False):
    '''convert array of categories to dummy variables

    by default drops dummy variable for last category
    uses ravel, 1d only'''
    flat = x.ravel()
    levels = np.unique(flat)
    # one indicator column per category level (levels are sorted by np.unique)
    indicators = (flat[:, None] == levels).astype(int)
    return indicators if returnall else indicators[:, :-1]
def data2proddummy(x):
    '''creates product dummy variables from 2 columns of 2d array

    drops last dummy variable, but not from each category
    singular with simple dummy variable but not with constant

    quickly written, no safeguards
    '''
    #brute force, assumes x is 2d
    #replace with encoding if possible
    x = np.asarray(x)
    # BUGFIX: np.unique(lmap(tuple, x.tolist())) flattens the 2-d input and
    # returns unique *scalars*, so groups[:, None, :] below raised IndexError.
    # Build the sorted unique rows explicitly instead (same lexicographic
    # order np.unique would give).
    groups = np.array(sorted(set(map(tuple, x.tolist()))))
    #includes singularity with additive factors
    return (x == groups[:, None, :]).all(-1).T.astype(int)[:, :-1]
def data2groupcont(x1, x2):
    '''create group-specific continuous variables (dummy * continuous)

    Parameters
    ----------
    x1 : 1d array
        label or group array
    x2 : 1d array (float)
        continuous variable

    Notes
    -----
    useful for group specific slope coefficients in regression
    '''
    # promote the continuous variable to a column so it broadcasts
    # against the (nobs, ngroups) dummy matrix
    cont = x2[:, None] if x2.ndim == 1 else x2
    return data2dummy(x1, returnall=True) * cont
# dummy-encode the sex column (drop-last coding)
sexdummy = data2dummy(dta_used[:,1])

# replace factor columns in varsused by their dummy encoding
factors = ['sex']
for k in factors:
    varsused[k][0] = data2dummy(varsused[k][0])

# add interaction (product) dummies, keyed by the joined variable names
products = [('sex', 'age')]
for k in products:
    varsused[''.join(k)] = data2proddummy(np.c_[varsused[k[0]][0],varsused[k[1]][0]])

# make dictionary of variables with dummies as one variable
#vars_to_use = {name: data or dummy variables}

# baseline design: sex dummies, age, constant; response is column 0
X_b0 = np.c_[sexdummy, dta_used[:,2], np.ones((dta_used.shape[0],1))]
y_b0 = dta_used[:,0]
# NOTE(review): `.results` is the old statsmodels API; current statsmodels
# uses `.fit()` -- confirm against the pinned statsmodels version
res_b0 = sm.OLS(y_b0, X_b0).results
print(res_b0.params)
print(res_b0.ssr)
# %-format template for a SAS-style ANOVA table; the mapping is produced
# by anovadict(); model sum of squares excludes the constant
anova_str0 = '''
ANOVA statistics (model sum of squares excludes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ess)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''

# variant where the model sum of squares includes the constant
# (uses the derived key 'ssmwithmean' from anovadict())
anova_str = '''
ANOVA statistics (model sum of squares includes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ssmwithmean)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
#print(anova_str % dict([('df_model', res.df_model)])
#anovares = ['df_model' , 'df_resid'
def anovadict(res):
    '''Build a dict of ANOVA statistics from a regression results object.

    Starts from a copy of ``res.__dict__`` and adds the attributes needed
    by the ANOVA table templates, plus ``nobs`` and the model sum of
    squares including the mean (``ssmwithmean``).  Not checked for
    completeness.
    '''
    stats = dict(res.__dict__)
    for name in ('df_model', 'df_resid', 'ess', 'ssr', 'uncentered_tss',
                 'mse_model', 'mse_resid', 'mse_total', 'fvalue',
                 'f_pvalue', 'rsquared'):
        stats[name] = getattr(res, name)
    stats['nobs'] = res.model.nobs
    # model sum of squares including the mean
    stats['ssmwithmean'] = res.uncentered_tss - res.ssr
    return stats
# full ANOVA table for the baseline model
print(anova_str0 % anovadict(res_b0))
#the following leaves the constant in, not with NIST regression
#but something fishy with res.ess negative in examples
print(anova_str % anovadict(res_b0))

# reduced model: sex dummies + constant only
print('using sex only')
X2 = np.c_[sexdummy, np.ones((dta_used.shape[0],1))]
res2 = sm.OLS(y_b0, X2).results
print(res2.params)
print(res2.ssr)
print(anova_str % anovadict(res2))

# reduced model: age + constant only
print('using age only')
X3 = np.c_[ dta_used[:,2], np.ones((dta_used.shape[0],1))]
res3 = sm.OLS(y_b0, X3).results
print(res3.params)
print(res3.ssr)
print(anova_str % anovadict(res3))
def form2design(ss, data):
    '''convert string formula to data dictionary

    Parameters
    ----------
    ss : string
        space separated terms:

        * I : add constant
        * varname : for simple varnames data is used as is
        * F:varname : create dummy variables for factor varname
        * P:varname1*varname2 : create product dummy variables for
          varnames
        * G:varname1*varname2 : create product between factor and
          continuous variable
    data : dict or structured array
        data set, access of variables by name as in dictionaries

    Returns
    -------
    vars : dictionary
        dictionary of variables with converted dummy variables
    names : list
        list of names, product (P:) and grouped continuous
        variables (G:) have name by joining individual names
        sorted according to input

    Examples
    --------
    >>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
    >>> xx.keys()
    ['a', 'b', 'const', 'cf', 'cd']
    >>> n
    ['const', 'a', 'b', 'cd', 'cf']

    Notes
    -----
    with sorted dict, separate name list wouldn't be necessary
    '''
    design = {}
    order = []
    for term in ss.split():
        if term == 'I':
            # intercept column
            design['const'] = np.ones(data.shape[0])
            order.append('const')
        elif ':' not in term:
            # plain variable, taken from the data as is
            design[term] = data[term]
            order.append(term)
        elif term.startswith('F:'):
            factor = term.split(':')[1]
            design[factor] = data2dummy(data[factor])
            order.append(factor)
        elif term.startswith('P:'):
            parts = term.split(':')[1].split('*')
            key = ''.join(parts)
            design[key] = data2proddummy(np.c_[data[parts[0]], data[parts[1]]])
            order.append(key)
        elif term.startswith('G:'):
            parts = term.split(':')[1].split('*')
            key = ''.join(parts)
            design[key] = data2groupcont(data[parts[0]], data[parts[1]])
            order.append(key)
        else:
            raise ValueError('unknown expression in formula')
    return design, order
# build a synthetic data set: 4 integer factors (a-d) + 2 continuous vars (e, f)
nobs = 1000
testdataint = np.random.randint(3, size=(nobs,4)).view([('a',int),('b',int),('c',int),('d',int)])
testdatacont = np.random.normal( size=(nobs,2)).view([('e',float), ('f',float)])

# merge the two structured dtypes into one
# NOTE(review): zip_descr is a private numpy.lib.recfunctions helper --
# verify it still exists in the pinned numpy version
import numpy.lib.recfunctions
dt2 = numpy.lib.recfunctions.zip_descr((testdataint, testdatacont),flatten=True)
# concatenate structured arrays
testdata = np.empty((nobs,1), dt2)
for name in testdataint.dtype.names:
    testdata[name] = testdataint[name]
for name in testdatacont.dtype.names:
    testdata[name] = testdatacont[name]

#print(form2design('a',testdata))

# disabled smoke tests for the formula helpers
if 0:
    xx, n = form2design('F:a',testdata)
    print(xx)
    print(form2design('P:a*b',testdata))
    print(data2proddummy((np.c_[testdata['a'],testdata['b']])))
    xx, names = form2design('a F:b P:c*d',testdata)

#xx, names = form2design('I a F:b F:c F:d P:c*d',testdata)
xx, names = form2design('I a F:b P:c*d', testdata)
xx, names = form2design('I a F:b P:c*d G:a*e f', testdata)

# assemble the design matrix in formula order
X = np.column_stack([xx[nn] for nn in names])
# simple test version: all coefficients equal to one
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).results
print(rest1.params)
print(anova_str % anovadict(rest1))
def dropname(ss, li):
    '''Return a copy of ``li`` with the space-delimited names in ``ss`` removed.

    Each listed name is removed once (its first occurrence); a name that
    is not present raises ``ValueError``.  The original list is left
    unchanged.
    '''
    remaining = list(li)
    for name in ss.split():
        remaining.remove(name)
    return remaining
# refit after dropping the grouped-continuous term 'ae' and plain 'f'
X = np.column_stack([xx[nn] for nn in dropname('ae f', names)])
# simple test version: all coefficients equal to one
y = X.sum(1) + 0.01*np.random.normal(size=(nobs))
rest1 = sm.OLS(y,X).results
print(rest1.params)
print(anova_str % anovadict(rest1))

# Example: from Bruce
# -------------------

# read data set and drop rows with missing data
# NOTE(review): `missing` is the old numpy keyword; current numpy spells
# it `missing_values` -- confirm against the pinned numpy version
dta = np.genfromtxt('dftest3.data', dt_b,missing='.', usemask=True)
print('missing', [dta.mask[k].sum() for k in dta.dtype.names])

# a row is dropped if any of its fields is masked
m = dta.mask.view(bool)
droprows = m.reshape(-1,len(dta.dtype.names)).any(1)
# get complete data as plain structured array
# maybe doesn't work with masked arrays
dta_use_b1 = dta[~droprows,:].data
print(dta_use_b1.shape)
print(dta_use_b1.dtype)

#Example b1: variables from Bruce's glm

# prepare data and dummy variables
xx_b1, names_b1 = form2design('I F:sex age', dta_use_b1)
# create design matrix
X_b1 = np.column_stack([xx_b1[nn] for nn in dropname('', names_b1)])
y_b1 = dta_use_b1['y']
# estimate using OLS
rest_b1 = sm.OLS(y_b1, X_b1).results
# print(results)
print(rest_b1.params)
print(anova_str % anovadict(rest_b1))
#compare with original version only in original version
print(anova_str % anovadict(res_b0))

# Example: use all variables except pig identifier
allexog = ' '.join(dta.dtype.names[:-1])
#'breed sex litter pen pig age bage'

xx_b1a, names_b1a = form2design('I F:breed F:sex F:litter F:pen age bage', dta_use_b1)
X_b1a = np.column_stack([xx_b1a[nn] for nn in dropname('', names_b1a)])
y_b1a = dta_use_b1['y']
rest_b1a = sm.OLS(y_b1a, X_b1a).results
print(rest_b1a.params)
print(anova_str % anovadict(rest_b1a))

# leave-one-term-out refits for comparison
for dropn in names_b1a:
    print('\nResults dropping', dropn)
    X_b1a_ = np.column_stack([xx_b1a[nn] for nn in dropname(dropn, names_b1a)])
    y_b1a_ = dta_use_b1['y']
    rest_b1a_ = sm.OLS(y_b1a_, X_b1a_).results
    #print(rest_b1a_.params
    print(anova_str % anovadict(rest_b1a_))
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# count of events for which no specific handler was registered,
# keyed by event name (autodict comes from perf's Core helper module)
unhandled = autodict()

def trace_begin():
    # called by perf once before the first event is processed
    print "trace_begin"
    pass

def trace_end():
    # called by perf once after the last event: dump the summary table
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
                       common_secs, common_nsecs, common_pid, common_comm,
                       common_callchain, vec):
    # handler for the irq:softirq_entry tracepoint: print the common
    # header and extra common fields, then the softirq vector
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)

    print_uncommon(context)

    # symbol_str() renders the numeric vector as its symbolic name
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
                  common_secs, common_nsecs, common_pid, common_comm,
                  common_callchain, call_site, ptr, bytes_req, bytes_alloc,
                  gfp_flags):
    # handler for the kmem:kmalloc tracepoint
    print_header(event_name, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm)

    print_uncommon(context)

    # flag_str() renders the numeric gfp_flags bitmask as flag names
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
          "bytes_alloc=%u, gfp_flags=%s\n" % \
          (call_site, ptr, bytes_req, bytes_alloc,
           flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # tally events with no dedicated handler; the first increment raises
    # TypeError (presumably autodict's missing-key value is not an int --
    # TODO confirm against Core.autodict) and initializes the count to 1
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # one-line common prefix; the trailing comma suppresses the newline so
    # the per-event fields can follow on the same line
    print "%-20s %5u %05u.%09u %8u %-20s " % \
          (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # these fields are fetched through perf's context accessor functions
    # rather than passed as positional handler arguments
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
          % (common_pc(context), trace_flag_str(common_flags(context)), \
             common_lock_depth(context))
def print_unhandled():
    # print a name/count table of events that had no dedicated handler
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                            "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from typing import Iterable
# canonical column schema of the input CSV (semicolon separated); also used
# to use it to filter out the header of the CSV file.
HEADER = 'source_address;source_city;target_address;target_city'
# Here we return field names that are not duplicated and can be used with a named tuple.
def _input_fields(header: str, sep: str = ";"):
return header.split(sep)
# Because writing a data class is boring, named tuples just make it much easier
# Record has one field per HEADER column:
# (source_address, source_city, target_address, target_city)
Record = namedtuple('Record', _input_fields(HEADER))
def line2record(line: str, sep: str = ";") -> Record:
    """ Transform a line of data into a Record.

    The previous annotation said ``Iterable[Record]``, but the function
    builds and returns a single ``Record``.

    Args:
        line: A line from the CSV data file
        sep: The separator used in the line. Default is ;
    Returns:
        A Record object built from the line's fields.
    """
    elements = line.split(sep)
    return Record(*elements)
# -*- coding: utf-8 -*-
""" Tablib - TSV (Tab Separated Values) Support.
"""
from tablib.compat import is_py3, csv, StringIO
# tablib format registration metadata
title = 'tsv'
extensions = ('tsv',)

# encoding passed to the py2 unicode-aware csv reader/writer
DEFAULT_ENCODING = 'utf-8'
def export_set(dataset):
    """Returns a TSV representation of Dataset."""
    buff = StringIO()

    # py2's csv module needs the encoding-aware wrapper from tablib.compat
    if is_py3:
        writer = csv.writer(buff, delimiter='\t')
    else:
        writer = csv.writer(buff, encoding=DEFAULT_ENCODING, delimiter='\t')

    for row in dataset._package(dicts=False):
        writer.writerow(row)

    return buff.getvalue()
def import_set(dset, in_stream, headers=True):
    """Returns dataset from TSV stream."""
    dset.wipe()

    # py2's csv module needs the encoding-aware wrapper from tablib.compat
    if is_py3:
        reader = csv.reader(in_stream.splitlines(), delimiter='\t')
    else:
        reader = csv.reader(in_stream.splitlines(), delimiter='\t',
                            encoding=DEFAULT_ENCODING)

    for lineno, row in enumerate(reader):
        # Skip empty rows
        if not row:
            continue
        if lineno == 0 and headers:
            dset.headers = row
        else:
            dset.append(row)
def detect(stream):
    """Returns True if given stream is valid TSV."""
    try:
        # the Sniffer raises csv.Error (or TypeError on bad input)
        # when the sample does not look like tab-separated data
        csv.Sniffer().sniff(stream, delimiters='\t')
    except (csv.Error, TypeError):
        return False
    return True
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains various general utility functions.
"""
from __future__ import with_statement
import logging
logger = logging.getLogger(__name__)
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
import re
import unicodedata
import os
import random
import itertools
import tempfile
from functools import wraps # for `synchronous` function lock
import multiprocessing
import shutil
import sys
from contextlib import contextmanager
import subprocess
import numpy
import scipy.sparse
if sys.version_info[0] >= 3:
unicode = str
from six import iteritems, u, string_types, unichr
from six.moves import xrange
try:
from smart_open import smart_open
except ImportError:
logger.info("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
    """
    Add support for `with Base(attrs) as fout:` to the base class if it's missing.

    The base class' `close()` method will be called on context exit, to always
    close the file properly.

    This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6),
    which otherwise raise "AttributeError: GzipFile instance has no attribute
    '__exit__'".
    """
    needs_enter = not hasattr(base, '__enter__')
    needs_exit = not hasattr(base, '__exit__')
    if needs_enter:
        attrs['__enter__'] = lambda self: self
    if needs_exit:
        attrs['__exit__'] = lambda self, type, value, traceback: self.close()
    # build a subclass named 'Closing<Base>' carrying the extra attributes
    return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
    """Open `fname`, transparently decompressing `.gz` and `.bz2` files.

    Plain files are opened with the builtin `open`; compressed files are
    wrapped via `make_closing` so they support the `with` statement on
    old Pythons.
    """
    suffix = os.path.splitext(fname)[1]
    if suffix == '.bz2':
        from bz2 import BZ2File
        return make_closing(BZ2File)(fname, mode)
    if suffix == '.gz':
        from gzip import GzipFile
        return make_closing(GzipFile)(fname, mode)
    return open(fname, mode)
try:
from pattern.en import parse
logger.info("'pattern' package found; utils.lemmatize() is available for English")
HAS_PATTERN = True
except ImportError:
HAS_PATTERN = False
# maximal runs of unicode word characters that do not start with a digit
PAT_ALPHABETIC = re.compile('(((?![\d])\w)+)', re.UNICODE)
# HTML entities such as &amp; &#65; &#x41;
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def synchronous(tlockname):
    """
    A decorator to place an instance-based lock around a method.

    The lock object is looked up on the instance, under the attribute
    name `tlockname`, at call time.

    Adapted from http://code.activestate.com/recipes/577105-synchronization-decorator-for-class-methods/
    """
    def _synched(func):
        @wraps(func)
        def _synchronizer(self, *args, **kwargs):
            tlock = getattr(self, tlockname)
            logger.debug("acquiring lock %r for %s" % (tlockname, func.__name__))

            # use lock as a context manager to perform safe acquire/release pairs
            with tlock:
                logger.debug("acquired lock %r for %s" % (tlockname, func.__name__))
                result = func(self, *args, **kwargs)
                logger.debug("releasing lock %r for %s" % (tlockname, func.__name__))
            return result
        return _synchronizer
    return _synched
class NoCM(object):
    """Do-nothing stand-in for a lock/context manager: `acquire`,
    `release`, and the `with` protocol are all no-ops."""

    def acquire(self):
        pass

    def release(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass

# shared module-level no-op context manager instance
nocm = NoCM()
@contextmanager
def file_or_filename(input):
    """
    Return a file-like object ready to be read from the beginning. `input` is either
    a filename (gz/bz2 also supported) or a file-like object supporting seek.
    """
    if not isinstance(input, string_types):
        # already a file-like object: rewind so reading starts at the top
        input.seek(0)
        yield input
    else:
        # a filename was given: open it (with transparent decompression)
        yield smart_open(input)
def deaccent(text):
    """
    Remove accentuation from the given string. Input text is either a unicode
    string or utf8 encoded bytestring.

    Return input string with accents removed, as unicode.

    >>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
    u'Sef chomutovskych komunistu dostal postou bily prasek'
    """
    if not isinstance(text, unicode):
        # assume utf8 for byte strings, use default (strict) error handling
        text = text.decode('utf8')
    # decompose, drop the combining marks (category 'Mn'), recompose
    decomposed = unicodedata.normalize("NFD", text)
    stripped = u('').join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
    return unicodedata.normalize("NFC", stripped)
def copytree_hardlink(source, dest):
    """
    Recursively copy a directory ala shutils.copytree, but hardlink files
    instead of copying. Available on UNIX systems only.
    """
    # temporarily monkey-patch shutil's file-copy primitive with os.link,
    # restoring the original even if copytree raises
    original_copy2 = shutil.copy2
    try:
        shutil.copy2 = os.link
        shutil.copytree(source, dest)
    finally:
        shutil.copy2 = original_copy2
def tokenize(text, lowercase=False, deacc=False, errors="strict", to_lower=False, lower=False):
    """
    Iteratively yield tokens as unicode strings, removing accent marks
    and optionally lowercasing the unicode string by assigning True
    to one of the parameters, lowercase, to_lower, or lower.

    Input text may be either unicode or utf8-encoded byte string.

    The tokens on output are maximal contiguous sequences of alphabetic
    characters (no digits!).

    >>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
    [u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
    """
    # the three aliases are all accepted for backward compatibility
    lowercase = lowercase or to_lower or lower
    text = to_unicode(text, errors=errors)
    if lowercase:
        text = text.lower()
    if deacc:
        text = deaccent(text)
    for token_match in PAT_ALPHABETIC.finditer(text):
        yield token_match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
    """
    Convert a document into a list of tokens.

    This lowercases, tokenizes, de-accents (optional). -- the output are final
    tokens = unicode strings, that won't be processed any further.
    Tokens shorter than `min_len`, longer than `max_len`, or starting
    with '_' are dropped.
    """
    result = []
    for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore'):
        if min_len <= len(token) <= max_len and not token.startswith('_'):
            result.append(token)
    return result
def any2utf8(text, errors='strict', encoding='utf8'):
    """Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
    if isinstance(text, unicode):
        return text.encode('utf8')
    # bytestring -> unicode -> utf8 full circle, to ensure valid utf8
    decoded = unicode(text, encoding, errors=errors)
    return decoded.encode('utf8')

to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode."""
    if not isinstance(text, unicode):
        return unicode(text, encoding, errors=errors)
    return text

to_unicode = any2unicode
class SaveLoad(object):
    """
    Objects which inherit from this class have save/load functions, which un/pickle
    them to disk.

    This uses pickle for de/serializing, so objects must not contain
    unpicklable attributes, such as lambda functions etc.
    """
    @classmethod
    def load(cls, fname, mmap=None):
        """
        Load a previously saved object from file (also see `save`).

        If the object was saved with large arrays stored separately, you can load
        these arrays via mmap (shared memory) using `mmap='r'`. Default: don't use
        mmap, load large arrays as normal objects.

        If the file being loaded is compressed (either '.gz' or '.bz2'), then
        `mmap=None` must be set.  Load will raise an `IOError` if this condition
        is encountered.
        """
        logger.info("loading %s object from %s" % (cls.__name__, fname))

        compress, subname = SaveLoad._adapt_by_suffix(fname)

        obj = unpickle(fname)
        # re-attach any numpy/scipy arrays and nested SaveLoads that were
        # stored in side files next to `fname`
        obj._load_specials(fname, mmap, compress, subname)
        return obj

    def _load_specials(self, fname, mmap, compress, subname):
        """
        Loads any attributes that were stored specially, and gives the same
        opportunity to recursively included SaveLoad instances.
        """
        mmap_error = lambda x, y: IOError(
            'Cannot mmap compressed object %s in file %s. ' % (x, y) +
            'Use `load(fname, mmap=None)` or uncompress files manually.')

        # nested SaveLoad attributes were saved under `fname.<attrib>.*`
        for attrib in getattr(self, '__recursive_saveloads', []):
            cfname = '.'.join((fname, attrib))
            logger.info("loading %s recursively from %s.* with mmap=%s" % (
                attrib, cfname, mmap))
            getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)

        # plain numpy arrays stored in .npy/.npz side files
        for attrib in getattr(self, '__numpys', []):
            logger.info("loading %s from %s with mmap=%s" % (
                attrib, subname(fname, attrib), mmap))

            if compress:
                if mmap:
                    raise mmap_error(attrib, subname(fname, attrib))

                val = numpy.load(subname(fname, attrib))['val']
            else:
                val = numpy.load(subname(fname, attrib), mmap_mode=mmap)

            setattr(self, attrib, val)

        # scipy CSR/CSC matrices: the array-less object was pickled, its
        # data/indptr/indices arrays stored separately
        for attrib in getattr(self, '__scipys', []):
            logger.info("loading %s from %s with mmap=%s" % (
                attrib, subname(fname, attrib), mmap))
            sparse = unpickle(subname(fname, attrib))
            if compress:
                if mmap:
                    raise mmap_error(attrib, subname(fname, attrib))

                with numpy.load(subname(fname, attrib, 'sparse')) as f:
                    sparse.data = f['data']
                    sparse.indptr = f['indptr']
                    sparse.indices = f['indices']
            else:
                sparse.data = numpy.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
                sparse.indptr = numpy.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
                sparse.indices = numpy.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)

            setattr(self, attrib, sparse)

        # attributes listed in `ignore` at save time come back as None
        for attrib in getattr(self, '__ignoreds', []):
            logger.info("setting ignored attribute %s to None" % (attrib))
            setattr(self, attrib, None)

    @staticmethod
    def _adapt_by_suffix(fname):
        """Give appropriate compress setting and filename formula"""
        if fname.endswith('.gz') or fname.endswith('.bz2'):
            compress = True
            subname = lambda *args: '.'.join(list(args) + ['npz'])
        else:
            compress = False
            subname = lambda *args: '.'.join(list(args) + ['npy'])
        return (compress, subname)

    def _smart_save(self, fname, separately=None, sep_limit=10 * 1024**2,
                    ignore=frozenset(), pickle_protocol=2):
        """
        Save the object to file (also see `load`).

        If `separately` is None, automatically detect large
        numpy/scipy.sparse arrays in the object being stored, and store
        them into separate files. This avoids pickle memory errors and
        allows mmap'ing large arrays back on load efficiently.

        You can also set `separately` manually, in which case it must be
        a list of attribute names to be stored in separate files. The
        automatic check is not performed in this case.

        `ignore` is a set of attribute names to *not* serialize (file
        handles, caches etc). On subsequent load() these attributes will
        be set to None.

        `pickle_protocol` defaults to 2 so the pickled object can be imported
        in both Python 2 and 3.
        """
        logger.info(
            "saving %s object under %s, separately %s" % (
                self.__class__.__name__, fname, separately))

        compress, subname = SaveLoad._adapt_by_suffix(fname)

        # temporarily strip the specially-handled attributes, pickle the
        # rest, then restore them no matter what
        restores = self._save_specials(fname, separately, sep_limit, ignore, pickle_protocol,
                                       compress, subname)
        try:
            pickle(self, fname, protocol=pickle_protocol)
        finally:
            # restore attribs handled specially
            for obj, asides in restores:
                for attrib, val in iteritems(asides):
                    setattr(obj, attrib, val)

    def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
        """
        Save aside any attributes that need to be handled separately, including
        by recursion any attributes that are themselves SaveLoad instances.

        Returns a list of (obj, {attrib: value, ...}) settings that the caller
        should use to restore each object's attributes that were set aside
        during the default pickle().
        """
        asides = {}
        sparse_matrices = (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)
        if separately is None:
            # auto-detect large arrays worth storing outside the pickle
            separately = []
            for attrib, val in iteritems(self.__dict__):
                if isinstance(val, numpy.ndarray) and val.size >= sep_limit:
                    separately.append(attrib)
                elif isinstance(val, sparse_matrices) and val.nnz >= sep_limit:
                    separately.append(attrib)

        # whatever's in `separately` or `ignore` at this point won't get pickled
        for attrib in separately + list(ignore):
            if hasattr(self, attrib):
                asides[attrib] = getattr(self, attrib)
                delattr(self, attrib)

        recursive_saveloads = []
        restores = []
        for attrib, val in iteritems(self.__dict__):
            if hasattr(val, '_save_specials'):  # better than 'isinstance(val, SaveLoad)' if IPython reloading
                recursive_saveloads.append(attrib)
                cfname = '.'.join((fname,attrib))
                restores.extend(val._save_specials(cfname, None, sep_limit, ignore,
                                                   pickle_protocol, compress, subname))

        try:
            numpys, scipys, ignoreds = [], [], []
            for attrib, val in iteritems(asides):
                if isinstance(val, numpy.ndarray) and attrib not in ignore:
                    numpys.append(attrib)
                    logger.info("storing numpy array '%s' to %s" % (
                        attrib, subname(fname, attrib)))

                    if compress:
                        numpy.savez_compressed(subname(fname, attrib), val=numpy.ascontiguousarray(val))
                    else:
                        numpy.save(subname(fname, attrib), numpy.ascontiguousarray(val))

                elif isinstance(val, (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)) and attrib not in ignore:
                    scipys.append(attrib)
                    logger.info("storing scipy.sparse array '%s' under %s" % (
                        attrib, subname(fname, attrib)))

                    if compress:
                        numpy.savez_compressed(subname(fname, attrib, 'sparse'),
                                               data=val.data,
                                               indptr=val.indptr,
                                               indices=val.indices)
                    else:
                        numpy.save(subname(fname, attrib, 'data'), val.data)
                        numpy.save(subname(fname, attrib, 'indptr'), val.indptr)
                        numpy.save(subname(fname, attrib, 'indices'), val.indices)

                    # pickle the matrix shell without its arrays, then put them back
                    data, indptr, indices = val.data, val.indptr, val.indices
                    val.data, val.indptr, val.indices = None, None, None

                    try:
                        # store array-less object
                        pickle(val, subname(fname, attrib), protocol=pickle_protocol)
                    finally:
                        val.data, val.indptr, val.indices = data, indptr, indices
                else:
                    logger.info("not storing attribute %s" % (attrib))
                    ignoreds.append(attrib)

            # bookkeeping consumed by _load_specials() at load time
            self.__dict__['__numpys'] = numpys
            self.__dict__['__scipys'] = scipys
            self.__dict__['__ignoreds'] = ignoreds
            self.__dict__['__recursive_saveloads'] = recursive_saveloads
        except:
            # restore the attributes if exception-interrupted
            for attrib, val in iteritems(asides):
                setattr(self, attrib, val)
            raise
        return restores + [(self, asides)]

    def save(self, fname_or_handle, separately=None, sep_limit=10 * 1024**2,
             ignore=frozenset(), pickle_protocol=2):
        """
        Save the object to file (also see `load`).

        `fname_or_handle` is either a string specifying the file name to
        save to, or an open file-like object which can be written to. If
        the object is a file handle, no special array handling will be
        performed; all attributes will be saved to the same file.

        If `separately` is None, automatically detect large
        numpy/scipy.sparse arrays in the object being stored, and store
        them into separate files. This avoids pickle memory errors and
        allows mmap'ing large arrays back on load efficiently.

        You can also set `separately` manually, in which case it must be
        a list of attribute names to be stored in separate files. The
        automatic check is not performed in this case.

        `ignore` is a set of attribute names to *not* serialize (file
        handles, caches etc). On subsequent load() these attributes will
        be set to None.

        `pickle_protocol` defaults to 2 so the pickled object can be imported
        in both Python 2 and 3.
        """
        try:
            # if it quacks like an open file handle, dump everything into it
            _pickle.dump(self, fname_or_handle, protocol=pickle_protocol)
            logger.info("saved %s object" % self.__class__.__name__)
        except TypeError:  # `fname_or_handle` does not have write attribute
            self._smart_save(fname_or_handle, separately, sep_limit, ignore,
                             pickle_protocol=pickle_protocol)
#endclass SaveLoad
def identity(p):
    """Identity function, for flows that don't accept lambda (pickling etc)."""
    return p
def get_max_id(corpus):
    """
    Return the highest feature id that appears in the corpus.

    For empty corpora (no features at all), return -1.
    """
    maxid = -1
    for document in corpus:
        # prepend -1 so max() never sees an empty sequence
        doc_max = max([-1] + [fieldid for fieldid, _ in document])
        maxid = max(maxid, doc_max)
    return maxid
class FakeDict(object):
    """
    Act like a read-only dict mapping integer -> str(integer), for ids in
    the range [0, num_terms).

    This is meant to avoid allocating a real dictionary when `num_terms`
    is huge, which is a waste of memory.
    """

    def __init__(self, num_terms):
        self.num_terms = num_terms

    def __str__(self):
        return "FakeDict(num_terms=%s)" % self.num_terms

    def __getitem__(self, val):
        if not (0 <= val < self.num_terms):
            raise ValueError("internal id out of bounds (%s, expected <0..%s))" %
                             (val, self.num_terms))
        return str(val)

    def iteritems(self):
        for i in xrange(self.num_terms):
            yield i, str(i)

    def keys(self):
        """
        Override the dict.keys() function, which is used to determine the maximum
        internal id of a corpus = the vocabulary dimensionality.

        HACK: To avoid materializing the whole `range(0, self.num_terms)`, this returns
        the highest id = `[self.num_terms - 1]` only.
        """
        return [self.num_terms - 1]

    def __len__(self):
        return self.num_terms

    def get(self, val, default=None):
        if 0 <= val < self.num_terms:
            return str(val)
        return default
def dict_from_corpus(corpus):
    """
    Scan corpus for all word ids that appear in it, then construct and return a mapping
    which maps each `wordId -> str(wordId)`.

    This function is used whenever *words* need to be displayed (as opposed to just
    their ids) but no wordId->word mapping was provided. The resulting mapping
    only covers words actually used in the corpus, up to the highest wordId found.
    """
    # FakeDict avoids materializing a real dict over the whole id range
    return FakeDict(1 + get_max_id(corpus))
def is_corpus(obj):
    """
    Check whether `obj` is a corpus. Return (is_corpus, new) 2-tuple, where
    `new is obj` if `obj` was an iterable, or `new` yields the same sequence as
    `obj` if it was an iterator.

    `obj` is a corpus if it supports iteration over documents, where a document
    is in turn anything that acts as a sequence of 2-tuples (int, float).

    Note: An "empty" corpus (empty input sequence) is ambiguous, so in this case the
    result is forcefully defined as `is_corpus=False`.
    """
    try:
        if 'Corpus' in obj.__class__.__name__:  # the most common case, quick hack
            return True, obj
    except:
        pass
    try:
        if hasattr(obj, 'next') or hasattr(obj, '__next__'):
            # the input is an iterator object, meaning once we call next()
            # that element could be gone forever. we must be careful to put
            # whatever we retrieve back again
            doc1 = next(obj)
            obj = itertools.chain([doc1], obj)
        else:
            doc1 = next(iter(obj))  # empty corpus is resolved to False here
        if len(doc1) == 0:  # sparse documents must have a __len__ function (list, tuple...)
            return True, obj  # the first document is empty=>assume this is a corpus
        id1, val1 = next(iter(doc1))  # if obj is a numpy array, it resolves to False here
        id1, val1 = int(id1), float(val1)  # must be a 2-tuple (integer, float)
    except:
        # any failure above (not iterable, wrong document shape, wrong element
        # types) means `obj` is not a corpus; the bare excepts are deliberate
        # duck-typing, so do not narrow them
        return False, obj
    return True, obj
def get_my_ip():
    """
    Try to obtain our external ip (from the pyro nameserver's point of view)

    This tries to sidestep the issue of bogus `/etc/hosts` entries and other
    local misconfigurations, which often mess up hostname resolution.

    If all else fails, fall back to simple `socket.gethostbyname()` lookup.
    """
    import socket
    try:
        # connect a UDP socket towards the Pyro nameserver; the socket's own
        # address is then the interface the OS would use to reach it
        import Pyro4
        # we know the nameserver must exist, so use it as our anchor point
        ns = Pyro4.naming.locateNS()
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((ns._pyroUri.host, ns._pyroUri.port))
        result, port = s.getsockname()
    except:
        try:
            # see what ifconfig says about our default interface
            # NOTE(review): `commands` is Python 2 only and the ifconfig
            # output parsing is fragile -- confirm before relying on it
            import commands
            result = commands.getoutput("ifconfig").split("\n")[1].split()[1][5:]
            if len(result.split('.')) != 4:
                raise Exception()
        except:
            # give up, leave the resolution to gethostbyname
            result = socket.gethostbyname(socket.gethostname())
    return result
class RepeatCorpus(SaveLoad):
    """
    Used in the tutorial on distributed computing and likely not useful anywhere else.
    """

    def __init__(self, corpus, reps):
        """
        Wrap a `corpus` as another corpus of length `reps`. This is achieved by
        repeating documents from `corpus` over and over again, until the requested
        length `len(result)==reps` is reached. Repetition is done
        on-the-fly=efficiently, via `itertools`.

        >>> corpus = [[(1, 0.5)], []] # 2 documents
        >>> list(RepeatCorpus(corpus, 5)) # repeat 2.5 times to get 5 documents
        [[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)]]
        """
        self.corpus = corpus
        self.reps = reps

    def __iter__(self):
        # cycle() restarts the corpus endlessly; islice caps it at `reps` docs
        return itertools.islice(itertools.cycle(self.corpus), self.reps)
class RepeatCorpusNTimes(SaveLoad):
    """Corpus wrapper that concatenates `n` full passes over another corpus."""
    def __init__(self, corpus, n):
        """
        Repeat a `corpus` `n` times.
        >>> corpus = [[(1, 0.5)], []]
        >>> list(RepeatCorpusNTimes(corpus, 3)) # repeat 3 times
        [[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)], []]
        """
        self.corpus = corpus
        self.n = n
    def __iter__(self):
        # n back-to-back sweeps over the wrapped corpus, chained lazily
        # without copying any documents.
        sweeps = itertools.repeat(self.corpus, self.n)
        return itertools.chain.from_iterable(sweeps)
class ClippedCorpus(SaveLoad):
    """Corpus wrapper exposing only the first `max_docs` documents of another corpus."""
    def __init__(self, corpus, max_docs=None):
        """
        Return a corpus that is the "head" of input iterable `corpus`.
        Any documents after `max_docs` are ignored. This effectively limits the
        length of the returned corpus to <= `max_docs`. Set `max_docs=None` for
        "no limit", effectively wrapping the entire input corpus.
        """
        self.corpus = corpus
        self.max_docs = max_docs
    def __iter__(self):
        # islice(corpus, None) iterates the whole corpus, matching max_docs=None
        return itertools.islice(self.corpus, self.max_docs)
    def __len__(self):
        # BUGFIX: with max_docs=None the old `min(self.max_docs, len(...))`
        # raised TypeError on Python 3 (and returned None on Python 2);
        # "no limit" simply means the wrapped corpus' own length.
        if self.max_docs is None:
            return len(self.corpus)
        return min(self.max_docs, len(self.corpus))
class SlicedCorpus(SaveLoad):
    def __init__(self, corpus, slice_):
        """
        Return a corpus that is the slice of input iterable `corpus`.
        Negative slicing can only be used if the corpus is indexable.
        Otherwise, the corpus will be iterated over.
        Slice can also be a numpy.ndarray to support fancy indexing.
        NOTE: calculating the size of a SlicedCorpus is expensive
        when using a slice as the corpus has to be iterated over once.
        Using a list or numpy.ndarray does not have this drawback, but
        consumes more memory.
        """
        self.corpus = corpus
        self.slice_ = slice_
        # cached result for __len__; computed lazily on first request
        self.length = None
    def __iter__(self):
        # Fast path: corpora carrying a non-empty `index` support arbitrary
        # (including negative/fancy) slicing via docbyoffset().
        # NOTE(review): assumes corpus.index holds per-document offsets
        # usable with corpus.docbyoffset() — defined elsewhere; confirm.
        if hasattr(self.corpus, 'index') and len(self.corpus.index) > 0:
            return (self.corpus.docbyoffset(i) for i in
                    self.corpus.index[self.slice_])
        else:
            # Streaming fallback: this branch requires `slice_` to be a real
            # slice object (it reads .start/.stop/.step); islice also rejects
            # negative bounds, hence the docstring's indexability caveat.
            return itertools.islice(self.corpus, self.slice_.start,
                                    self.slice_.stop, self.slice_.step)
    def __len__(self):
        # check cached length, calculate if needed
        if self.length is None:
            if isinstance(self.slice_, (list, numpy.ndarray)):
                self.length = len(self.slice_)
            else:
                # full pass over self — expensive for slice-based views
                self.length = sum(1 for x in self)
        return self.length
def safe_unichr(intval):
    """
    Return the unicode character for code point `intval`, working around
    narrow Python 2 builds where `unichr()` only accepts values < 0x10000.
    """
    try:
        return unichr(intval)
    except ValueError:
        # ValueError: unichr() arg not in range(0x10000) (narrow Python build)
        s = "\\U%08x" % intval
        # return UTF16 surrogate pair
        # NOTE(review): byte-string .decode() is Python 2 only; presumably
        # `unichr` is aliased for Python 3 elsewhere in this module, where
        # chr() never raises here — confirm.
        return s.decode('unicode-escape')
def decode_htmlentities(text):
    """
    Decode HTML entities in text, coded as hex, decimal or named.
    Adapted from http://github.com/sku/python-twitter-ircbot/blob/321d94e0e40d0acc92f5bf57d126b57369da70de/html_decode.py
    >>> u = u'E tu vivrai nel terrore - L'aldilà (1981)'
    >>> print(decode_htmlentities(u).encode('UTF-8'))
    E tu vivrai nel terrore - L'aldilà (1981)
    >>> print(decode_htmlentities("l'eau"))
    l'eau
    >>> print(decode_htmlentities("foo < bar"))
    foo < bar
    """
    def substitute_entity(match):
        # Replacement callback for one matched entity.
        # NOTE(review): group numbering assumes RE_HTML_ENTITY (defined
        # elsewhere in this module) captures: (1) optional '#' marker,
        # (2) optional hex marker 'x'/'X', (3) the entity body — confirm.
        try:
            ent = match.group(3)
            if match.group(1) == "#":
                # decoding by number
                if match.group(2) == '':
                    # number is in decimal
                    return safe_unichr(int(ent))
                elif match.group(2) in ['x', 'X']:
                    # number is in hex
                    return safe_unichr(int(ent, 16))
            else:
                # they were using a name; n2cp maps entity name -> code point
                cp = n2cp.get(ent)
                if cp:
                    return safe_unichr(cp)
                else:
                    # unknown entity name: leave the original text untouched
                    return match.group()
        except:
            # in case of errors, return original input
            return match.group()
    return RE_HTML_ENTITY.sub(substitute_entity, text)
def chunkize_serial(iterable, chunksize, as_numpy=False):
    """
    Yield elements from `iterable` grouped into lists of `chunksize` items.
    The final chunk may be shorter when the input length is not an exact
    multiple of `chunksize`.
    >>> print(list(grouper(range(10), 3)))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    """
    import numpy
    stream = iter(iterable)
    while True:
        if as_numpy:
            # convert each document to a 2d numpy array (~6x faster when transmitting
            # chunk data over the wire, in Pyro)
            wrapped = [[numpy.array(doc) for doc in itertools.islice(stream, int(chunksize))]]
        else:
            wrapped = [list(itertools.islice(stream, int(chunksize)))]
        if not wrapped[0]:
            # input exhausted
            return
        # hand the chunk over via pop(), so this frame keeps no dangling
        # reference to it after the yield
        yield wrapped.pop()
grouper = chunkize_serial
class InputQueue(multiprocessing.Process):
    """
    Background process that reads documents from `corpus`, groups them into
    chunks of `chunksize`, and feeds them into the multiprocessing queue `q`.
    A `None` sentinel is enqueued after the last chunk to signal exhaustion.
    """
    def __init__(self, q, corpus, chunksize, maxsize, as_numpy):
        super(InputQueue, self).__init__()
        self.q = q
        self.maxsize = maxsize
        self.corpus = corpus
        self.chunksize = chunksize
        self.as_numpy = as_numpy
    def run(self):
        """Producer loop: chunk the corpus and push chunks onto the queue."""
        if self.as_numpy:
            import numpy  # don't clutter the global namespace with a dependency on numpy
        it = iter(self.corpus)
        while True:
            chunk = itertools.islice(it, self.chunksize)
            if self.as_numpy:
                # HACK XXX convert documents to numpy arrays, to save memory.
                # This also gives a scipy warning at runtime:
                # "UserWarning: indices array has non-integer dtype (float64)"
                wrapped_chunk = [[numpy.asarray(doc) for doc in chunk]]
            else:
                wrapped_chunk = [list(chunk)]
            if not wrapped_chunk[0]:
                # corpus exhausted: send the None sentinel and stop
                self.q.put(None, block=True)
                break
            try:
                qsize = self.q.qsize()
            except NotImplementedError:
                # qsize() is not implemented on all platforms
                qsize = '?'
            logger.debug("prepared another chunk of %i documents (qsize=%s)" %
                         (len(wrapped_chunk[0]), qsize))
            # pop() rather than a direct reference, so no dangling copy stays behind
            self.q.put(wrapped_chunk.pop(), block=True)
#endclass InputQueue
if os.name == 'nt':
    logger.info("detected Windows; aliasing chunkize to chunkize_serial")
    def chunkize(corpus, chunksize, maxsize=0, as_numpy=False):
        """
        Windows variant: multiprocessing-backed prefetching is not used here,
        so this simply delegates to `chunkize_serial` (the `maxsize` argument
        is accepted for interface compatibility but ignored).
        """
        for chunk in chunkize_serial(corpus, chunksize, as_numpy=as_numpy):
            yield chunk
else:
    def chunkize(corpus, chunksize, maxsize=0, as_numpy=False):
        """
        Split a stream of values into smaller chunks.
        Each chunk is of length `chunksize`, except the last one which may be smaller.
        A once-only input stream (`corpus` from a generator) is ok, chunking is done
        efficiently via itertools.
        If `maxsize > 1`, don't wait idly in between successive chunk `yields`, but
        rather keep filling a short queue (of size at most `maxsize`) with forthcoming
        chunks in advance. This is realized by starting a separate process, and is
        meant to reduce I/O delays, which can be significant when `corpus` comes
        from a slow medium (like harddisk).
        If `maxsize==0`, don't fool around with parallelism and simply yield the chunksize
        via `chunkize_serial()` (no I/O optimizations).
        >>> for chunk in chunkize(range(10), 4): print(chunk)
        [0, 1, 2, 3]
        [4, 5, 6, 7]
        [8, 9]
        """
        assert chunksize > 0
        if maxsize > 0:
            # spawn a daemon producer process that prefetches chunks into `q`
            q = multiprocessing.Queue(maxsize=maxsize)
            worker = InputQueue(q, corpus, chunksize, maxsize=maxsize, as_numpy=as_numpy)
            worker.daemon = True
            worker.start()
            while True:
                # the producer enqueues a final None sentinel when the corpus is exhausted
                chunk = [q.get(block=True)]
                if chunk[0] is None:
                    break
                yield chunk.pop()
        else:
            for chunk in chunkize_serial(corpus, chunksize, as_numpy=as_numpy):
                yield chunk
def smart_extension(fname, ext):
    """
    Insert extension `ext` into filename `fname`, keeping any trailing
    compression suffix ('.bz2' or '.gz') at the very end.
    E.g. smart_extension('corpus.mm.bz2', '.index') == 'corpus.mm.index.bz2'.
    """
    root, old_ext = os.path.splitext(fname)
    # .bz2 is checked before .gz, mirroring the precedence of the original
    for compress_suffix in ('.bz2', '.gz'):
        if old_ext.endswith(compress_suffix):
            kept = old_ext[:-len(compress_suffix)]
            return root + kept + ext + compress_suffix
    return root + old_ext + ext
def pickle(obj, fname, protocol=2):
    """Serialize object `obj` to file `fname` via pickle.
    The default `protocol=2` keeps the pickled output readable by both
    Python 2.x and 3.x.
    """
    # open in binary mode ('b'), required on Windows
    with smart_open(fname, 'wb') as handle:
        _pickle.dump(obj, handle, protocol=protocol)
def unpickle(fname):
    """Load and return a pickled object from file `fname`."""
    with smart_open(fname) as handle:
        # smart_open streams may lack readline() (e.g. when reading from S3),
        # so _pickle.load() can't be used; deserialize the full payload instead.
        payload = handle.read()
    return _pickle.loads(payload)
def revdict(d):
    """
    Reverse a dictionary mapping, i.e. map values back to their keys.
    When two keys map to the same value, only one of them will be kept in the
    result (which one is kept is arbitrary).
    """
    reversed_pairs = ((v, k) for (k, v) in iteritems(d))
    return dict(reversed_pairs)
def toptexts(query, texts, index, n=10):
    """
    Debug helper: inspect the top `n` documents most similar to `query`,
    according to a similarity index `index`, to see if they are actually
    related to the query.
    `texts` is any object that can return something insightful for each document
    via `texts[docid]`, such as its fulltext or snippet.
    Return a list of 3-tuples (docid, doc's similarity to the query, texts[docid]).
    """
    # similarity of the query against every document in the corpus
    sims = index[query]
    ranked = sorted(enumerate(sims), key=lambda item: item[1], reverse=True)
    # keep only the n best hits, attaching their display text
    return [(docid, score, texts[docid]) for docid, score in ranked[:n]]
def randfname(prefix='gensim'):
    """
    Return a fresh path inside the system temp directory: `prefix` followed
    by a random hexadecimal suffix. No file is actually created.
    """
    suffix = '%x' % random.randint(0, 0xffffff)
    return os.path.join(tempfile.gettempdir(), prefix + suffix)
def upload_chunked(server, docs, chunksize=1000, preprocess=None):
    """
    Memory-friendly upload of documents to a SimServer (or Pyro SimServer proxy).
    Use this function to train or index large collections -- avoid sending the
    entire corpus over the wire as a single Pyro in-memory object. The documents
    will be sent in smaller chunks, of `chunksize` documents each.
    """
    start = 0
    for chunk in grouper(docs, chunksize):
        end = start + len(chunk)
        logger.info("uploading documents %i-%i" % (start, end - 1))
        if preprocess is not None:
            # tokenize client-side and drop the raw text before sending
            processed = []
            for doc in chunk:
                doc['tokens'] = preprocess(doc['text'])
                del doc['text']
                processed.append(doc)
            chunk = processed
        server.buffer(chunk)
        start = end
def getNS():
    """
    Return a Pyro name server proxy. If there is no name server running,
    start one on 0.0.0.0 (all interfaces), as a background process.
    """
    import Pyro4
    try:
        return Pyro4.locateNS()
    except Pyro4.errors.NamingError:
        logger.info("Pyro name server not found; starting a new one")
        os.system("python -m Pyro4.naming -n 0.0.0.0 &")
        # TODO: spawn a proper daemon ala http://code.activestate.com/recipes/278731/ ?
        # like this, if there's an error somewhere, we'll never know... (and the loop
        # below will block). And it probably doesn't work on windows, either.
        while True:
            # Busy-poll until the freshly spawned name server answers.
            # NOTE(review): the bare except swallows every error, so if the
            # spawn above failed this loop never terminates.
            try:
                return Pyro4.locateNS()
            except:
                pass
def pyro_daemon(name, obj, random_suffix=False, ip=None, port=None):
    """
    Register object with name server (starting the name server if not running
    yet) and block until the daemon is terminated. The object is registered under
    `name`, or `name`+ some random suffix if `random_suffix` is set.
    """
    if random_suffix:
        # e.g. "myservice.1a2b3c" — lets several instances coexist
        name += '.' + hex(random.randint(0, 0xffffff))[2:]
    import Pyro4
    with getNS() as ns:
        # port 0 means "let the OS pick a free port"
        with Pyro4.Daemon(ip or get_my_ip(), port or 0) as daemon:
            # register server for remote access
            uri = daemon.register(obj, name)
            # drop any stale registration under the same name before re-registering
            ns.remove(name)
            ns.register(name, uri)
            logger.info("%s registered with nameserver (URI '%s')" % (name, uri))
            # blocks until the daemon is shut down
            daemon.requestLoop()
if HAS_PATTERN:
    def lemmatize(content, allowed_tags=re.compile('(NN|VB|JJ|RB)'), light=False,
                  stopwords=frozenset(), min_length=2, max_length=15):
        """
        This function is only available when the optional 'pattern' package is installed.
        Use the English lemmatizer from `pattern` to extract UTF8-encoded tokens in
        their base form=lemma, e.g. "are, is, being" -> "be" etc.
        This is a smarter version of stemming, taking word context into account.
        Only considers nouns, verbs, adjectives and adverbs by default (=all other lemmas are discarded).
        >>> lemmatize('Hello World! How is it going?! Nonexistentword, 21')
        ['world/NN', 'be/VB', 'go/VB', 'nonexistentword/NN']
        >>> lemmatize('The study ranks high.')
        ['study/NN', 'rank/VB', 'high/JJ']
        >>> lemmatize('The ranks study hard.')
        ['rank/NN', 'study/VB', 'hard/RB']
        """
        if light:
            import warnings
            warnings.warn("The light flag is no longer supported by pattern.")
        # tokenization in `pattern` is weird; it gets thrown off by non-letters,
        # producing '==relate/VBN' or '**/NN'... try to preprocess the text a little
        # FIXME this throws away all fancy parsing cues, including sentence structure,
        # abbreviations etc.
        # NOTE(review): `u`, `tokenize` and `parse` come from imports elsewhere
        # in this module (`parse` presumably from pattern.en) — confirm.
        content = u(' ').join(tokenize(content, lower=True, errors='ignore'))
        parsed = parse(content, lemmata=True, collapse=False)
        result = []
        for sentence in parsed:
            for token, tag, _, _, lemma in sentence:
                # filter by length, skip pattern's '_'-prefixed artifacts and stopwords
                if min_length <= len(lemma) <= max_length and not lemma.startswith('_') and lemma not in stopwords:
                    if allowed_tags.match(tag):
                        # keep only the coarse POS category, e.g. 'VBD' -> 'VB'
                        lemma += "/" + tag[:2]
                        result.append(lemma.encode('utf8'))
        return result
#endif HAS_PATTERN
def mock_data_row(dim=1000, prob_nnz=0.5, lam=1.0):
    """
    Create a random gensim sparse vector. Each coordinate is nonzero with
    probability `prob_nnz`; each non-zero coordinate value is 1 + a draw from
    a Poisson distribution with parameter lambda equal to `lam`.
    """
    # draw all inclusion decisions up front, then one Poisson sample per kept
    # coordinate (in index order, so the RNG call sequence stays deterministic)
    nnz = numpy.random.uniform(size=(dim,))
    row = []
    for i in range(dim):
        if nnz[i] < prob_nnz:
            row.append((i, float(numpy.random.poisson(lam=lam) + 1.0)))
    return row
def mock_data(n_items=1000, dim=1000, prob_nnz=0.5, lam=1.0):
    """
    Create a random gensim-style corpus (a list of `n_items` sparse documents,
    each a list of (int, float) tuples), to be used as a mock corpus.
    Each document is drawn independently by `mock_data_row`.
    """
    return [mock_data_row(dim=dim, prob_nnz=prob_nnz, lam=lam)
            for _ in range(n_items)]
def prune_vocab(vocab, min_reduce, trim_rule=None):
    """
    Remove all entries from the `vocab` dictionary with count smaller than `min_reduce`.
    Modifies `vocab` in place; returns the sum of all counts that were pruned.
    """
    pruned_total = 0
    initial_size = len(vocab)
    # iterate over a snapshot, since we delete from `vocab` while looping
    for word, count in list(vocab.items()):
        if not keep_vocab_item(word, count, min_reduce, trim_rule):  # count <= min_reduce
            pruned_total += count
            del vocab[word]
    logger.info("pruned out %i tokens with count <=%i (before %i, after %i)",
                initial_size - len(vocab), min_reduce, initial_size, len(vocab))
    return pruned_total
# Return codes for custom vocabulary trim rules (see `keep_vocab_item`):
RULE_DEFAULT = 0  # defer to the default `count >= min_count` test
RULE_DISCARD = 1  # always drop the word
RULE_KEEP = 2  # always keep the word
def keep_vocab_item(word, count, min_count, trim_rule=None):
    """
    Decide whether `word` (seen `count` times) survives vocabulary pruning.
    Without a `trim_rule`, keep the word iff `count >= min_count`. A custom
    `trim_rule(word, count, min_count)` may force the decision by returning
    RULE_KEEP or RULE_DISCARD; any other return value (e.g. RULE_DEFAULT)
    defers to the default count test.
    """
    default_keep = count >= min_count
    if trim_rule is None:
        return default_keep
    verdict = trim_rule(word, count, min_count)
    if verdict == RULE_KEEP:
        return True
    if verdict == RULE_DISCARD:
        return False
    return default_keep
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.
    Backported from Python 2.7 as it's implemented as pure python on stdlib.
    Raises subprocess.CalledProcessError on a non-zero exit status, with the
    captured stdout attached as `error.output`.
    >>> check_output(['/usr/bin/python', '--version'])
    Python 2.6.2
    Added extra KeyboardInterrupt handling
    """
    # BUGFIX: start the process *outside* the try block. If Popen itself
    # failed (or was interrupted), `process` would be unbound and the
    # terminate() call below would mask the real error with a NameError.
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    try:
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            error = subprocess.CalledProcessError(retcode, cmd)
            # attach the captured stdout so callers can inspect it
            error.output = output
            raise error
        return output
    except KeyboardInterrupt:
        # don't let the child outlive us on Ctrl-C
        process.terminate()
        raise
| gpl-3.0 |
onshape-public/onshape-clients | python/onshape_client/oas/models/bt_closed_curve_filter1206.py | 1 | 6979 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_closed_curve_filter1206_all_of
except ImportError:
bt_closed_curve_filter1206_all_of = sys.modules[
"onshape_client.oas.models.bt_closed_curve_filter1206_all_of"
]
try:
from onshape_client.oas.models import bt_query_filter183
except ImportError:
bt_query_filter183 = sys.modules["onshape_client.oas.models.bt_query_filter183"]
class BTClosedCurveFilter1206(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # no enum-constrained or validated attributes on this model
    allowed_values = {}
    validations = {}
    additional_properties_type = None
    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "bt_type": (str,),  # noqa: E501
            "is_closed": (bool,),  # noqa: E501
        }
    @staticmethod
    def discriminator():
        # this model has no discriminator field
        return None
    # python attribute name -> JSON key in the API definition
    attribute_map = {
        "bt_type": "btType",  # noqa: E501
        "is_closed": "isClosed",  # noqa: E501
    }
    # internal bookkeeping attributes that are set directly, bypassing
    # the model's __setattr__ validation machinery
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
            "_composed_instances",
            "_var_name_to_model_instances",
            "_additional_properties_model_instances",
        ]
    )
    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_closed_curve_filter1206.BTClosedCurveFilter1206 - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _from_server (bool): True if the data is from the server
                                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            bt_type (str): [optional] # noqa: E501
            is_closed (bool): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        constant_args = {
            "_check_type": _check_type,
            "_path_to_item": _path_to_item,
            "_from_server": _from_server,
            "_configuration": _configuration,
        }
        required_args = {}
        # remove args whose value is Null because they are unset
        required_arg_names = list(required_args.keys())
        for required_arg_name in required_arg_names:
            if required_args[required_arg_name] is nulltype.Null:
                del required_args[required_arg_name]
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # distribute the incoming values across the composed (allOf) schemas
        composed_info = validate_get_composed_info(constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name in unused_args
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and not self._additional_properties_model_instances
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @staticmethod
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        return {
            "anyOf": [],
            "allOf": [
                bt_closed_curve_filter1206_all_of.BTClosedCurveFilter1206AllOf,
                bt_query_filter183.BTQueryFilter183,
            ],
            "oneOf": [],
        }
| mit |
kylef/refract.py | refract/elements/primitives.py | 1 | 1663 | from refract.elements.base import Element, Attributes, Metadata
class String(Element):
    """
    Refract String Element
    >>> String(content='Hello')
    """
    element = 'string'
    def __init__(self, meta: Metadata = None, attributes: Attributes = None,
                 content: str = None) -> None:
        # storage of meta/attributes/content is delegated to the base Element
        super().__init__(meta=meta, attributes=attributes, content=content)
    def __lt__(self, other: Element) -> bool:
        # order string elements by their underlying content
        return self.content < other.content
class Number(Element):
    """
    Refract Number Element
    >>> Number(content=5)
    """
    element = 'number'
    def __init__(self, meta: Metadata = None, attributes: Attributes = None,
                 content=None) -> None:
        # storage of meta/attributes/content is delegated to the base Element
        super().__init__(meta=meta, attributes=attributes, content=content)
    def __lt__(self, other: Element) -> bool:
        # order number elements by their underlying content
        return self.content < other.content
class Boolean(Element):
    """
    Refract Boolean Element
    >>> Boolean(content=True)
    """
    element = 'boolean'
    def __init__(self, meta: Metadata = None, attributes: Attributes = None,
                 content: bool = None) -> None:
        # storage of meta/attributes/content is delegated to the base Element
        super().__init__(meta=meta, attributes=attributes, content=content)
class Null(Element):
    """
    Refract Null Element
    >>> Null()
    """
    element = 'null'
    def __init__(self, meta: Metadata = None,
                 attributes: Attributes = None) -> None:
        # a null element never carries content
        super().__init__(meta=meta, attributes=attributes)
Nixonite/Chess-RPG | unichess.py | 1 | 1049 | from unicode import *
class UniChess:
    """Chess board state rendered with unicode piece glyphs."""
    def __init__(self):
        # ranks 0/1 and 6/7 start out empty until setup_board() populates them;
        # the four middle ranks begin as rows of '_' placeholders
        self._board = [[] for _ in range(8)]
        for rank in range(2, 6):
            self._board[rank] = ['_'] * 8
    def setup_board(self):
        """Place both armies in their standard starting position."""
        back_rank = ('R', 'N', 'B', 'Q', 'K', 'B', 'N', 'R')
        self._board[1] = [uni_chess_pieces['wP']] * 8
        self._board[6] = [uni_chess_pieces['bP']] * 8
        self._board[0] = [uni_chess_pieces['w' + piece] for piece in back_rank]
        self._board[7] = [uni_chess_pieces['b' + piece] for piece in back_rank]
        for rank in range(2, 6):
            self._board[rank] = ['_'] * 8
    def get_board(self):
        """Return the board as a list of '|'-joined rank strings."""
        return ['|'.join(rank) for rank in self._board]
    def set_board(self, value):
        self._board = value
    board = property(get_board, set_board)
heeraj123/oh-mainline | vendor/packages/twisted/doc/core/examples/mouse.py | 19 | 2392 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Example using MouseMan protocol with the SerialPort transport.
"""
# TODO set tty modes, etc.
# This works for me:
# speed 1200 baud; rows 0; columns 0; line = 0;
# intr = ^C; quit = ^\; erase = ^?; kill = ^U; eof = ^D;
# eol = <undef>; eol2 = <undef>; start = ^Q; stop = ^S; susp = ^Z;
# rprnt = ^R; werase = ^W; lnext = ^V; flush = ^O; min = 1; time = 0;
# -parenb -parodd cs7 hupcl -cstopb cread clocal -crtscts ignbrk
# -brkint ignpar -parmrk -inpck -istrip -inlcr -igncr -icrnl -ixon
# -ixoff -iuclc -ixany -imaxbel -opost -olcuc -ocrnl -onlcr -onocr
# -onlret -ofill -ofdel nl0 cr0 tab0 bs0 vt0 ff0 -isig -icanon -iexten
# -echo -echoe -echok -echonl -noflsh -xcase -tostop -echoprt -echoctl
# -echoke
import sys
from twisted.python import usage, log
from twisted.protocols.mice import mouseman
if sys.platform == 'win32':
    # win32 serial does not work yet!
    raise NotImplementedError, "The SerialPort transport does not currently support Win32"
    # NOTE(review): the two lines below are unreachable while the raise
    # above stands; they are kept for when win32 serial support lands.
    from twisted.internet import win32eventreactor
    win32eventreactor.install()
class Options(usage.Options):
    """Command-line options: serial device path, baud rate, optional log file."""
    optParameters = [
        ['port', 'p', '/dev/mouse', 'Device for serial mouse'],
        ['baudrate', 'b', '1200', 'Baudrate for serial mouse'],
        ['outfile', 'o', None, 'Logfile [default: sys.stdout]'],
    ]
class McFooMouse(mouseman.MouseMan):
    """MouseMan protocol handler that logs every button event and movement.
    Upper-case messages mark button presses, lower-case mark releases.
    """
    def down_left(self):
        log.msg("LEFT")
    def up_left(self):
        log.msg("left")
    def down_middle(self):
        log.msg("MIDDLE")
    def up_middle(self):
        log.msg("middle")
    def down_right(self):
        log.msg("RIGHT")
    def up_right(self):
        log.msg("right")
    def move(self, x, y):
        # relative movement deltas reported by the mouse
        log.msg("(%d,%d)" % (x, y))
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.internet.serialport import SerialPort
    o = Options()
    try:
        o.parseOptions()
    except usage.UsageError, errortext:
        # report the usage problem and exit non-zero (Python 2 syntax)
        print "%s: %s" % (sys.argv[0], errortext)
        print "%s: Try --help for usage details." % (sys.argv[0])
        raise SystemExit, 1
    # log to stdout unless an output file was requested
    logFile = sys.stdout
    if o.opts['outfile']:
        logFile = o.opts['outfile']
    log.startLogging(logFile)
    # wire the MouseMan protocol to the configured serial port, then run the reactor
    SerialPort(McFooMouse(), o.opts['port'], reactor, baudrate=int(o.opts['baudrate']))
    reactor.run()
| agpl-3.0 |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/lib2to3/fixes/fix_has_key.py | 39 | 3196 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for has_key().
Calls to .has_key() methods are expressed in terms of the 'in'
operator:
d.has_key(k) -> k in d
CAVEATS:
1) While the primary target of this fixer is dict.has_key(), the
fixer will change any has_key() method call, regardless of its
class.
2) Cases like this will not be converted:
m = d.has_key
if m(k):
...
Only *calls* to has_key() are converted. While it is possible to
convert the above to something like
m = d.__contains__
if m(k):
...
this is currently not done.
"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, parenthesize
class FixHasKey(fixer_base.BaseFix):
    """lib2to3 fixer rewriting `d.has_key(k)` calls into `k in d`."""
    BM_compatible = True
    # Two alternatives: a plain `x.has_key(arg)` call (first branch), and the
    # same call wrapped in `not ...` (second branch, bound to `negation`).
    PATTERN = """
    anchor=power<
        before=any+
        trailer< '.' 'has_key' >
        trailer<
            '('
            ( not(arglist | argument<any '=' any>) arg=any
            | arglist<(not argument<any '=' any>) arg=any ','>
            )
            ')'
        >
        after=any*
    >
    |
    negation=not_test<
        'not'
        anchor=power<
            before=any+
            trailer< '.' 'has_key' >
            trailer<
                '('
                ( not(arglist | argument<any '=' any>) arg=any
                | arglist<(not argument<any '=' any>) arg=any ','>
                )
                ')'
            >
        >
    >
    """
    def transform(self, node, results):
        """Build the replacement `arg [not] in before` comparison node."""
        assert results
        syms = self.syms
        if (node.parent.type == syms.not_test and
            self.pattern.match(node.parent)):
            # Don't transform a node matching the first alternative of the
            # pattern when its parent matches the second alternative
            return None
        negation = results.get("negation")
        anchor = results["anchor"]
        # preserve the original node's leading whitespace/comments
        prefix = node.prefix
        before = [n.clone() for n in results["before"]]
        arg = results["arg"].clone()
        after = results.get("after")
        if after:
            after = [n.clone() for n in after]
        # low-precedence argument expressions need parentheses on the left of `in`
        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
                        syms.or_test, syms.test, syms.lambdef, syms.argument):
            arg = parenthesize(arg)
        if len(before) == 1:
            before = before[0]
        else:
            before = pytree.Node(syms.power, before)
        before.prefix = " "
        n_op = Name("in", prefix=" ")
        if negation:
            # `not x.has_key(k)` becomes `k not in x`
            n_not = Name("not", prefix=" ")
            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
        new = pytree.Node(syms.comparison, (arg, n_op, before))
        if after:
            # the call had trailing trailers (e.g. attribute access); keep them
            # applied to the parenthesized comparison
            new = parenthesize(new)
            new = pytree.Node(syms.power, (new,) + tuple(after))
        # parenthesize when embedded in an expression of equal/higher precedence
        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
                                syms.and_expr, syms.shift_expr,
                                syms.arith_expr, syms.term,
                                syms.factor, syms.power):
            new = parenthesize(new)
        new.prefix = prefix
        return new
| gpl-2.0 |
guorendong/iridium-browser-ubuntu | third_party/chromite/lib/gdata_lib_unittest.py | 2 | 36491 | # Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the gdata_lib module."""
from __future__ import print_function
import getpass
import mox
import re
import atom.service
import gdata.projecthosting.client as gd_ph_client
import gdata.spreadsheet.service
from chromite.lib import cros_test_lib
from chromite.lib import gdata_lib
from chromite.lib import osutils
# pylint: disable=W0212,E1101
class GdataLibTest(cros_test_lib.OutputTestCase):
    """Tests for methods that escape/unescape strings for speadsheets."""
    def testPrepColNameForSS(self):
        """Column names are lowercased with spaces/slashes removed; idempotent."""
        tests = {
            'foo': 'foo',
            'Foo': 'foo',
            'FOO': 'foo',
            'foo bar': 'foobar',
            'Foo Bar': 'foobar',
            'F O O B A R': 'foobar',
            'Foo/Bar': 'foobar',
            'Foo Bar/Dude': 'foobardude',
            'foo/bar': 'foobar',
        }
        for col in tests:
            expected = tests[col]
            self.assertEquals(expected, gdata_lib.PrepColNameForSS(col))
            # prepping an already-prepped name must be a no-op
            self.assertEquals(expected, gdata_lib.PrepColNameForSS(expected))
    def testPrepValForSS(self):
        """Numeric-looking values get a leading quote so spreadsheets keep them as text."""
        tests = {
            None: None,
            '': '',
            'foo': 'foo',
            'foo123': 'foo123',
            '123': "'123",
            '1.2': "'1.2",
        }
        for val in tests:
            expected = tests[val]
            self.assertEquals(expected, gdata_lib.PrepValForSS(val))
    def testPrepRowForSS(self):
        """PrepRowForSS applies the per-value escaping to every cell in a row."""
        vals = {
            None: None,
            '': '',
            'foo': 'foo',
            'foo123': 'foo123',
            '123': "'123",
            '1.2': "'1.2",
        }
        # Create before and after rows (rowIn, rowOut).
        rowIn = {}
        rowOut = {}
        col = 'a'  # Column names not important.
        for valIn in vals:
            valOut = vals[valIn]
            rowIn[col] = valIn
            rowOut[col] = valOut
            col = chr(ord(col) + 1)  # Change column name.
        self.assertEquals(rowOut, gdata_lib.PrepRowForSS(rowIn))
    def testScrubValFromSS(self):
        """Scrubbing strips the leading quote added by PrepValForSS."""
        tests = {
            None: None,
            'foo': 'foo',
            '123': '123',
            "'123": '123',
        }
        for val in tests:
            expected = tests[val]
            self.assertEquals(expected, gdata_lib.ScrubValFromSS(val))
class CredsTest(cros_test_lib.MockOutputTestCase):
    """Tests related to user credentials."""
    # fixed fixture values shared by all tests below
    USER = 'somedude@chromium.org'
    PASSWORD = 'worldsbestpassword'
    DOCS_TOKEN = 'SomeDocsAuthToken'
    TRACKER_TOKEN = 'SomeTrackerAuthToken'
    @osutils.TempFileDecorator
    def testStoreLoadCreds(self):
        """Round-trip user/password through StoreCreds/LoadCreds on disk."""
        # This is the replay script for the test.
        creds = gdata_lib.Creds()
        # This is the test verification.
        with self.OutputCapturer():
            creds.SetCreds(self.USER, self.PASSWORD)
            self.assertEquals(self.USER, creds.user)
            self.assertEquals(self.PASSWORD, creds.password)
            self.assertTrue(creds.creds_dirty)
            creds.StoreCreds(self.tempfile)
            self.assertEquals(self.USER, creds.user)
            self.assertEquals(self.PASSWORD, creds.password)
            # storing must clear the dirty flag
            self.assertFalse(creds.creds_dirty)
            # Clear user/password before loading from just-created file.
            creds.user = None
            creds.password = None
            self.assertEquals(None, creds.user)
            self.assertEquals(None, creds.password)
            creds.LoadCreds(self.tempfile)
            self.assertEquals(self.USER, creds.user)
            self.assertEquals(self.PASSWORD, creds.password)
            self.assertFalse(creds.creds_dirty)
    @osutils.TempFileDecorator
    def testStoreLoadToken(self):
        """Round-trip docs/tracker auth tokens through StoreAuthToken/LoadAuthToken."""
        # This is the replay script for the test.
        creds = gdata_lib.Creds()
        creds.user = self.USER
        # This is the test verification.
        with self.OutputCapturer():
            creds.SetDocsAuthToken(self.DOCS_TOKEN)
            self.assertEquals(self.DOCS_TOKEN, creds.docs_auth_token)
            self.assertTrue(creds.token_dirty)
            creds.SetTrackerAuthToken(self.TRACKER_TOKEN)
            self.assertEquals(self.TRACKER_TOKEN, creds.tracker_auth_token)
            self.assertTrue(creds.token_dirty)
            creds.StoreAuthToken(self.tempfile)
            self.assertEquals(self.DOCS_TOKEN, creds.docs_auth_token)
            self.assertEquals(self.TRACKER_TOKEN, creds.tracker_auth_token)
            # storing must clear the dirty flag
            self.assertFalse(creds.token_dirty)
            # Clear auth_tokens before loading from just-created file.
            creds.docs_auth_token = None
            creds.tracker_auth_token = None
            creds.user = None
            creds.LoadAuthToken(self.tempfile)
            self.assertEquals(self.DOCS_TOKEN, creds.docs_auth_token)
            self.assertEquals(self.TRACKER_TOKEN, creds.tracker_auth_token)
            self.assertFalse(creds.token_dirty)
            # the token file also restores the user name
            self.assertEquals(self.USER, creds.user)
    def testSetCreds(self):
        """SetCreds with an explicit password stores both and marks creds dirty."""
        # This is the replay script for the test.
        creds = gdata_lib.Creds()
        # This is the test verification.
        creds.SetCreds(self.USER, password=self.PASSWORD)
        self.assertEquals(self.USER, creds.user)
        self.assertEquals(self.PASSWORD, creds.password)
        self.assertTrue(creds.creds_dirty)
    def testSetCredsNoPassword(self):
        """SetCreds without a password prompts interactively (getpass is mocked)."""
        # Add test-specific mocks/stubs
        self.PatchObject(getpass, 'getpass', return_value=self.PASSWORD)
        # This is the replay script for the test.
        creds = gdata_lib.Creds()
        # This is the test verification.
        creds.SetCreds(self.USER)
        self.assertEquals(self.USER, creds.user)
        self.assertEquals(self.PASSWORD, creds.password)
        self.assertTrue(creds.creds_dirty)
    def testSetDocsToken(self):
        """SetDocsAuthToken stores the token and marks tokens dirty."""
        # This is the replay script for the test.
        creds = gdata_lib.Creds()
        # This is the test verification.
        creds.SetDocsAuthToken(self.DOCS_TOKEN)
        self.assertEquals(self.DOCS_TOKEN, creds.docs_auth_token)
        self.assertTrue(creds.token_dirty)
    def testSetTrackerToken(self):
        """SetTrackerAuthToken stores the token and marks tokens dirty."""
        # This is the replay script for the test.
        creds = gdata_lib.Creds()
        # This is the test verification.
        creds.SetTrackerAuthToken(self.TRACKER_TOKEN)
        self.assertEquals(self.TRACKER_TOKEN, creds.tracker_auth_token)
        self.assertTrue(creds.token_dirty)
class SpreadsheetRowTest(cros_test_lib.OutputTestCase):
  """Tests related to spreadsheet row interaction."""

  SS_ROW_OBJ = 'SSRowObj'
  SS_ROW_NUM = 5

  def testEmpty(self):
    """An empty SpreadsheetRow keeps its row obj/num and rejects mutation."""
    row = gdata_lib.SpreadsheetRow(self.SS_ROW_OBJ, self.SS_ROW_NUM)
    self.assertEquals(0, len(row))
    self.assertEquals(self.SS_ROW_OBJ, row.ss_row_obj)
    self.assertEquals(self.SS_ROW_NUM, row.ss_row_num)
    # Bug fix: the original passed |row| itself as the callable
    # (assertRaises(TypeError, row, '__setitem__', ...)), which raised
    # TypeError only because a dict is not callable -- the immutability
    # contract was never actually exercised.  Call __setitem__ directly.
    self.assertRaises(TypeError, row.__setitem__, 'abc', 'xyz')
    # Confirm the rejected mutation really had no effect.
    self.assertEquals(0, len(row))
    self.assertFalse('abc' in row)

  def testInit(self):
    """A SpreadsheetRow built from initial values behaves like that dict."""
    starting_vals = {'abc': 'xyz', 'foo': 'bar'}
    row = gdata_lib.SpreadsheetRow(self.SS_ROW_OBJ, self.SS_ROW_NUM,
                                   starting_vals)
    self.assertEquals(len(starting_vals), len(row))
    self.assertEquals(starting_vals, row)
    self.assertEquals(row['abc'], 'xyz')
    self.assertTrue('abc' in row)
    self.assertEquals(row['foo'], 'bar')
    self.assertTrue('foo' in row)
    self.assertEquals(self.SS_ROW_OBJ, row.ss_row_obj)
    self.assertEquals(self.SS_ROW_NUM, row.ss_row_num)
    # Same fix as testEmpty: exercise __delitem__ itself, not row(...).
    self.assertRaises(TypeError, row.__delitem__, 'abc')
    self.assertEquals(len(starting_vals), len(row))
    self.assertTrue('abc' in row)
class SpreadsheetCommTest(cros_test_lib.MoxOutputTestCase):
  """Test Spreadsheet communication."""

  SS_KEY = 'TheSSKey'
  WS_NAME = 'TheWSName'
  WS_KEY = 'TheWSKey'
  USER = 'dude'
  PASSWORD = 'shhh'
  TOKEN = 'authtoken'

  COLUMNS = ('greeting', 'name', 'title')
  ROWS = (
      {'greeting': 'Hi', 'name': 'George', 'title': 'Mr.'},
      {'greeting': 'Howdy', 'name': 'Billy Bob', 'title': 'Mr.'},
      {'greeting': 'Yo', 'name': 'Adriane', 'title': 'Ms.'},
  )

  def MockScomm(self, connect=True):
    """Return a mocked SpreadsheetComm"""
    mocked_scomm = self.mox.CreateMock(gdata_lib.SpreadsheetComm)
    mocked_scomm._columns = None
    mocked_scomm._rows = None
    if connect:
      mocked_gdclient = self.mox.CreateMock(gdata_lib.RetrySpreadsheetsService)
      mocked_scomm.gd_client = mocked_gdclient
      mocked_scomm.ss_key = self.SS_KEY
      mocked_scomm.ws_name = self.WS_NAME
      mocked_scomm.ws_key = self.WS_KEY
    else:
      mocked_scomm.gd_client = None
      mocked_scomm.ss_key = None
      mocked_scomm.ws_name = None
      mocked_scomm.ws_key = None
    return mocked_scomm

  def NewScomm(self, gd_client=None, connect=True):
    """Return a non-mocked SpreadsheetComm."""
    scomm = gdata_lib.SpreadsheetComm()
    scomm.gd_client = gd_client
    if connect:
      scomm.ss_key = self.SS_KEY
      scomm.ws_name = self.WS_NAME
      scomm.ws_key = self.WS_KEY
    else:
      scomm.ss_key = None
      scomm.ws_name = None
      scomm.ws_key = None
    return scomm

  def GenerateCreds(self, skip_user=False, skip_token=False):
    """Return a Creds object, optionally omitting the user or auth token."""
    creds = gdata_lib.Creds()
    if not skip_user:
      creds.user = self.USER
      creds.password = self.PASSWORD
    if not skip_token:
      creds.docs_auth_token = self.TOKEN
    return creds

  def testConnect(self):
    """Connect should log in and select the requested worksheet."""
    mocked_scomm = self.MockScomm(connect=False)
    creds = self.GenerateCreds()
    # This is the replay script for the test.
    mocked_scomm._Login(creds, 'chromiumos')
    mocked_scomm.SetCurrentWorksheet(self.WS_NAME, ss_key=self.SS_KEY)
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm.Connect(mocked_scomm, creds,
                                      self.SS_KEY, self.WS_NAME)
    self.mox.VerifyAll()

  def testColumns(self):
    """Test the .columns property.  Testing a property gets ugly."""
    self.mox.StubOutWithMock(gdata.spreadsheet.service, 'CellQuery')
    mocked_gdclient = self.mox.CreateMock(gdata_lib.RetrySpreadsheetsService)
    scomm = self.NewScomm(gd_client=mocked_gdclient, connect=True)
    query = {'max-row': '1'}
    # Simulate a Cells feed from spreadsheet for the column row.
    cols = [c[0].upper() + c[1:] for c in self.COLUMNS]
    entry = [cros_test_lib.EasyAttr(
        content=cros_test_lib.EasyAttr(text=c)) for c in cols]
    feed = cros_test_lib.EasyAttr(entry=entry)
    # This is the replay script for the test.
    gdata.spreadsheet.service.CellQuery().AndReturn(query)
    mocked_gdclient.GetCellsFeed(
        self.SS_KEY, self.WS_KEY, query=query).AndReturn(feed)
    self.mox.ReplayAll()
    # This is the test verification.
    result = scomm.columns
    del scomm  # Force deletion now before VerifyAll.
    self.mox.VerifyAll()
    expected_result = self.COLUMNS
    self.assertEquals(expected_result, result)

  def testRows(self):
    """Test the .rows property.  Testing a property gets ugly."""
    mocked_gdclient = self.mox.CreateMock(gdata_lib.RetrySpreadsheetsService)
    scomm = self.NewScomm(gd_client=mocked_gdclient, connect=True)
    # Simulate a List feed from spreadsheet for all rows.
    rows = [
        {'col_name': 'Joe', 'col_age': '12', 'col_zip': '12345'},
        {'col_name': 'Bob', 'col_age': '15', 'col_zip': '54321'},
    ]
    entry = []
    for row in rows:
      custom = dict((k, cros_test_lib.EasyAttr(text=v))
                    for (k, v) in row.iteritems())
      entry.append(cros_test_lib.EasyAttr(custom=custom))
    feed = cros_test_lib.EasyAttr(entry=entry)
    # This is the replay script for the test.
    mocked_gdclient.GetListFeed(self.SS_KEY, self.WS_KEY).AndReturn(feed)
    self.mox.ReplayAll()
    # This is the test verification.
    result = scomm.rows
    del scomm  # Force deletion now before VerifyAll.
    self.mox.VerifyAll()
    self.assertEquals(tuple(rows), result)
    # Result tuple should have spreadsheet row num as attribute on each row.
    # Spreadsheet rows are 1-based and row 1 holds the column headers, so
    # the first data row is row 2.
    self.assertEquals(2, result[0].ss_row_num)
    self.assertEquals(3, result[1].ss_row_num)
    # Result tuple should have spreadsheet row obj as attribute on each row.
    self.assertEquals(entry[0], result[0].ss_row_obj)
    self.assertEquals(entry[1], result[1].ss_row_obj)

  def testSetCurrentWorksheetStart(self):
    """Selecting a worksheet from scratch should look up its key and cache."""
    mocked_scomm = self.MockScomm(connect=True)
    # Undo worksheet settings.
    mocked_scomm.ss_key = None
    mocked_scomm.ws_name = None
    mocked_scomm.ws_key = None
    # This is the replay script for the test.
    mocked_scomm._ClearCache()
    mocked_scomm._GetWorksheetKey(
        self.SS_KEY, self.WS_NAME).AndReturn(self.WS_KEY)
    mocked_scomm._ClearCache()
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm.SetCurrentWorksheet(mocked_scomm, self.WS_NAME,
                                                  ss_key=self.SS_KEY)
    self.mox.VerifyAll()
    self.assertEquals(self.SS_KEY, mocked_scomm.ss_key)
    self.assertEquals(self.WS_KEY, mocked_scomm.ws_key)
    self.assertEquals(self.WS_NAME, mocked_scomm.ws_name)

  def testSetCurrentWorksheetRestart(self):
    """Switching worksheets within the same spreadsheet keeps the ss_key."""
    mocked_scomm = self.MockScomm(connect=True)
    other_ws_name = 'OtherWSName'
    other_ws_key = 'OtherWSKey'
    # This is the replay script for the test.
    mocked_scomm._GetWorksheetKey(
        self.SS_KEY, other_ws_name).AndReturn(other_ws_key)
    mocked_scomm._ClearCache()
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm.SetCurrentWorksheet(mocked_scomm, other_ws_name)
    self.mox.VerifyAll()
    self.assertEquals(self.SS_KEY, mocked_scomm.ss_key)
    self.assertEquals(other_ws_key, mocked_scomm.ws_key)
    self.assertEquals(other_ws_name, mocked_scomm.ws_name)

  def testClearCache(self):
    """_ClearCache should drop rows always, and columns unless kept."""
    rows = 'SomeRows'
    cols = 'SomeColumns'
    scomm = self.NewScomm()
    scomm._rows = rows
    scomm._columns = cols
    scomm._ClearCache(keep_columns=True)
    self.assertTrue(scomm._rows is None)
    self.assertEquals(cols, scomm._columns)
    scomm._rows = rows
    scomm._columns = cols
    scomm._ClearCache(keep_columns=False)
    self.assertTrue(scomm._rows is None)
    self.assertTrue(scomm._columns is None)
    scomm._rows = rows
    scomm._columns = cols
    scomm._ClearCache()
    self.assertTrue(scomm._rows is None)
    self.assertTrue(scomm._columns is None)

  def testLoginWithUserPassword(self):
    """_Login with user/password should do a programmatic login for a token."""
    mocked_scomm = self.MockScomm(connect=False)
    creds = self.GenerateCreds(skip_token=True)
    self.mox.StubOutClassWithMocks(gdata_lib, 'RetrySpreadsheetsService')
    source = 'SomeSource'
    # This is the replay script for the test.
    mocked_gdclient = gdata_lib.RetrySpreadsheetsService()
    mocked_gdclient.ProgrammaticLogin()
    mocked_gdclient.GetClientLoginToken().AndReturn(self.TOKEN)
    self.mox.ReplayAll()
    # This is the test verification.
    with self.OutputCapturer():
      gdata_lib.SpreadsheetComm._Login(mocked_scomm, creds, source)
    self.mox.VerifyAll()
    self.assertEquals(self.USER, mocked_gdclient.email)
    self.assertEquals(self.PASSWORD, mocked_gdclient.password)
    # The freshly obtained token must be written back into the creds.
    self.assertEquals(self.TOKEN, creds.docs_auth_token)
    self.assertEquals(source, mocked_gdclient.source)
    self.assertEquals(mocked_gdclient, mocked_scomm.gd_client)

  def testLoginWithToken(self):
    """_Login with an existing token should skip user/password entirely."""
    mocked_scomm = self.MockScomm(connect=False)
    creds = self.GenerateCreds(skip_user=True)
    self.mox.StubOutClassWithMocks(gdata_lib, 'RetrySpreadsheetsService')
    source = 'SomeSource'
    # This is the replay script for the test.
    mocked_gdclient = gdata_lib.RetrySpreadsheetsService()
    mocked_gdclient.SetClientLoginToken(creds.docs_auth_token)
    self.mox.ReplayAll()
    # This is the test verification.
    with self.OutputCapturer():
      gdata_lib.SpreadsheetComm._Login(mocked_scomm, creds, source)
    self.mox.VerifyAll()
    self.assertFalse(hasattr(mocked_gdclient, 'email'))
    self.assertFalse(hasattr(mocked_gdclient, 'password'))
    self.assertEquals(source, mocked_gdclient.source)
    self.assertEquals(mocked_gdclient, mocked_scomm.gd_client)

  def testGetWorksheetKey(self):
    """_GetWorksheetKey should find the ws by title in the worksheets feed."""
    mocked_scomm = self.MockScomm()
    entrylist = [
        cros_test_lib.EasyAttr(
            title=cros_test_lib.EasyAttr(text='Foo'), id='NotImportant'),
        cros_test_lib.EasyAttr(
            title=cros_test_lib.EasyAttr(text=self.WS_NAME),
            id=cros_test_lib.EasyAttr(text='/some/path/%s' % self.WS_KEY)),
        cros_test_lib.EasyAttr(
            title=cros_test_lib.EasyAttr(text='Bar'), id='NotImportant'),
    ]
    feed = cros_test_lib.EasyAttr(entry=entrylist)
    # This is the replay script for the test.
    mocked_scomm.gd_client.GetWorksheetsFeed(self.SS_KEY).AndReturn(feed)
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm._GetWorksheetKey(mocked_scomm,
                                               self.SS_KEY, self.WS_NAME)
    self.mox.VerifyAll()

  def testGetColumns(self):
    """GetColumns should simply return the .columns property value."""
    mocked_scomm = self.MockScomm()
    mocked_scomm.columns = 'SomeColumns'
    # Replay script
    self.mox.ReplayAll()
    # This is the test verification.
    result = gdata_lib.SpreadsheetComm.GetColumns(mocked_scomm)
    self.mox.VerifyAll()
    self.assertEquals('SomeColumns', result)

  def testGetColumnIndex(self):
    """GetColumnIndex should return the 1-based index of a column name."""
    # Note that spreadsheet column indices start at 1.
    mocked_scomm = self.MockScomm()
    mocked_scomm.columns = ['these', 'are', 'column', 'names']
    # This is the replay script for the test.
    self.mox.ReplayAll()
    # This is the test verification.
    result = gdata_lib.SpreadsheetComm.GetColumnIndex(mocked_scomm, 'are')
    self.mox.VerifyAll()
    self.assertEquals(2, result)

  def testGetRows(self):
    """GetRows should return the .rows tuple with row num/obj attributes."""
    mocked_scomm = self.MockScomm()
    rows = []
    for row_ix, row_dict in enumerate(self.ROWS):
      # Data rows start at spreadsheet row 2 (row 1 is the header).
      rows.append(gdata_lib.SpreadsheetRow('SSRowObj%d' % (row_ix + 2),
                                           (row_ix + 2), row_dict))
    mocked_scomm.rows = tuple(rows)
    # This is the replay script for the test.
    self.mox.ReplayAll()
    # This is the test verification.
    result = gdata_lib.SpreadsheetComm.GetRows(mocked_scomm)
    self.mox.VerifyAll()
    self.assertEquals(self.ROWS, result)
    for row_ix in xrange(len(self.ROWS)):
      self.assertEquals(row_ix + 2, result[row_ix].ss_row_num)
      self.assertEquals('SSRowObj%d' % (row_ix + 2), result[row_ix].ss_row_obj)

  def testGetRowCacheByCol(self):
    """GetRowCacheByCol should index rows by the values of one column."""
    mocked_scomm = self.MockScomm()
    # This is the replay script for the test.
    mocked_scomm.GetRows().AndReturn(self.ROWS)
    self.mox.ReplayAll()
    # This is the test verification.
    result = gdata_lib.SpreadsheetComm.GetRowCacheByCol(mocked_scomm, 'name')
    self.mox.VerifyAll()
    # Result is a dict of rows by the 'name' column.
    for row in self.ROWS:
      name = row['name']
      self.assertEquals(row, result[name])

  def testGetRowCacheByColDuplicates(self):
    """With duplicate column values each cache entry becomes a row list."""
    mocked_scomm = self.MockScomm()
    # Create new row list with duplicates by name column.
    rows = []
    for row in self.ROWS:
      new_row = dict(row)
      new_row['greeting'] = row['greeting'] + ' there'
      rows.append(new_row)
    rows.extend(self.ROWS)
    # This is the replay script for the test.
    mocked_scomm.GetRows().AndReturn(tuple(rows))
    self.mox.ReplayAll()
    # This is the test verification.
    result = gdata_lib.SpreadsheetComm.GetRowCacheByCol(mocked_scomm, 'name')
    self.mox.VerifyAll()
    # Result is a dict of rows by the 'name' column.  In this
    # test each result should be a list of the rows with the same
    # value in the 'name' column.
    num_rows = len(rows)
    for ix in xrange(num_rows / 2):
      row1 = rows[ix]
      row2 = rows[ix + (num_rows / 2)]
      name = row1['name']
      self.assertEquals(name, row2['name'])
      expected_rows = [row1, row2]
      self.assertEquals(expected_rows, result[name])

  def testInsertRow(self):
    """InsertRow should forward to gd_client and invalidate the row cache."""
    mocked_scomm = self.MockScomm()
    row = 'TheRow'
    # Replay script
    mocked_scomm.gd_client.InsertRow(row, mocked_scomm.ss_key,
                                     mocked_scomm.ws_key)
    mocked_scomm._ClearCache(keep_columns=True)
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm.InsertRow(mocked_scomm, row)
    self.mox.VerifyAll()

  def testUpdateRowCellByCell(self):
    """Cells with a known column index are replaced; unknown ones skipped."""
    mocked_scomm = self.MockScomm()
    rowIx = 5
    row = {'a': 123, 'b': 234, 'c': 345}
    colIndices = {'a': 1, 'b': None, 'c': 4}
    # Replay script
    # NOTE(review): relies on dict iteration order being the same here as
    # inside UpdateRowCellByCell -- consistent within one process run.
    for colName in row:
      colIx = colIndices[colName]
      mocked_scomm.GetColumnIndex(colName).AndReturn(colIx)
      if colIx is not None:
        mocked_scomm.ReplaceCellValue(rowIx, colIx, row[colName])
    mocked_scomm._ClearCache(keep_columns=True)
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm.UpdateRowCellByCell(mocked_scomm, rowIx, row)
    self.mox.VerifyAll()

  def testDeleteRow(self):
    """DeleteRow should forward to gd_client and invalidate the row cache."""
    mocked_scomm = self.MockScomm()
    ss_row = 'TheRow'
    # Replay script
    mocked_scomm.gd_client.DeleteRow(ss_row)
    mocked_scomm._ClearCache(keep_columns=True)
    self.mox.ReplayAll()
    # This is the test verification.
    gdata_lib.SpreadsheetComm.DeleteRow(mocked_scomm, ss_row)
    self.mox.VerifyAll()

  def testReplaceCellValue(self):
    """ReplaceCellValue should call UpdateCell and invalidate the row cache."""
    mocked_scomm = self.MockScomm()
    rowIx = 14
    colIx = 4
    val = 'TheValue'
    # Replay script
    mocked_scomm.gd_client.UpdateCell(rowIx, colIx, val,
                                      mocked_scomm.ss_key, mocked_scomm.ws_key)
    mocked_scomm._ClearCache(keep_columns=True)
    self.mox.ReplayAll()
    # Verify
    gdata_lib.SpreadsheetComm.ReplaceCellValue(mocked_scomm, rowIx, colIx, val)
    self.mox.VerifyAll()

  def testClearCellValue(self):
    """ClearCellValue should delegate to ReplaceCellValue with None."""
    mocked_scomm = self.MockScomm()
    rowIx = 14
    colIx = 4
    # Replay script
    mocked_scomm.ReplaceCellValue(rowIx, colIx, None)
    self.mox.ReplayAll()
    # Verify
    gdata_lib.SpreadsheetComm.ClearCellValue(mocked_scomm, rowIx, colIx)
    self.mox.VerifyAll()
class IssueCommentTest(cros_test_lib.TestCase):
  """Test creating comments."""

  def testInit(self):
    """IssueComment should retain the title and text it was built with."""
    subject = 'Greetings, Earthlings'
    body = 'I come in peace.'
    comment = gdata_lib.IssueComment(subject, body)
    self.assertEquals(subject, comment.title)
    self.assertEquals(body, comment.text)
def createTrackerIssue(tid, labels, owner, status, content, title):
  """Build a fake Tracker issue object mirroring the gdata feed layout."""
  attr = cros_test_lib.EasyAttr
  tissue = attr()
  tissue.id = attr(text='http://www/some/path/%d' % tid)
  tissue.label = [attr(text=l) for l in labels]
  tissue.owner = attr(username=attr(text=owner))
  tissue.status = attr(text=status)
  tissue.content = attr(text=content)
  tissue.title = attr(text=title)
  return tissue
class IssueTest(cros_test_lib.MoxTestCase):
  """Test creating a bug."""

  def testInitOverride(self):
    """Keyword args to Issue should override the default attribute values."""
    owner = 'somedude@chromium.org'
    status = 'Assigned'
    issue = gdata_lib.Issue(owner=owner, status=status)
    self.assertEquals(owner, issue.owner)
    self.assertEquals(status, issue.status)

  def testInitInvalidOverride(self):
    """Unknown keyword args to Issue should raise ValueError."""
    self.assertRaises(ValueError, gdata_lib.Issue,
                      foobar='NotARealAttr')

  def testInitFromTracker(self):
    """InitFromTracker should copy every field from a Tracker issue object."""
    # Need to create a dummy Tracker Issue object.
    tissue_id = 123
    tissue_labels = ['Iteration-10', 'Effort-2']
    tissue_owner = 'thedude@chromium.org'
    tissue_status = 'Available'
    tissue_content = 'The summary message'
    tissue_title = 'The Big Title'
    tissue = createTrackerIssue(tid=tissue_id, labels=tissue_labels,
                                owner=tissue_owner, status=tissue_status,
                                content=tissue_content, title=tissue_title)
    mocked_issue = self.mox.CreateMock(gdata_lib.Issue)
    # Replay script
    mocked_issue.GetTrackerIssueComments(tissue_id, 'TheProject').AndReturn([])
    self.mox.ReplayAll()
    # Verify
    gdata_lib.Issue.InitFromTracker(mocked_issue, tissue, 'TheProject')
    self.mox.VerifyAll()
    self.assertEquals(tissue_id, mocked_issue.id)
    self.assertEquals(tissue_labels, mocked_issue.labels)
    self.assertEquals(tissue_owner, mocked_issue.owner)
    self.assertEquals(tissue_status, mocked_issue.status)
    # Tracker 'content' maps to Issue.summary; 'title' maps straight across.
    self.assertEquals(tissue_content, mocked_issue.summary)
    self.assertEquals(tissue_title, mocked_issue.title)
    self.assertEquals([], mocked_issue.comments)
class TrackerCommTest(cros_test_lib.MoxOutputTestCase):
  """Test bug tracker communication."""

  def testConnectEmail(self):
    """Connect without a cached token should do a ClientLogin by email."""
    source = 'TheSource'
    token = 'TheToken'
    creds = gdata_lib.Creds()
    creds.user = 'dude'
    creds.password = 'shhh'
    creds.tracker_auth_token = None
    self.mox.StubOutClassWithMocks(gd_ph_client, 'ProjectHostingClient')
    mocked_tcomm = self.mox.CreateMock(gdata_lib.TrackerComm)

    # Simulate ClientLogin setting the auth token on the client as a side
    # effect.  |mocked_itclient| is bound below; the closure resolves it at
    # call time, so the forward reference is fine.
    def set_token(*_args, **_kwargs):
      mocked_itclient.auth_token = cros_test_lib.EasyAttr(token_string=token)

    # Replay script
    mocked_itclient = gd_ph_client.ProjectHostingClient()
    mocked_itclient.ClientLogin(
        creds.user, creds.password, source=source, service='code',
        account_type='GOOGLE').WithSideEffects(set_token)
    self.mox.ReplayAll()
    # Verify
    with self.OutputCapturer():
      gdata_lib.TrackerComm.Connect(mocked_tcomm, creds, 'TheProject',
                                    source=source)
    self.mox.VerifyAll()
    self.assertEquals(mocked_tcomm.it_client, mocked_itclient)

  def testConnectToken(self):
    """Connect with a cached token should reuse it and skip ClientLogin."""
    source = 'TheSource'
    token = 'TheToken'
    creds = gdata_lib.Creds()
    creds.user = 'dude'
    creds.password = 'shhh'
    creds.tracker_auth_token = token
    mocked_tcomm = self.mox.CreateMock(gdata_lib.TrackerComm)
    self.mox.StubOutClassWithMocks(gd_ph_client, 'ProjectHostingClient')
    self.mox.StubOutClassWithMocks(gdata.gauth, 'ClientLoginToken')
    # Replay script
    mocked_itclient = gd_ph_client.ProjectHostingClient()
    mocked_token = gdata.gauth.ClientLoginToken(token)
    self.mox.ReplayAll()
    # Verify
    with self.OutputCapturer():
      gdata_lib.TrackerComm.Connect(mocked_tcomm, creds, 'TheProject',
                                    source=source)
    self.mox.VerifyAll()
    self.assertEquals(mocked_tcomm.it_client, mocked_itclient)
    self.assertEquals(mocked_itclient.auth_token, mocked_token)

  def testGetTrackerIssueById(self):
    """GetTrackerIssueById should query the project and wrap the first hit."""
    mocked_itclient = self.mox.CreateMock(gd_ph_client.ProjectHostingClient)
    tcomm = gdata_lib.TrackerComm()
    tcomm.it_client = mocked_itclient
    tcomm.project_name = 'TheProject'
    self.mox.StubOutClassWithMocks(gd_ph_client, 'Query')
    self.mox.StubOutClassWithMocks(gdata_lib, 'Issue')
    self.mox.StubOutWithMock(gdata_lib.Issue, 'InitFromTracker')
    issue_id = 12345
    feed = cros_test_lib.EasyAttr(entry=['hi', 'there'])
    # Replay script
    mocked_query = gd_ph_client.Query(issue_id=str(issue_id))
    mocked_itclient.get_issues(
        'TheProject', query=mocked_query).AndReturn(feed)
    mocked_issue = gdata_lib.Issue()
    mocked_issue.InitFromTracker(feed.entry[0], 'TheProject')
    self.mox.ReplayAll()
    # Verify
    issue = tcomm.GetTrackerIssueById(issue_id)
    self.mox.VerifyAll()
    self.assertEquals(mocked_issue, issue)

  def testGetTrackerIssuesByText(self):
    """GetTrackerIssuesByText should search open issues and wrap each hit."""
    author = 'TheAuthor'
    project = 'TheProject'
    text = "find me"
    # Set up the fake tracker issue.
    tissue_id = 1
    tissue_labels = ['auto-filed']
    tissue_owner = 'someone@chromium.org'
    tissue_status = 'Available'
    tissue_content = 'find me in body'
    tissue_title = 'find me in title'
    tissue = createTrackerIssue(tid=tissue_id, labels=tissue_labels,
                                owner=tissue_owner, status=tissue_status,
                                content=tissue_content, title=tissue_title)
    issue = gdata_lib.Issue(id=tissue_id, labels=tissue_labels,
                            owner=tissue_owner, status=tissue_status,
                            title=tissue_title, summary=tissue_content)
    # This will get called as part of Issue.InitFromTracker.
    self.mox.StubOutWithMock(gdata_lib.Issue, 'GetTrackerIssueComments')
    mocked_itclient = self.mox.CreateMock(gd_ph_client.ProjectHostingClient)
    tcomm = gdata_lib.TrackerComm()
    tcomm.author = author
    tcomm.it_client = mocked_itclient
    tcomm.project_name = project
    # We expect a Query instance to be passed into get_issues.
    # pylint: disable=E1120
    self.mox.StubOutClassWithMocks(gd_ph_client, 'Query')
    mocked_query = gd_ph_client.Query(text_query='%s is:open' % text)
    feed = cros_test_lib.EasyAttr(entry=[tissue])
    mocked_itclient.get_issues(project, query=mocked_query).AndReturn(feed)
    gdata_lib.Issue.GetTrackerIssueComments(1, project).AndReturn([])
    self.mox.ReplayAll()
    issues = tcomm.GetTrackerIssuesByText(text)
    self.assertEquals(issues, [issue])

  def testCreateTrackerIssue(self):
    """CreateTrackerIssue should add the issue and return its numeric id."""
    author = 'TheAuthor'
    mocked_itclient = self.mox.CreateMock(gd_ph_client.ProjectHostingClient)
    mocked_tcomm = self.mox.CreateMock(gdata_lib.TrackerComm)
    mocked_tcomm.author = author
    mocked_tcomm.it_client = mocked_itclient
    mocked_tcomm.project_name = 'TheProject'
    issue = cros_test_lib.EasyAttr(title='TheTitle',
                                   summary='TheSummary',
                                   status='TheStatus',
                                   owner='TheOwner',
                                   labels='TheLabels',
                                   ccs=[])
    # Replay script
    # The id returned by the server is a URL-ish path; the numeric tail
    # ('123') is what CreateTrackerIssue is expected to extract.
    issue_id = cros_test_lib.EasyAttr(
        id=cros_test_lib.EasyAttr(text='foo/bar/123'))
    mocked_itclient.add_issue(
        project_name='TheProject',
        title=issue.title,
        content=issue.summary,
        author=author,
        status=issue.status,
        owner=issue.owner,
        labels=issue.labels,
        ccs=issue.ccs).AndReturn(issue_id)
    self.mox.ReplayAll()
    # Verify
    result = gdata_lib.TrackerComm.CreateTrackerIssue(mocked_tcomm, issue)
    self.mox.VerifyAll()
    self.assertEquals(123, result)

  def testAppendTrackerIssueById(self):
    """AppendTrackerIssueById should post a comment and return the issue id."""
    author = 'TheAuthor'
    project_name = 'TheProject'
    mocked_itclient = self.mox.CreateMock(gd_ph_client.ProjectHostingClient)
    mocked_tcomm = self.mox.CreateMock(gdata_lib.TrackerComm)
    mocked_tcomm.author = author
    mocked_tcomm.it_client = mocked_itclient
    mocked_tcomm.project_name = project_name
    issue_id = 54321
    comment = 'TheComment'
    # Replay script
    mocked_itclient.update_issue(project_name=project_name,
                                 issue_id=issue_id,
                                 author=author,
                                 comment=comment,
                                 owner=None)
    self.mox.ReplayAll()
    # Verify
    result = gdata_lib.TrackerComm.AppendTrackerIssueById(mocked_tcomm,
                                                          issue_id, comment)
    self.mox.VerifyAll()
    self.assertEquals(issue_id, result)
class RetrySpreadsheetsServiceTest(cros_test_lib.MoxOutputTestCase):
  """Test Spreadsheet server retry helper."""

  def testRequest(self):
    """Test that calling request method invokes _RetryRequest wrapper."""
    # pylint: disable=W0212
    self.mox.StubOutWithMock(gdata_lib.RetrySpreadsheetsService,
                             '_RetryRequest')
    # Use a real RetrySpreadsheetsService object rather than a mocked
    # one, because the .request method only exists if __init__ is run.
    # Also split up __new__ and __init__ in order to grab the original
    # rss.request method (inherited from base class at that point).
    rss = gdata_lib.RetrySpreadsheetsService.__new__(
        gdata_lib.RetrySpreadsheetsService)
    orig_request = rss.request
    rss.__init__()
    args = ('GET', 'http://foo.bar')
    # This is the replay script for the test.
    gdata_lib.RetrySpreadsheetsService._RetryRequest(
        orig_request, *args).AndReturn('wrapped')
    self.mox.ReplayAll()
    # This is the test verification.
    retval = rss.request(*args)
    self.mox.VerifyAll()
    self.assertEquals('wrapped', retval)

  def _TestHttpClientRetryRequest(self, statuses):
    """Test retry logic in http_client request during ProgrammaticLogin.

    |statuses| is list of http codes to simulate, where 200 means success.
    """
    expect_success = statuses[-1] == 200
    self.mox.StubOutWithMock(atom.http.ProxiedHttpClient, 'request')
    rss = gdata_lib.RetrySpreadsheetsService()
    args = ('POST', 'https://www.google.com/accounts/ClientLogin')

    def _read():
      return 'Some response text'

    # This is the replay script for the test.
    # Simulate the return codes in statuses.
    for status in statuses:
      retstatus = cros_test_lib.EasyAttr(status=status, read=_read)
      atom.http.ProxiedHttpClient.request(
          *args, data=mox.IgnoreArg(),
          headers=mox.IgnoreArg()).AndReturn(retstatus)
    self.mox.ReplayAll()
    # This is the test verification.
    with self.OutputCapturer():
      if expect_success:
        rss.ProgrammaticLogin()
      else:
        self.assertRaises(gdata.service.Error, rss.ProgrammaticLogin)
    self.mox.VerifyAll()
    if not expect_success:
      # Retries did not help, request still failed.
      regexp = re.compile(r'^Giving up on HTTP request')
      self.AssertOutputContainsWarning(regexp=regexp)
    elif len(statuses) > 1:
      # Warning expected if retries were needed.
      self.AssertOutputContainsWarning()
    else:
      # First try worked, expect no warnings.
      self.AssertOutputContainsWarning(invert=True)

  def testHttpClientRetryRequest(self):
    self._TestHttpClientRetryRequest([200])

  def testHttpClientRetryRequest403(self):
    self._TestHttpClientRetryRequest([403, 200])

  def testHttpClientRetryRequest403x2(self):
    self._TestHttpClientRetryRequest([403, 403, 200])

  def testHttpClientRetryRequest403x3(self):
    self._TestHttpClientRetryRequest([403, 403, 403, 200])

  def testHttpClientRetryRequest403x4(self):
    self._TestHttpClientRetryRequest([403, 403, 403, 403, 200])

  def testHttpClientRetryRequest403x5(self):
    # This one should exhaust the retries.
    self._TestHttpClientRetryRequest([403, 403, 403, 403, 403])

  def _TestRetryRequest(self, statuses):
    """Test retry logic for request method.

    |statuses| is list of http codes to simulate, where 200 means success.
    """
    expect_success = statuses[-1] == 200
    # On success the last status is returned; on failure the first one is.
    expected_status_index = len(statuses) - 1 if expect_success else 0
    mocked_ss = self.mox.CreateMock(gdata_lib.RetrySpreadsheetsService)
    args = ('GET', 'http://foo.bar')
    # This is the replay script for the test.
    for ix, status in enumerate(statuses):
      # Add index of status to track which status the request function is
      # returning.  It is expected to return the last return status if
      # successful (retries or not), but first return status if failed.
      retval = cros_test_lib.EasyAttr(status=status, index=ix)
      mocked_ss.request(*args).AndReturn(retval)
    self.mox.ReplayAll()
    # This is the test verification.
    with self.OutputCapturer():
      # pylint: disable=W0212
      rval = gdata_lib.RetrySpreadsheetsService._RetryRequest(mocked_ss,
                                                              mocked_ss.request,
                                                              *args)
    self.mox.VerifyAll()
    self.assertEquals(statuses[expected_status_index], rval.status)
    self.assertEquals(expected_status_index, rval.index)
    if not expect_success:
      # Retries did not help, request still failed.
      regexp = re.compile(r'^Giving up on HTTP request')
      self.AssertOutputContainsWarning(regexp=regexp)
    elif expected_status_index > 0:
      # Warning expected if retries were needed.
      self.AssertOutputContainsWarning()
    else:
      # First try worked, expect no warnings.
      self.AssertOutputContainsWarning(invert=True)

  def testRetryRequest(self):
    self._TestRetryRequest([200])

  def testRetryRequest403(self):
    self._TestRetryRequest([403, 200])

  def testRetryRequest403x2(self):
    self._TestRetryRequest([403, 403, 200])

  def testRetryRequest403x3(self):
    self._TestRetryRequest([403, 403, 403, 200])

  def testRetryRequest403x4(self):
    self._TestRetryRequest([403, 403, 403, 403, 200])

  def testRetryRequest403x5(self):
    # This one should exhaust the retries.
    self._TestRetryRequest([403, 403, 403, 403, 403])
| bsd-3-clause |
mitsuhiko/logbook | tests/test_syslog_handler.py | 2 | 2146 | import os
import re
import socket
from contextlib import closing
import logbook
import pytest
# Path used for the AF_UNIX test socket (cleaned up by the fixture below).
UNIX_SOCKET = "/tmp/__unixsock_logbook.test"

# Record delimiter appended by SyslogHandler, keyed by address family:
# newline for INET sockets, NUL byte for UNIX datagram sockets (added below
# only when the platform supports AF_UNIX).
DELIMITERS = {
    socket.AF_INET: '\n'
}

# (family, socket type, bind address) combinations to parametrize over.
# Port 0 lets the OS pick a free port for the INET cases.
TO_TEST = [
    (socket.AF_INET, socket.SOCK_DGRAM, ('127.0.0.1', 0)),
    (socket.AF_INET, socket.SOCK_STREAM, ('127.0.0.1', 0)),
]

# AF_UNIX is unavailable on some platforms (e.g. Windows).
UNIX_SOCKET_AVAILABLE = hasattr(socket, 'AF_UNIX')

if UNIX_SOCKET_AVAILABLE:
    DELIMITERS[socket.AF_UNIX] = '\x00'
    TO_TEST.append((socket.AF_UNIX, socket.SOCK_DGRAM, UNIX_SOCKET))
@pytest.mark.usefixtures("unix_sock_path")
@pytest.mark.parametrize("sock_family,socktype,address", TO_TEST)
@pytest.mark.parametrize("app_name", [None, 'Testing'])
def test_syslog_handler(logger, activation_strategy, sock_family, socktype, address, app_name):
    """End-to-end check of SyslogHandler's wire format.

    Binds a local listening socket of the parametrized family/type, logs a
    warning through a SyslogHandler pointed at it, and matches the payload
    received on the socket against the expected syslog format.
    """
    delimiter = DELIMITERS[sock_family]
    with closing(socket.socket(sock_family, socktype)) as inc:
        inc.bind(address)
        if socktype == socket.SOCK_STREAM:
            inc.listen(0)
        inc.settimeout(1)
        # The expected regex differs by transport: the UNIX-socket path uses
        # the short BSD-style header, while INET uses the longer header with
        # timestamp, hostname and pid (per the patterns below).
        if UNIX_SOCKET_AVAILABLE and sock_family == socket.AF_UNIX:
            expected = (r'^<12>%stestlogger: Syslog is weird%s$' % (app_name + ':' if app_name else '', delimiter))
        else:
            expected = (r'^<12>1 \d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z %s %s %d - - %sSyslog is weird%s$' % (
                socket.gethostname(),
                app_name if app_name else 'testlogger',
                os.getpid(), 'testlogger: ' if app_name else '',
                delimiter))
        # getsockname() yields the OS-assigned port for the ('127.0.0.1', 0)
        # cases, so the handler targets the socket we just bound.
        handler = logbook.SyslogHandler(app_name, inc.getsockname(), socktype=socktype)
        with activation_strategy(handler):
            logger.warn('Syslog is weird')
        if socktype == socket.SOCK_STREAM:
            with closing(inc.accept()[0]) as inc2:
                rv = inc2.recv(1024)
        else:
            rv = inc.recvfrom(1024)[0]
        rv = rv.decode('utf-8')
        assert re.match(expected, rv), \
            'expected {}, got {}'.format(expected, rv)
@pytest.fixture
def unix_sock_path():
    """Yield the UNIX socket path for the test, with cleanup on both ends.

    Removes any stale socket file left behind by a previous (crashed) run
    before the test binds -- otherwise bind() fails with EADDRINUSE -- and
    unlinks the file again after the test finishes, even if it raised.
    """
    if os.path.exists(UNIX_SOCKET):
        os.unlink(UNIX_SOCKET)
    try:
        yield UNIX_SOCKET
    finally:
        if os.path.exists(UNIX_SOCKET):
            os.unlink(UNIX_SOCKET)
| bsd-3-clause |
ghickman/django | django/conf/locale/fr/formats.py | 504 | 1454 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',  # Swiss (fr_CH), '25.10.2006', '25.10.06'
    # '%d %B %Y', '%d %b %Y',  # '25 octobre 2006', '25 oct. 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S',  # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',  # '25/10/2006 14:30'
    '%d/%m/%Y',  # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',  # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',  # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y',  # Swiss (fr_CH), '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
mtnman38/Aggregate | Executables/Aggregate 0.8.8 for Macintosh.app/Contents/Resources/lib/python2.7/requests/packages/charade/langbulgarianmodel.py | 172 | 13013 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
# Maps each ISO-8859-5 byte (0-255) to a frequency-order index; special
# codes per the legend above: 255 control, 254 CR/LF, 253 symbol, 252 digit.
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
# Same mapping for the windows-1251 encoding; per the note above, this is
# the original table and only indices < 64 are known-valid in the Latin5
# variant derived from it.
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
 1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
 7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
# Character-pair sequence model: each value is a 2-bit likelihood class for
# a pair of frequency-order indices -- presumably 0=negative .. 3=positive
# per the sequence percentages listed above; verify against chardet docs.
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Bundle of the ISO-8859-5 Bulgarian model pieces consumed by the detector.
Latin5BulgarianModel = dict(
    charToOrderMap=Latin5_BulgarianCharToOrderMap,
    precedenceMatrix=BulgarianLangModel,
    mTypicalPositiveRatio=0.969392,
    keepEnglishLetter=False,
    charsetName="ISO-8859-5",
)
# Bundle of the windows-1251 Bulgarian model pieces consumed by the detector.
Win1251BulgarianModel = dict(
    charToOrderMap=win1251BulgarianCharToOrderMap,
    precedenceMatrix=BulgarianLangModel,
    mTypicalPositiveRatio=0.969392,
    keepEnglishLetter=False,
    charsetName="windows-1251",
)
# flake8: noqa
| gpl-2.0 |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/site-packages/urwid-1.3.0-py2.7-linux-x86_64.egg/urwid/tests/test_text_layout.py | 26 | 10642 | import unittest
from urwid import text_layout
from urwid.compat import B
import urwid
class CalcBreaksTest(object):
    """Mixin: drive calculate_text_segments over the (width, expected)
    pairs in ``self.do`` and check each broken line's end offset."""
    def cbtest(self, width, exp):
        segs = text_layout.default_layout.calculate_text_segments(
            B(self.text), width, self.mode)
        assert len(segs) == len(exp), repr((segs, exp))
        for line, want_end in zip(segs, exp):
            # The last entry of a line's final segment is its end offset.
            assert line[-1][-1] == want_end, repr((segs, exp))

    def test(self):
        for width, want in self.do:
            self.cbtest(width, want)
class CalcBreaksCharTest(CalcBreaksTest, unittest.TestCase):
    """'any' wrap mode (break anywhere) on single-byte text."""
    mode = 'any'
    text = "abfghsdjf askhtrvs\naltjhgsdf ljahtshgf"
    # tests: (available width, expected end offset of each broken line)
    do = [
        ( 100, [18,38] ),
        ( 6, [6, 12, 18, 25, 31, 37, 38] ),
        ( 10, [10, 18, 29, 38] ),
    ]
class CalcBreaksDBCharTest(CalcBreaksTest, unittest.TestCase):
    """'any' wrap mode with double-byte (euc-jp) characters mixed in."""
    def setUp(self):
        urwid.set_encoding("euc-jp")
    mode = 'any'
    # \xA1\xA1 pairs are single double-width euc-jp characters.
    text = "abfgh\xA1\xA1j\xA1\xA1xskhtrvs\naltjhgsdf\xA1\xA1jahtshgf"
    # tests: (available width, expected end offset of each broken line)
    do = [
        ( 10, [10, 18, 28, 38] ),
        ( 6, [5, 11, 17, 18, 25, 31, 37, 38] ),
        ( 100, [18, 38]),
    ]
class CalcBreaksWordTest(CalcBreaksTest, unittest.TestCase):
    """'space' wrap mode (break at word boundaries)."""
    mode = 'space'
    text = "hello world\nout there. blah"
    # tests: (available width, expected end offset of each broken line)
    do = [
        ( 10, [5, 11, 22, 27] ),
        ( 5, [5, 11, 17, 22, 27] ),
        ( 100, [11, 27] ),
    ]
class CalcBreaksWordTest2(CalcBreaksTest, unittest.TestCase):
    """'space' wrap mode on a longer single-line sentence."""
    mode = 'space'
    text = "A simple set of words, really...."
    # (available width, expected end offset of each broken line)
    do = [
        ( 10, [8, 15, 22, 33]),
        ( 17, [15, 33]),
        ( 13, [12, 22, 33]),
    ]
class CalcBreaksDBWordTest(CalcBreaksTest, unittest.TestCase):
    """'space' wrap mode with double-byte (euc-jp) characters mixed in."""
    def setUp(self):
        urwid.set_encoding("euc-jp")
    mode = 'space'
    text = "hel\xA1\xA1 world\nout-\xA1\xA1tre blah"
    # tests: (available width, expected end offset of each broken line)
    do = [
        ( 10, [5, 11, 21, 26] ),
        ( 5, [5, 11, 16, 21, 26] ),
        ( 100, [11, 26] ),
    ]
class CalcBreaksUTF8Test(CalcBreaksTest, unittest.TestCase):
    """'space' wrap mode on UTF-8 multi-byte (3-byte CJK) characters."""
    def setUp(self):
        urwid.set_encoding("utf-8")
    mode = 'space'
    text = '\xe6\x9b\xbf\xe6\xb4\xbc\xe6\xb8\x8e\xe6\xba\x8f\xe6\xbd\xba'
    # (available width, expected byte offsets of line ends)
    do = [
        (4, [6, 12, 15] ),
        (10, [15] ),
        (5, [6, 12, 15] ),
    ]
class CalcBreaksCantDisplayTest(unittest.TestCase):
    """A double-width character must raise CanNotDisplayText when the
    available width (1 column) cannot fit it."""
    def test(self):
        urwid.set_encoding("euc-jp")
        self.assertRaises(text_layout.CanNotDisplayText,
            text_layout.default_layout.calculate_text_segments,
            B('\xA1\xA1'), 1, 'space' )
        urwid.set_encoding("utf-8")
        self.assertRaises(text_layout.CanNotDisplayText,
            text_layout.default_layout.calculate_text_segments,
            B('\xe9\xa2\x96'), 1, 'space' )
class SubsegTest(unittest.TestCase):
    """LayoutSegment.subseg for padding, text and range segments, including
    double-byte (euc-jp) characters that get split at segment edges."""
    def setUp(self):
        urwid.set_encoding("euc-jp")
    def st(self, seg, text, start, end, exp):
        # seg: a layout segment tuple; exp: the sub-segments expected when
        # slicing seg to the [start, end) screen-column range.
        text = B(text)
        s = urwid.LayoutSegment(seg)
        result = s.subseg( text, start, end )
        assert result == exp, "Expected %r, got %r"%(exp,result)
    def test1_padding(self):
        # (n, None) segments are blank padding.
        self.st( (10, None), "", 0, 8, [(8, None)] )
        self.st( (10, None), "", 2, 10, [(8, None)] )
        self.st( (10, 0), "", 3, 7, [(4, 0)] )
        self.st( (10, 0), "", 0, 20, [(10, 0)] )
    def test2_text(self):
        # (n, offset, bytes) segments carry literal text; splitting a
        # double-byte char replaces its half with a space.
        self.st( (10, 0, B("1234567890")), "", 0, 8, [(8,0,B("12345678"))] )
        self.st( (10, 0, B("1234567890")), "", 2, 10, [(8,0,B("34567890"))] )
        self.st( (10, 0, B("12\xA1\xA156\xA1\xA190")), "", 2, 8,
            [(6, 0, B("\xA1\xA156\xA1\xA1"))] )
        self.st( (10, 0, B("12\xA1\xA156\xA1\xA190")), "", 3, 8,
            [(5, 0, B(" 56\xA1\xA1"))] )
        self.st( (10, 0, B("12\xA1\xA156\xA1\xA190")), "", 2, 7,
            [(5, 0, B("\xA1\xA156 "))] )
        self.st( (10, 0, B("12\xA1\xA156\xA1\xA190")), "", 3, 7,
            [(4, 0, B(" 56 "))] )
        self.st( (10, 0, B("12\xA1\xA156\xA1\xA190")), "", 0, 20,
            [(10, 0, B("12\xA1\xA156\xA1\xA190"))] )
    def test3_range(self):
        # (n, start, end) segments reference a byte range of the text.
        t = "1234567890"
        self.st( (10, 0, 10), t, 0, 8, [(8, 0, 8)] )
        self.st( (10, 0, 10), t, 2, 10, [(8, 2, 10)] )
        self.st( (6, 2, 8), t, 1, 6, [(5, 3, 8)] )
        self.st( (6, 2, 8), t, 0, 5, [(5, 2, 7)] )
        self.st( (6, 2, 8), t, 1, 5, [(4, 3, 7)] )
        t = "12\xA1\xA156\xA1\xA190"
        self.st( (10, 0, 10), t, 0, 8, [(8, 0, 8)] )
        self.st( (10, 0, 10), t, 2, 10, [(8, 2, 10)] )
        self.st( (6, 2, 8), t, 1, 6, [(1, 3), (4, 4, 8)] )
        self.st( (6, 2, 8), t, 0, 5, [(4, 2, 6), (1, 6)] )
        self.st( (6, 2, 8), t, 1, 5, [(1, 3), (2, 4, 6), (1, 6)] )
class CalcTranslateTest(object):
    """Mixin: run urwid.default_layout.layout over ``self.text`` for each
    alignment and compare against the class's expected translation."""
    def setUp(self):
        urwid.set_encoding("utf-8")

    def _assert_layout(self, align, expected):
        # Shared driver for the three alignment test methods below.
        got = urwid.default_layout.layout(self.text, self.width, align,
                                          self.mode)
        assert got == expected, got

    def test1_left(self):
        self._assert_layout('left', self.result_left)

    def test2_right(self):
        self._assert_layout('right', self.result_right)

    def test3_center(self):
        self._assert_layout('center', self.result_center)
class CalcTranslateCharTest(CalcTranslateTest, unittest.TestCase):
    """Alignment translations in 'any' (per-character) wrap mode."""
    text = "It's out of control!\nYou've got to"
    mode = 'any'
    width = 15
    # Each result is a list of lines; (n, None) = padding, (n, a, b) = text
    # byte range, (0, x) = line-end marker -- presumably; verify against
    # urwid.text_layout docs.
    result_left = [
        [(15, 0, 15)],
        [(5, 15, 20), (0, 20)],
        [(13, 21, 34), (0, 34)]]
    result_right = [
        [(15, 0, 15)],
        [(10, None), (5, 15, 20), (0,20)],
        [(2, None), (13, 21, 34), (0,34)]]
    result_center = [
        [(15, 0, 15)],
        [(5, None), (5, 15, 20), (0,20)],
        [(1, None), (13, 21, 34), (0,34)]]
class CalcTranslateWordTest(CalcTranslateTest, unittest.TestCase):
    """Alignment translations in 'space' (word) wrap mode."""
    text = "It's out of control!\nYou've got to"
    mode = 'space'
    width = 14
    result_left = [
        [(11, 0, 11), (0, 11)],
        [(8, 12, 20), (0, 20)],
        [(13, 21, 34), (0, 34)]]
    result_right = [
        [(3, None), (11, 0, 11), (0, 11)],
        [(6, None), (8, 12, 20), (0, 20)],
        [(1, None), (13, 21, 34), (0, 34)]]
    result_center = [
        [(2, None), (11, 0, 11), (0, 11)],
        [(3, None), (8, 12, 20), (0, 20)],
        [(1, None), (13, 21, 34), (0, 34)]]
class CalcTranslateWordTest2(CalcTranslateTest, unittest.TestCase):
    """Word wrap with a trailing space on the final line."""
    text = "It's out of control!\nYou've got to "
    mode = 'space'
    width = 14
    result_left = [
        [(11, 0, 11), (0, 11)],
        [(8, 12, 20), (0, 20)],
        [(14, 21, 35), (0, 35)]]
    result_right = [
        [(3, None), (11, 0, 11), (0, 11)],
        [(6, None), (8, 12, 20), (0, 20)],
        [(14, 21, 35), (0, 35)]]
    result_center = [
        [(2, None), (11, 0, 11), (0, 11)],
        [(3, None), (8, 12, 20), (0, 20)],
        [(14, 21, 35), (0, 35)]]
class CalcTranslateWordTest3(CalcTranslateTest, unittest.TestCase):
    """Word wrap of UTF-8 multi-byte (double-width) characters."""
    def setUp(self):
        urwid.set_encoding('utf-8')
    text = B('\xe6\x9b\xbf\xe6\xb4\xbc\n\xe6\xb8\x8e\xe6\xba\x8f\xe6\xbd\xba')
    width = 10
    mode = 'space'
    result_left = [
        [(4, 0, 6), (0, 6)],
        [(6, 7, 16), (0, 16)]]
    result_right = [
        [(6, None), (4, 0, 6), (0, 6)],
        [(4, None), (6, 7, 16), (0, 16)]]
    result_center = [
        [(3, None), (4, 0, 6), (0, 6)],
        [(2, None), (6, 7, 16), (0, 16)]]
class CalcTranslateWordTest4(CalcTranslateTest, unittest.TestCase):
    """Word wrap narrower than a word, with a leading space."""
    text = ' Die Gedank'
    width = 3
    mode = 'space'
    result_left = [
        [(0, 0)],
        [(3, 1, 4), (0, 4)],
        [(3, 5, 8)],
        [(3, 8, 11), (0, 11)]]
    result_right = [
        [(3, None), (0, 0)],
        [(3, 1, 4), (0, 4)],
        [(3, 5, 8)],
        [(3, 8, 11), (0, 11)]]
    result_center = [
        [(2, None), (0, 0)],
        [(3, 1, 4), (0, 4)],
        [(3, 5, 8)],
        [(3, 8, 11), (0, 11)]]
class CalcTranslateWordTest5(CalcTranslateTest, unittest.TestCase):
    """Word wrap where a leading space fills the whole first line."""
    text = ' Word.'
    width = 3
    mode = 'space'
    result_left = [[(3, 0, 3)], [(3, 3, 6), (0, 6)]]
    result_right = [[(3, 0, 3)], [(3, 3, 6), (0, 6)]]
    result_center = [[(3, 0, 3)], [(3, 3, 6), (0, 6)]]
class CalcTranslateClipTest(CalcTranslateTest, unittest.TestCase):
    """'clip' mode: lines are not wrapped; over-long lines get negative
    padding when right/center aligned."""
    text = "It's out of control!\nYou've got to\n\nturn it off!!!"
    mode = 'clip'
    width = 14
    result_left = [
        [(20, 0, 20), (0, 20)],
        [(13, 21, 34), (0, 34)],
        [(0, 35)],
        [(14, 36, 50), (0, 50)]]
    result_right = [
        [(-6, None), (20, 0, 20), (0, 20)],
        [(1, None), (13, 21, 34), (0, 34)],
        [(14, None), (0, 35)],
        [(14, 36, 50), (0, 50)]]
    result_center = [
        [(-3, None), (20, 0, 20), (0, 20)],
        [(1, None), (13, 21, 34), (0, 34)],
        [(7, None), (0, 35)],
        [(14, 36, 50), (0, 50)]]
class CalcTranslateCantDisplayTest(CalcTranslateTest, unittest.TestCase):
    """An undisplayable (too-wide) character yields an empty layout line."""
    text = B('Hello\xe9\xa2\x96')
    mode = 'space'
    width = 1
    result_left = [[]]
    result_right = [[]]
    result_center = [[]]
class CalcPosTest(unittest.TestCase):
    """text_layout.calc_pos: map (x, y) screen coordinates back to a text
    position, given a precomputed translation."""
    def setUp(self):
        self.text = "A" * 27
        self.trans = [
            [(2,None),(7,0,7),(0,7)],
            [(13,8,21),(0,21)],
            [(3,None),(5,22,27),(0,27)]]
        # (x, y, expected text position); out-of-range x clamps to the
        # line's first/last position.
        self.mytests = [(1,0, 0), (2,0, 0), (11,0, 7),
            (-3,1, 8), (-2,1, 8), (1,1, 9), (31,1, 21),
            (1,2, 22), (11,2, 27) ]
    def tests(self):
        for x,y, expected in self.mytests:
            got = text_layout.calc_pos( self.text, self.trans, x, y )
            assert got == expected, "%r got:%r expected:%r" % ((x, y), got,
                expected)
class Pos2CoordsTest(unittest.TestCase):
    """text_layout.calc_coords: map a text position to (x, y) screen
    coordinates for several translation shapes."""
    # Text positions to resolve for each translation below.
    pos_list = [5, 9, 20, 26]
    text = "1234567890" * 3
    # (translation, [expected (x, y) for each position in pos_list])
    mytests = [
        ( [[(15,0,15)], [(15,15,30),(0,30)]],
            [(5,0),(9,0),(5,1),(11,1)] ),
        ( [[(9,0,9)], [(12,9,21)], [(9,21,30),(0,30)]],
            [(5,0),(0,1),(11,1),(5,2)] ),
        ( [[(2,None), (15,0,15)], [(2,None), (15,15,30),(0,30)]],
            [(7,0),(11,0),(7,1),(13,1)] ),
        ( [[(3, 6, 9),(0,9)], [(5, 20, 25),(0,25)]],
            [(0,0),(3,0),(0,1),(5,1)] ),
        ( [[(10, 0, 10),(0,10)]],
            [(5,0),(9,0),(10,0),(10,0)] ),
        ]
    def test(self):
        for t, answer in self.mytests:
            for pos,a in zip(self.pos_list,answer) :
                r = text_layout.calc_coords( self.text, t, pos)
                assert r==a, "%r got: %r expected: %r"%(t,r,a)
| apache-2.0 |
yongshengwang/hue | desktop/core/ext-py/boto-2.38.0/boto/gs/resumable_upload_handler.py | 153 | 31419 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import random
import re
import socket
import time
import urlparse
from hashlib import md5
from boto import config, UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from boto.s3.keyfile import KeyFile
"""
Handler for Google Cloud Storage resumable uploads. See
http://code.google.com/apis/storage/docs/developer-guide.html#resumable
for details.
Resumable uploads will retry failed uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
The caller can optionally specify a tracker_file_name param in the
ResumableUploadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
"""
class ResumableUploadHandler(object):
    # Chunk size (bytes) used when transferring file data.
    BUFFER_SIZE = 8192
    # Transient error types that trigger a retry instead of an abort.
    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
                            socket.gaierror)
    # (start, end) response indicating server has nothing (upload protocol uses
    # inclusive numbering).
    SERVER_HAS_NOTHING = (0, -1)
def __init__(self, tracker_file_name=None, num_retries=None):
"""
Constructor. Instantiate once for each uploaded file.
:type tracker_file_name: string
:param tracker_file_name: optional file name to save tracker URI.
If supplied and the current process fails the upload, it can be
retried in a new process. If called with an existing file containing
a valid tracker URI, we'll resume the upload from this URI; else
we'll start a new resumable upload (and write the URI to this
tracker file).
:type num_retries: int
:param num_retries: the number of times we'll re-try a resumable upload
making no progress. (Count resets every time we get progress, so
upload can span many more than this number of retries.)
"""
self.tracker_file_name = tracker_file_name
self.num_retries = num_retries
self.server_has_bytes = 0 # Byte count at last server check.
self.tracker_uri = None
if tracker_file_name:
self._load_tracker_uri_from_file()
# Save upload_start_point in instance state so caller can find how
# much was transferred by this ResumableUploadHandler (across retries).
self.upload_start_point = None
def _load_tracker_uri_from_file(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
uri = f.readline().strip()
self._set_tracker_uri(uri)
except IOError as e:
# Ignore non-existent file (happens first time an upload
# is attempted on a file), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because self.tracker_uri is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'upload from scratch.' %
(self.tracker_file_name, e.strerror))
except InvalidUriError as e:
# Warn user, but proceed (will restart because
# self.tracker_uri is None).
print('Invalid tracker URI (%s) found in URI tracker file '
'(%s). Restarting upload from scratch.' %
(uri, self.tracker_file_name))
finally:
if f:
f.close()
def _save_tracker_uri_to_file(self):
"""
Saves URI to tracker file if one was passed to constructor.
"""
if not self.tracker_file_name:
return
f = None
try:
with os.fdopen(os.open(self.tracker_file_name,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.write(self.tracker_uri)
except IOError as e:
raise ResumableUploadException(
'Couldn\'t write URI tracker file (%s): %s.\nThis can happen'
'if you\'re using an incorrectly configured upload tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
def _set_tracker_uri(self, uri):
"""
Called when we start a new resumable upload or get a new tracker
URI for the upload. Saves URI and resets upload state.
Raises InvalidUriError if URI is syntactically invalid.
"""
parse_result = urlparse.urlparse(uri)
if (parse_result.scheme.lower() not in ['http', 'https'] or
not parse_result.netloc):
raise InvalidUriError('Invalid tracker URI (%s)' % uri)
self.tracker_uri = uri
self.tracker_uri_host = parse_result.netloc
self.tracker_uri_path = '%s?%s' % (
parse_result.path, parse_result.query)
self.server_has_bytes = 0
def get_tracker_uri(self):
"""
Returns upload tracker URI, or None if the upload has not yet started.
"""
return self.tracker_uri
def get_upload_id(self):
"""
Returns the upload ID for the resumable upload, or None if the upload
has not yet started.
"""
# We extract the upload_id from the tracker uri. We could retrieve the
# upload_id from the headers in the response but this only works for
# the case where we get the tracker uri from the service. In the case
# where we get the tracker from the tracking file we need to do this
# logic anyway.
delim = '?upload_id='
if self.tracker_uri and delim in self.tracker_uri:
return self.tracker_uri[self.tracker_uri.index(delim) + len(delim):]
else:
return None
def _remove_tracker_file(self):
if (self.tracker_file_name and
os.path.exists(self.tracker_file_name)):
os.unlink(self.tracker_file_name)
def _build_content_range_header(self, range_spec='*', length_spec='*'):
return 'bytes %s/%s' % (range_spec, length_spec)
    def _query_server_state(self, conn, file_length):
        """
        Queries server to find out state of given upload.

        Note that this method really just makes special case use of the
        fact that the upload server always returns the current start/end
        state whenever a PUT doesn't complete.

        Returns HTTP response from sending request.

        Raises ResumableUploadException if problem querying server.
        """
        # Send an empty PUT so that server replies with this resumable
        # transfer's state.
        put_headers = {}
        put_headers['Content-Range'] = (
            self._build_content_range_header('*', file_length))
        put_headers['Content-Length'] = '0'
        # Talk directly to the tracker host/path saved by _set_tracker_uri,
        # bypassing the normal bucket request path.
        return AWSAuthConnection.make_request(conn, 'PUT',
                                              path=self.tracker_uri_path,
                                              auth_path=self.tracker_uri_path,
                                              headers=put_headers,
                                              host=self.tracker_uri_host)
    def _query_server_pos(self, conn, file_length):
        """
        Queries server to find out what bytes it currently has.

        Returns (server_start, server_end), where the values are inclusive.
        For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.

        Raises ResumableUploadException if problem querying server.
        """
        resp = self._query_server_state(conn, file_length)
        if resp.status == 200:
            # To handle the boundary condition where the server has the complete
            # file, we return (server_start, file_length-1). That way the
            # calling code can always simply read up through server_end. (If we
            # didn't handle this boundary condition here, the caller would have
            # to check whether server_end == file_length and read one fewer byte
            # in that case.)
            return (0, file_length - 1)  # Completed upload.
        if resp.status != 308:
            # 308 ("Resume Incomplete") is the expected partial-state reply.
            # Anything else means the server didn't have any state for the given
            # upload ID, which can happen (for example) if the caller saved
            # the tracker URI to a file and then tried to restart the transfer
            # after that upload ID has gone stale. In that case we need to
            # start a new transfer (and the caller will then save the new
            # tracker URI to the tracker file).
            raise ResumableUploadException(
                'Got non-308 response (%s) from server state query' %
                resp.status, ResumableTransferDisposition.START_OVER)
        got_valid_response = False
        range_spec = resp.getheader('range')
        if range_spec:
            # Parse 'bytes=<from>-<to>' range_spec.
            m = re.search('bytes=(\d+)-(\d+)', range_spec)
            if m:
                server_start = long(m.group(1))
                server_end = long(m.group(2))
                got_valid_response = True
        else:
            # No Range header, which means the server does not yet have
            # any bytes. Note that the Range header uses inclusive 'from'
            # and 'to' values. Since Range 0-0 would mean that the server
            # has byte 0, omitting the Range header is used to indicate that
            # the server doesn't have any bytes.
            return self.SERVER_HAS_NOTHING
        if not got_valid_response:
            # A Range header was present but unparseable: treat as fatal for
            # this tracker and start over.
            raise ResumableUploadException(
                'Couldn\'t parse upload server state query response (%s)' %
                str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
        if conn.debug >= 1:
            print('Server has: Range: %d - %d.' % (server_start, server_end))
        return (server_start, server_end)
def _start_new_resumable_upload(self, key, headers=None):
"""
Starts a new resumable upload.
Raises ResumableUploadException if any errors occur.
"""
conn = key.bucket.connection
if conn.debug >= 1:
print('Starting new resumable upload.')
self.server_has_bytes = 0
# Start a new resumable upload by sending a POST request with an
# empty body and the "X-Goog-Resumable: start" header. Include any
# caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
# (and raise an exception if they tried to pass one, since it's
# a semantic error to specify it at this point, and if we were to
# include one now it would cause the server to expect that many
# bytes; the POST doesn't include the actual file bytes We set
# the Content-Length in the subsequent PUT, based on the uploaded
# file size.
post_headers = {}
for k in headers:
if k.lower() == 'content-length':
raise ResumableUploadException(
'Attempt to specify Content-Length header (disallowed)',
ResumableTransferDisposition.ABORT)
post_headers[k] = headers[k]
post_headers[conn.provider.resumable_upload_header] = 'start'
resp = conn.make_request(
'POST', key.bucket.name, key.name, post_headers)
# Get tracker URI from response 'Location' header.
body = resp.read()
# Check for various status conditions.
if resp.status in [500, 503]:
# Retry status 500 and 503 errors after a delay.
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Will wait/retry' % resp.status,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
elif resp.status != 200 and resp.status != 201:
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Aborting' % resp.status,
ResumableTransferDisposition.ABORT)
# Else we got 200 or 201 response code, indicating the resumable
# upload was created.
tracker_uri = resp.getheader('Location')
if not tracker_uri:
raise ResumableUploadException(
'No resumable tracker URI found in resumable initiation '
'POST response (%s)' % body,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
self._set_tracker_uri(tracker_uri)
self._save_tracker_uri_to_file()
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb, headers):
"""
Makes one attempt to upload file bytes, using an existing resumable
upload connection.
Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
buf = fp.read(self.BUFFER_SIZE)
if cb:
# The cb_count represents the number of full buffers to send between
# cb executions.
if num_cb > 2:
cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(total_bytes_uploaded, file_length)
# Build resumable upload headers for the transfer. Don't send a
# Content-Range header if the file is 0 bytes long, because the
# resumable upload protocol uses an *inclusive* end-range (so, sending
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
if not headers:
put_headers = {}
else:
put_headers = headers.copy()
if file_length:
if total_bytes_uploaded == file_length:
range_header = self._build_content_range_header(
'*', file_length)
else:
range_header = self._build_content_range_header(
'%d-%d' % (total_bytes_uploaded, file_length - 1),
file_length)
put_headers['Content-Range'] = range_header
# Set Content-Length to the total bytes we'll send with this PUT.
put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
http_request = AWSAuthConnection.build_base_http_request(
conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
headers=put_headers, host=self.tracker_uri_host)
http_conn.putrequest('PUT', http_request.path)
for k in put_headers:
http_conn.putheader(k, put_headers[k])
http_conn.endheaders()
# Turn off debug on http connection so upload content isn't included
# in debug stream.
http_conn.set_debuglevel(0)
while buf:
http_conn.send(buf)
for alg in self.digesters:
self.digesters[alg].update(buf)
total_bytes_uploaded += len(buf)
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes_uploaded, file_length)
i = 0
buf = fp.read(self.BUFFER_SIZE)
http_conn.set_debuglevel(conn.debug)
if cb:
cb(total_bytes_uploaded, file_length)
if total_bytes_uploaded != file_length:
# Abort (and delete the tracker file) so if the user retries
# they'll start a new resumable upload rather than potentially
# attempting to pick back up later where we left off.
raise ResumableUploadException(
'File changed during upload: EOF at %d bytes of %d byte file.' %
(total_bytes_uploaded, file_length),
ResumableTransferDisposition.ABORT)
resp = http_conn.getresponse()
# Restore http connection debug level.
http_conn.set_debuglevel(conn.debug)
if resp.status == 200:
# Success.
return (resp.getheader('etag'),
resp.getheader('x-goog-generation'),
resp.getheader('x-goog-metageneration'))
# Retry timeout (408) and status 500 and 503 errors after a delay.
elif resp.status in [408, 500, 503]:
disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
else:
# Catch all for any other error codes.
disposition = ResumableTransferDisposition.ABORT
raise ResumableUploadException('Got response code %d while attempting '
'upload (%s)' %
(resp.status, resp.reason), disposition)
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
num_cb):
"""
Attempts a resumable upload.
Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
(server_start, server_end) = self.SERVER_HAS_NOTHING
conn = key.bucket.connection
if self.tracker_uri:
# Try to resume existing resumable upload.
try:
(server_start, server_end) = (
self._query_server_pos(conn, file_length))
self.server_has_bytes = server_start
if server_end:
# If the server already has some of the content, we need to
# update the digesters with the bytes that have already been
# uploaded to ensure we get a complete hash in the end.
print('Catching up hash digest(s) for resumed upload')
fp.seek(0)
# Read local file's bytes through position server has. For
# example, if server has (0, 3) we want to read 3-0+1=4 bytes.
bytes_to_go = server_end + 1
while bytes_to_go:
chunk = fp.read(min(key.BufferSize, bytes_to_go))
if not chunk:
raise ResumableUploadException(
'Hit end of file during resumable upload hash '
'catchup. This should not happen under\n'
'normal circumstances, as it indicates the '
'server has more bytes of this transfer\nthan'
' the current file size. Restarting upload.',
ResumableTransferDisposition.START_OVER)
for alg in self.digesters:
self.digesters[alg].update(chunk)
bytes_to_go -= len(chunk)
if conn.debug >= 1:
print('Resuming transfer.')
except ResumableUploadException as e:
if conn.debug >= 1:
print('Unable to resume transfer (%s).' % e.message)
self._start_new_resumable_upload(key, headers)
else:
self._start_new_resumable_upload(key, headers)
# upload_start_point allows the code that instantiated the
# ResumableUploadHandler to find out the point from which it started
# uploading (e.g., so it can correctly compute throughput).
if self.upload_start_point is None:
self.upload_start_point = server_end
total_bytes_uploaded = server_end + 1
# Corner case: Don't attempt to seek if we've already uploaded the
# entire file, because if the file is a stream (e.g., the KeyFile
# wrapper around input key when copying between providers), attempting
# to seek to the end of file would result in an InvalidRange error.
if file_length < total_bytes_uploaded:
fp.seek(total_bytes_uploaded)
conn = key.bucket.connection
# Get a new HTTP connection (vs conn.get_http_connection(), which reuses
# pool connections) because httplib requires a new HTTP connection per
# transaction. (Without this, calling http_conn.getresponse() would get
# "ResponseNotReady".)
http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port,
conn.is_secure)
http_conn.set_debuglevel(conn.debug)
# Make sure to close http_conn at end so if a local file read
# failure occurs partway through server will terminate current upload
# and can report that progress on next attempt.
try:
return self._upload_file_bytes(conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb,
headers)
except (ResumableUploadException, socket.error):
resp = self._query_server_state(conn, file_length)
if resp.status == 400:
raise ResumableUploadException('Got 400 response from server '
'state query after failed resumable upload attempt. This '
'can happen for various reasons, including specifying an '
'invalid request (e.g., an invalid canned ACL) or if the '
'file size changed between upload attempts',
ResumableTransferDisposition.ABORT)
else:
raise
finally:
http_conn.close()
def _check_final_md5(self, key, etag):
"""
Checks that etag from server agrees with md5 computed before upload.
This is important, since the upload could have spanned a number of
hours and multiple processes (e.g., gsutil runs), and the user could
change some of the file and not realize they have inconsistent data.
"""
if key.bucket.connection.debug >= 1:
print('Checking md5 against etag.')
if key.md5 != etag.strip('"\''):
# Call key.open_read() before attempting to delete the
# (incorrect-content) key, so we perform that request on a
# different HTTP connection. This is neededb because httplib
# will return a "Response not ready" error if you try to perform
# a second transaction on the connection.
key.open_read()
key.close()
key.delete()
raise ResumableUploadException(
'File changed during upload: md5 signature doesn\'t match etag '
'(incorrect uploaded object deleted)',
ResumableTransferDisposition.ABORT)
def handle_resumable_upload_exception(self, e, debug):
if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS):
if debug >= 1:
print('Caught non-retryable ResumableUploadException (%s); '
'aborting but retaining tracker file' % e.message)
raise
elif (e.disposition == ResumableTransferDisposition.ABORT):
if debug >= 1:
print('Caught non-retryable ResumableUploadException (%s); '
'aborting and removing tracker file' % e.message)
self._remove_tracker_file()
raise
else:
if debug >= 1:
print('Caught ResumableUploadException (%s) - will retry' %
e.message)
def track_progress_less_iterations(self, server_had_bytes_before_attempt,
roll_back_md5=True, debug=0):
# At this point we had a re-tryable failure; see if made progress.
if self.server_has_bytes > server_had_bytes_before_attempt:
self.progress_less_iterations = 0 # If progress, reset counter.
else:
self.progress_less_iterations += 1
if roll_back_md5:
# Rollback any potential hash updates, as we did not
# make any progress in this iteration.
self.digesters = self.digesters_before_attempt
if self.progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableUploadException(
'Too many resumable upload attempts failed without '
'progress. You might try this upload again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Use binary exponential backoff to desynchronize client requests.
sleep_time_secs = random.random() * (2**self.progress_less_iterations)
if debug >= 1:
print('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %3.1f seconds before re-trying' %
(self.progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
    def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None):
        """
        Upload a file to a key into a bucket on GS, using GS resumable upload
        protocol. Retries in a loop as long as progress is being made,
        resuming from whatever bytes the server already has.

        :type key: :class:`boto.s3.key.Key` or subclass
        :param key: The Key object to which data is to be uploaded

        :type fp: file-like object
        :param fp: The file pointer to upload

        :type headers: dict
        :param headers: The headers to pass along with the PUT request

        :type cb: function
        :param cb: a callback function that will be called to report progress on
            the upload. The callback should accept two integer parameters, the
            first representing the number of bytes that have been successfully
            transmitted to GS, and the second representing the total number of
            bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter, this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be called
            during the file transfer. Providing a negative integer will cause
            your callback to be called with each buffer read.

        :type hash_algs: dictionary
        :param hash_algs: (optional) Dictionary mapping hash algorithm
            descriptions to corresponding state-ful hashing objects that
            implement update(), digest(), and copy() (e.g. hashlib.md5()).
            Defaults to {'md5': md5()}.

        Raises ResumableUploadException if a problem occurs during the transfer.
        """
        if not headers:
            headers = {}
        # If Content-Type header is present and set to None, remove it.
        # This is gsutil's way of asking boto to refrain from auto-generating
        # that header.
        CT = 'Content-Type'
        if CT in headers and headers[CT] is None:
            del headers[CT]
        headers['User-Agent'] = UserAgent
        # Determine file size different ways for case where fp is actually a
        # wrapper around a Key vs an actual file.
        if isinstance(fp, KeyFile):
            file_length = fp.getkey().size
        else:
            # Seek to EOF to learn the length, then rewind for the upload.
            fp.seek(0, os.SEEK_END)
            file_length = fp.tell()
            fp.seek(0)
        debug = key.bucket.connection.debug
        # Compute the MD5 checksum (and any other requested digests) on the
        # fly as bytes are sent.
        if hash_algs is None:
            hash_algs = {'md5': md5}
        self.digesters = dict(
            (alg, hash_algs[alg]()) for alg in hash_algs or {})
        # Use num-retries from constructor if one was provided; else check
        # for a value specified in the boto config file; else default to 6.
        if self.num_retries is None:
            self.num_retries = config.getint('Boto', 'num_retries', 6)
        self.progress_less_iterations = 0
        while True:  # Retry as long as we're making progress.
            # Snapshot server progress and digest state so a failed attempt
            # can be detected and the digests rolled back.
            server_had_bytes_before_attempt = self.server_has_bytes
            self.digesters_before_attempt = dict(
                (alg, self.digesters[alg].copy())
                for alg in self.digesters)
            try:
                # Save generation and metageneration in class state so caller
                # can find these values, for use in preconditions of future
                # operations on the uploaded object.
                (etag, self.generation, self.metageneration) = (
                    self._attempt_resumable_upload(key, fp, file_length,
                                                   headers, cb, num_cb))
                # Get the final digests for the uploaded content.
                for alg in self.digesters:
                    key.local_hashes[alg] = self.digesters[alg].digest()
                # Upload succeeded, so remove the tracker file (if have one).
                self._remove_tracker_file()
                self._check_final_md5(key, etag)
                key.generation = self.generation
                if debug >= 1:
                    print('Resumable upload complete.')
                return
            except self.RETRYABLE_EXCEPTIONS as e:
                if debug >= 1:
                    print('Caught exception (%s)' % e.__repr__())
                if isinstance(e, IOError) and e.errno == errno.EPIPE:
                    # Broken pipe error causes httplib to immediately
                    # close the socket (http://bugs.python.org/issue5542),
                    # so we need to close the connection before we resume
                    # the upload (which will cause a new connection to be
                    # opened the next time an HTTP request is sent).
                    key.bucket.connection.connection.close()
            except ResumableUploadException as e:
                # May re-raise (abort) or return (retry), per disposition.
                self.handle_resumable_upload_exception(e, debug)
            self.track_progress_less_iterations(server_had_bytes_before_attempt,
                                                True, debug)
| apache-2.0 |
WojciechFocus/MMVD | mmvdApp/visualization.py | 1 | 20958 | # coding: utf-8
__author__ = 'wojciech'
import time
from Tkinter import *
class Visualization(Frame):
    """Tkinter view of a warehouse ("magazine"): a square grid of tiles
    decorated with direction arrows, shelves and an exit marker, plus
    movable robots.

    ``map_`` is a 2-D sequence. Cell values 1-4 draw double arrows
    (up/right/down/left respectively), 9 draws the exit marker, and any
    other value is treated as a shelf name and drawn as a labelled
    rectangle. (Module is Python 2: Tkinter star-import, integer division.)
    """

    # Per-frame pixel offsets (dx, dy) for each direction code used by
    # animate(); unknown codes fall back to (0, 0) for robots.
    _STEP_OFFSETS = {1: (0, -1), 2: (1, 0), 3: (0, 1), 4: (-1, 0)}

    # Per-direction arrowhead geometry: two 3-point polylines, each point
    # given as (x, y) fractions of the tile edge length.
    _ARROW_FRACTIONS = {
        1: (((0.35, 0.15), (0.5, 0.05), (0.65, 0.15)),
            ((0.35, 0.25), (0.5, 0.15), (0.65, 0.25))),
        2: (((0.85, 0.35), (0.95, 0.5), (0.85, 0.65)),
            ((0.75, 0.35), (0.85, 0.5), (0.75, 0.65))),
        3: (((0.35, 0.85), (0.5, 0.95), (0.65, 0.85)),
            ((0.35, 0.75), (0.5, 0.85), (0.65, 0.75))),
        4: (((0.15, 0.35), (0.05, 0.5), (0.15, 0.65)),
            ((0.25, 0.35), (0.15, 0.5), (0.25, 0.65))),
    }

    # Outline of the exit marker (a big dotted X), as (x, y) tile fractions.
    _EXIT_FRACTIONS = ((0.12, 0.21), (0.21, 0.12), (0.5, 0.4),
                       (0.79, 0.12), (0.88, 0.21), (0.6, 0.5),
                       (0.88, 0.79), (0.79, 0.88), (0.5, 0.6),
                       (0.21, 0.88), (0.12, 0.79), (0.4, 0.5))

    def __init__(self, map_):
        self.master = Tk()
        Frame.__init__(self, self.master)
        self.magazine_frame = Frame(self.master)
        self.magazine_frame.pack(side=LEFT)
        self.control_frame = Frame(self.master)
        self.control_frame.pack()
        # The magazine is rendered as a square; the original assigned
        # "width" and "height" to the same attribute twice, so a single
        # edge length is kept here.
        self.magazine_len = 500
        self.x_dim = len(map_)       # number of rows
        self.y_dim = len(map_[0])    # number of columns
        self.max_dim = max(self.y_dim, self.x_dim)
        self.robotspeed = 10
        self.robot_number = 0        # id assigned to the next drawn robot
        # Integer tile edge length in pixels (Python 2 int division).
        self.tile_len = self.magazine_len / self.max_dim
        self.canvas = Canvas(self.magazine_frame, width=self.magazine_len,
                             height=self.magazine_len)
        speed_scale = Scale(self.control_frame,
                            from_=1,
                            to=100,
                            orient=HORIZONTAL,
                            length=200,
                            label="Robot speed",
                            sliderlength=20,
                            command=self.speed_scale_command)
        speed_scale.set(10)
        speed_scale.pack()
        stop_button = Button(self.control_frame,
                             text="STOP",
                             command=self.end,
                             borderwidth=3)
        stop_button.pack()
        self.variable = StringVar(self.master)
        self.variable.set("500x500")
        self.w = OptionMenu(self.master, self.variable, "100x100", "200x200",
                            "300x300", "400x400", "500x500", "600x600",
                            "700x700", "800x800", "900x900")
        self.w.pack()
        self.canvas.pack()
        # Tiles first (bottom layer), then direction arrows / exit / shelves.
        self._draw_tiles(map_)
        self._draw_markers(map_)

    def _draw_tiles(self, map_):
        # Tiles are the bottom layer of the magazine. Moving them around is
        # not recommended, but each is addressable via its "tile<x><y>" tag
        # (column index first), starting from (0, 0).
        for i in range(len(map_)):
            for j in range(len(map_[i])):
                self.canvas.create_rectangle(
                    self.tile_len * j,
                    self.tile_len * i,
                    self.tile_len * j + self.tile_len,
                    self.tile_len * i + self.tile_len,
                    fill="grey",
                    tag="tile" + str(j) + str(i))
        self.canvas.update()

    def _draw_markers(self, map_):
        # Draws all directions/exit/shelf markers according to the map
        # given to the algorithm.
        for i in range(len(map_)):
            for j in range(len(map_[i])):
                cell = map_[i][j]
                if cell in self._ARROW_FRACTIONS:
                    self._draw_double_arrow(j, i, cell)
                elif cell == 9:
                    self._draw_exit(j, i)
                else:
                    self._draw_shelf(j, i, cell)
        self.canvas.update()

    def _draw_double_arrow(self, j, i, direction):
        # Two parallel arrowheads pointing in the tile's travel direction.
        for line in self._ARROW_FRACTIONS[direction]:
            coords = []
            for x_frac, y_frac in line:
                coords.append(self.tile_len * j + x_frac * self.tile_len)
                coords.append(self.tile_len * i + y_frac * self.tile_len)
            self.canvas.create_line(
                *coords,
                fill="darkgreen", width=4, tag="dir" + str(j) + str(i))

    def _draw_exit(self, j, i):
        # Exit point drawn as a big dotted X.
        coords = []
        for x_frac, y_frac in self._EXIT_FRACTIONS:
            coords.append(self.tile_len * j + x_frac * self.tile_len)
            coords.append(self.tile_len * i + y_frac * self.tile_len)
        self.canvas.create_polygon(
            *coords,
            fill="darkred", outline="black", stipple='gray75', width=3)

    def _draw_shelf(self, j, i, name):
        # Any other cell value is a shelf; the value doubles as the label.
        # str() guards against non-string map entries: the original
        # concatenated the raw value into the tag, which raised TypeError
        # for integer cells.
        self.canvas.create_rectangle(
            self.tile_len * j + 0.25 * self.tile_len,
            self.tile_len * i + 0.25 * self.tile_len,
            self.tile_len * j + 0.75 * self.tile_len,
            self.tile_len * i + 0.75 * self.tile_len,
            fill="red", tag="shelf" + str(name),
        )
        self.canvas.create_text(
            self.tile_len * j + 0.5 * self.tile_len,
            self.tile_len * i + 0.5 * self.tile_len,
            anchor="center", text=name,
            tag="shelf_text" + str(name)
        )

    def draw_robots(self, robot_list):
        """
        Draws new robots; use this to introduce robots to the magazine.
        Each robot gets a consecutive integer id (its position in
        robot_list, continuing across calls) used to address it later.

        :param robot_list: list of (y, x) robot positions
        :return: None
        """
        for pos_y, pos_x in robot_list:
            self.canvas.create_oval(
                pos_x * self.tile_len + self.tile_len * 0.2,
                pos_y * self.tile_len + self.tile_len * 0.2,
                pos_x * self.tile_len + self.tile_len * 0.8,
                pos_y * self.tile_len + self.tile_len * 0.8,
                fill="blue", tag="robot" + str(self.robot_number)
            )
            self.robot_number += 1

    def hide_shelf(self, shelf_id):
        """
        Removes the shelf with shelf_id from view (lowered below the
        tiles); it stays in memory and reappears when moved.

        :param shelf_id: name of shelf to be hidden
        :return: None
        """
        self.canvas.lower("shelf" + str(shelf_id))
        self.canvas.lower("shelf_text" + str(shelf_id))

    def hide_robot(self, robot_id):
        """
        Removes the robot with robot_id from view (lowered below the
        tiles); it stays in memory and reappears when moved.

        :param robot_id: id of robot to be hidden
        :return: None
        """
        self.canvas.lower("robot" + str(robot_id))

    def _animate_shelf(self, shelf, direction):
        # One animation frame for a single shelf (name already a string).
        if isinstance(direction, tuple):
            # Move to absolute position.
            # CAUTION: axes are reversed, ie. direction = (yPos, xPos)
            pos_y, pos_x = direction
            self.canvas.coords(
                "shelf" + shelf,
                pos_x * self.tile_len + 0.25 * self.tile_len,
                pos_y * self.tile_len + 0.25 * self.tile_len,
                pos_x * self.tile_len + 0.75 * self.tile_len,
                pos_y * self.tile_len + 0.75 * self.tile_len,
            )
            self.canvas.coords(
                "shelf_text" + shelf,
                pos_x * self.tile_len + 0.5 * self.tile_len,
                pos_y * self.tile_len + 0.5 * self.tile_len,
            )
            self.canvas.tag_raise("shelf" + shelf)
            self.canvas.tag_raise("shelf_text" + shelf)
        elif direction == 0:
            self.hide_shelf(shelf)
        else:
            dx, dy = self._STEP_OFFSETS.get(direction, (0, 0))
            self.canvas.move('shelf' + shelf, dx, dy)
            self.canvas.move('shelf_text' + shelf, dx, dy)
            # Raise only when actually moving: the original raised
            # unconditionally, which immediately undid the lower() done by
            # hide_shelf() for direction 0 and re-revealed hidden shelves.
            self.canvas.tag_raise("shelf" + shelf)
            self.canvas.tag_raise("shelf_text" + shelf)

    def _animate_robot(self, robot, direction):
        # One animation frame for a single robot (id already a string).
        if isinstance(direction, tuple):
            # Move to absolute position.
            # CAUTION: axes are reversed, ie. direction = (yPos, xPos)
            pos_y, pos_x = direction
            self.canvas.coords(
                "robot" + robot,
                pos_x * self.tile_len + self.tile_len * 0.2,
                pos_y * self.tile_len + self.tile_len * 0.2,
                pos_x * self.tile_len + self.tile_len * 0.8,
                pos_y * self.tile_len + self.tile_len * 0.8,
            )
        else:
            # Direction 0 is a deliberate no-op for robots (hide disabled
            # in the original).
            dx, dy = self._STEP_OFFSETS.get(direction, (0, 0))
            self.canvas.move('robot' + robot, dx, dy)

    def animate(self, update_robots, update_shelfs):
        """
        Smoothly moves robots and shelves one tile, one pixel per frame.

        :param update_robots: list of (robot_id, direction) pairs, where
            robot_id comes from draw_robots() and direction is 1-4
            (up/right/down/left), 0 (no-op) or an absolute (y, x) tuple.
            Moves are assumed valid and robots assumed to exist.
        :param update_shelfs: list of (shelf_name, direction) pairs with
            the same direction codes, except 0 hides the shelf. A hidden
            shelf reappears when moved.
        :return: None
        """
        for _ in range(self.tile_len):
            time.sleep(.05 / self.robotspeed)
            for shelf, direction in update_shelfs:
                self._animate_shelf(str(shelf), direction)
            for robot, direction in update_robots:
                self._animate_robot(str(robot), direction)
            self.canvas.update()
            # Apply any size chosen in the OptionMenu (no-op if unchanged),
            # preserving the original per-frame behavior.
            self.resize_magazine(self.variable.get())
            time.sleep(0.5 / self.robotspeed)

    def speed_scale_command(self, value):
        # Bound to the speed Scale widget; Tkinter passes value as a string.
        self.robotspeed = int(value)

    def end(self):
        # "STOP" button handler: hands control to the Tk event loop.
        self.mainloop()

    def resize_magazine(self, val):
        """Rescales the whole canvas to the "<size>x<size>" option in val.

        Replaces nine copy-pasted branches (100x100 ... 900x900) with a
        single parse-and-scale; unrecognized values are ignored, matching
        the original's fall-through behavior.
        """
        try:
            size = int(val.split('x')[0])
        except (AttributeError, ValueError):
            return
        if size == self.magazine_len:
            return  # rescaling by 1.0 would be a no-op; skip the work
        factor = float(size) / self.magazine_len
        self.canvas.scale("all", 0, 0, factor, factor)
        self.magazine_len = size
        # Divide by max_dim (as __init__ does) so tile_len matches the
        # on-screen tile size for non-square maps; the original divided by
        # y_dim after a dead x_dim assignment, desynchronizing animate()'s
        # per-tile step count from the drawn tile size.
        self.tile_len = self.magazine_len / self.max_dim
        self.canvas.config(width=self.magazine_len,
                           height=self.magazine_len)
| mit |
dongjoon-hyun/tensorflow | tensorflow/python/keras/utils/io_utils.py | 23 | 4871 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities related to disk I/O."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
import six
from tensorflow.python.util.tf_export import tf_export
try:
import h5py
except ImportError:
h5py = None
@tf_export('keras.utils.HDF5Matrix')
class HDF5Matrix(object):
  """Representation of HDF5 dataset to be used instead of a Numpy array.

  Example:

  ```python
      x_data = HDF5Matrix('input/file.hdf5', 'data')
      model.predict(x_data)
  ```

  Providing `start` and `end` allows use of a slice of the dataset.

  Optionally, a normalizer function (or lambda) can be given. This will
  be called on every slice of data retrieved.

  Arguments:
      datapath: string, path to a HDF5 file
      dataset: string, name of the HDF5 dataset in the file specified
          in datapath
      start: int, start of desired slice of the specified dataset
      end: int, end of desired slice of the specified dataset
      normalizer: function to be called on data when retrieved

  Returns:
      An array-like HDF5 dataset.
  """
  # Class-level cache of open h5py.File handles keyed by file path, shared
  # by all instances so each file is opened at most once per process.
  # (defaultdict(int) only provides the "key not present yet" default; the
  # stored values are h5py.File objects once a path is opened.)
  refs = defaultdict(int)

  def __init__(self, datapath, dataset, start=0, end=None, normalizer=None):
    if h5py is None:
      raise ImportError('The use of HDF5Matrix requires '
                        'HDF5 and h5py installed.')

    if datapath not in list(self.refs.keys()):
      # First use of this file: open it and memoize the handle in the
      # class-level cache.
      f = h5py.File(datapath)
      self.refs[datapath] = f
    else:
      f = self.refs[datapath]
    # h5py Dataset object; data is read lazily on indexing.
    self.data = f[dataset]
    self.start = start
    if end is None:
      # Default to the full first axis of the dataset.
      self.end = self.data.shape[0]
    else:
      self.end = end
    self.normalizer = normalizer

  def __len__(self):
    return self.end - self.start

  def __getitem__(self, key):
    # Indices are interpreted relative to `self.start`; bounds are checked
    # against `self.end` before touching the underlying HDF5 dataset.
    if isinstance(key, slice):
      start, stop = key.start, key.stop
      if start is None:
        start = 0
      if stop is None:
        stop = self.shape[0]
      if stop + self.start <= self.end:
        idx = slice(start + self.start, stop + self.start)
      else:
        raise IndexError
    elif isinstance(key, (int, np.integer)):
      if key + self.start < self.end:
        idx = key + self.start
      else:
        raise IndexError
    elif isinstance(key, np.ndarray):
      if np.max(key) + self.start < self.end:
        # h5py fancy indexing expects a plain list of indices.
        idx = (self.start + key).tolist()
      else:
        raise IndexError
    else:
      # Assume list/iterable of integer indices.
      if max(key) + self.start < self.end:
        idx = [x + self.start for x in key]
      else:
        raise IndexError
    if self.normalizer is not None:
      return self.normalizer(self.data[idx])
    else:
      return self.data[idx]

  @property
  def shape(self):
    """Gets a numpy-style shape tuple giving the dataset dimensions.

    Returns:
        A numpy-style shape tuple.
    """
    # First axis reflects the selected [start, end) window; remaining axes
    # come straight from the underlying dataset.
    return (self.end - self.start,) + self.data.shape[1:]

  @property
  def dtype(self):
    """Gets the datatype of the dataset.

    Returns:
        A numpy dtype string.
    """
    return self.data.dtype

  @property
  def ndim(self):
    """Gets the number of dimensions (rank) of the dataset.

    Returns:
        An integer denoting the number of dimensions (rank) of the dataset.
    """
    return self.data.ndim

  @property
  def size(self):
    """Gets the total dataset size (number of elements).

    Returns:
        An integer denoting the number of elements in the dataset.
    """
    return np.prod(self.shape)
def ask_to_proceed_with_overwrite(filepath):
  """Produces a prompt asking about overwriting a file.

  Arguments:
      filepath: the path to the file to be overwritten.

  Returns:
      True if we can proceed with overwrite, False otherwise.
  """
  answer = six.moves.input('[WARNING] %s already exists - overwrite? '
                           '[y/n]' % (filepath)).strip().lower()
  # Re-prompt until the user gives an unambiguous answer.
  while answer not in ('y', 'n'):
    answer = six.moves.input('Enter "y" (overwrite) or "n" '
                             '(cancel).').strip().lower()
  if answer == 'n':
    return False
  print('[TIP] Next time specify overwrite=True!')
  return True
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/wsgi/tests.py | 12 | 4173 | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.core.servers.basehttp import get_internal_wsgi_application
from django.core.signals import request_started
from django.core.wsgi import get_wsgi_application
from django.db import close_old_connections
from django.test import SimpleTestCase, override_settings
from django.test.client import RequestFactory
@override_settings(ROOT_URLCONF='wsgi.urls')
class WSGITest(SimpleTestCase):
    """Tests for the WSGI callable returned by get_wsgi_application()."""

    def setUp(self):
        # Detach the connection-closing handler so the simulated request
        # does not close the test database connection.
        request_started.disconnect(close_old_connections)

    def tearDown(self):
        # Restore the default signal wiring for other tests.
        request_started.connect(close_old_connections)

    def test_get_wsgi_application(self):
        """
        get_wsgi_application() returns a functioning WSGI callable.
        """
        application = get_wsgi_application()

        # Build a minimal WSGI environ for GET / without going through
        # the test client.
        environ = RequestFactory()._base_environ(
            PATH_INFO="/",
            CONTENT_TYPE="text/html; charset=utf-8",
            REQUEST_METHOD="GET"
        )

        response_data = {}

        def start_response(status, headers):
            # Capture what the application reports back via the WSGI
            # start_response callable.
            response_data["status"] = status
            response_data["headers"] = headers

        response = application(environ, start_response)

        self.assertEqual(response_data["status"], "200 OK")
        self.assertEqual(
            set(response_data["headers"]),
            {('Content-Length', '12'), ('Content-Type', 'text/html; charset=utf-8')})
        # Header ordering in the serialized response is not guaranteed, so
        # accept either ordering of the two headers.
        self.assertIn(bytes(response), [
            b"Content-Length: 12\r\nContent-Type: text/html; charset=utf-8\r\n\r\nHello World!",
            b"Content-Type: text/html; charset=utf-8\r\nContent-Length: 12\r\n\r\nHello World!"
        ])

    def test_file_wrapper(self):
        """
        FileResponse uses wsgi.file_wrapper.
        """

        class FileWrapper(object):
            # Stand-in for a server-provided wsgi.file_wrapper; closing the
            # file-like object immediately is enough for this test.
            def __init__(self, filelike, blksize=8192):
                filelike.close()

        application = get_wsgi_application()
        environ = RequestFactory()._base_environ(
            PATH_INFO='/file/',
            REQUEST_METHOD='GET',
            **{'wsgi.file_wrapper': FileWrapper}
        )
        response_data = {}

        def start_response(status, headers):
            response_data['status'] = status
            response_data['headers'] = headers

        response = application(environ, start_response)
        self.assertEqual(response_data['status'], '200 OK')
        # The response object itself must be the wrapper instance,
        # proving the application honored environ['wsgi.file_wrapper'].
        self.assertIsInstance(response, FileWrapper)
class GetInternalWSGIApplicationTest(SimpleTestCase):
    """Tests for get_internal_wsgi_application() / the WSGI_APPLICATION setting."""

    @override_settings(WSGI_APPLICATION="wsgi.wsgi.application")
    def test_success(self):
        """
        If ``WSGI_APPLICATION`` is a dotted path, the referenced object is
        returned.
        """
        app = get_internal_wsgi_application()

        from .wsgi import application

        self.assertIs(app, application)

    @override_settings(WSGI_APPLICATION=None)
    def test_default(self):
        """
        If ``WSGI_APPLICATION`` is ``None``, the return value of
        ``get_wsgi_application`` is returned.
        """
        # Mock out get_wsgi_application so we know its return value is used
        fake_app = object()

        def mock_get_wsgi_app():
            return fake_app
        from django.core.servers import basehttp
        _orig_get_wsgi_app = basehttp.get_wsgi_application
        basehttp.get_wsgi_application = mock_get_wsgi_app

        try:
            app = get_internal_wsgi_application()

            self.assertIs(app, fake_app)
        finally:
            # Always undo the monkeypatch, even if the assertion fails.
            basehttp.get_wsgi_application = _orig_get_wsgi_app

    @override_settings(WSGI_APPLICATION="wsgi.noexist.app")
    def test_bad_module(self):
        # Unimportable module -> ImproperlyConfigured with the dotted path.
        msg = "WSGI application 'wsgi.noexist.app' could not be loaded; Error importing"
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            get_internal_wsgi_application()

    @override_settings(WSGI_APPLICATION="wsgi.wsgi.noexist")
    def test_bad_name(self):
        # Importable module but missing attribute -> same error shape.
        msg = "WSGI application 'wsgi.wsgi.noexist' could not be loaded; Error importing"
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            get_internal_wsgi_application()
| apache-2.0 |
binhex/moviegrabber | lib/site-packages/sqlalchemy/testing/warnings.py | 78 | 1679 | # testing/warnings.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
import warnings
from .. import exc as sa_exc
from .. import util
import re
def testing_warn(msg, stacklevel=3):
    """Replaces sqlalchemy.util.warn during tests.

    ``msg`` may be a plain string (warned as SAWarning) or a Warning
    instance (warned with its own class as the category).
    """
    filename = "sqlalchemy.testing.warnings"
    lineno = 1
    if isinstance(msg, util.string_types):
        warnings.warn_explicit(msg, sa_exc.SAWarning, filename, lineno)
    else:
        # warn_explicit() takes (message, category, filename, lineno); the
        # original passed ``filename`` in the category slot and dropped
        # ``lineno``, raising TypeError whenever msg was a Warning instance.
        warnings.warn_explicit(msg, msg.__class__, filename, lineno)
def resetwarnings():
    """Reset warning behavior to testing defaults."""

    # Route both public warn entry points through the test shim above.
    util.warn = util.langhelpers.warn = testing_warn

    # Filter order matters: filterwarnings() prepends, so the last call has
    # the highest precedence. SA deprecation/runtime warnings become errors;
    # pending deprecations are ignored.
    warnings.filterwarnings('ignore',
                            category=sa_exc.SAPendingDeprecationWarning)
    warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning)
    warnings.filterwarnings('error', category=sa_exc.SAWarning)
def assert_warnings(fn, warnings, regex=False):
    """Assert that each of the given warnings are emitted by fn.

    ``warnings`` is a list of expected messages (regular expressions when
    ``regex`` is True), consumed in order as ``fn`` emits them.
    """
    from .assertions import eq_, emits_warning

    canary = []
    orig_warn = util.warn

    def capture_warnings(*args, **kw):
        # Forward to the real warn, then match the message against the next
        # expected entry.
        orig_warn(*args, **kw)
        popwarn = warnings.pop(0)
        canary.append(popwarn)
        if regex:
            assert re.match(popwarn, args[0])
        else:
            eq_(args[0], popwarn)
    util.warn = util.langhelpers.warn = capture_warnings

    try:
        result = emits_warning()(fn)()
    finally:
        # The original left capture_warnings installed permanently, so any
        # warning emitted later popped from an exhausted list (IndexError).
        # Always restore the previous warn implementation.
        util.warn = util.langhelpers.warn = orig_warn
    assert canary, "No warning was emitted"
    return result
| gpl-3.0 |
ericdill/scikit-beam-examples | demos/1_time_correlation/one-time-correlation.py | 3 | 2993 |
import numpy as np
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import matplotlib.patches as mp
import skxray.core.correlation as corr
import skxray.core.roi as roi
# it would be great to have a link to what this multi-tau scheme is!
# Multi-tau correlation parameters: number of dyadic levels and buffers
# per level used by multi_tau_auto_corr below.
num_levels = 7
num_bufs = 8

# load the data
# NOTE(review): assumed to be a 3-D stack (frame, row, col) -- the first
# axis is indexed per-frame and shape[1:] is used as the image dimensions.
img_stack = np.load("100_500_NIPA_GEL.npy")

# plot the first image to make sure the data loaded correctly
fig, ax = plt.subplots()
ax.imshow(img_stack[0])
ax.set_title("NIPA_GEL_250K")

# define the ROIs: concentric ring-shaped regions around (y_center, x_center)
roi_start = 65 # in pixels
roi_width = 9 # in pixels
roi_spacing = (5.0, 4.0)
x_center = 7. # in pixels
y_center = (129.) # in pixels
num_rings = 3

# get the edges of the rings
edges = roi.ring_edges(roi_start, width=roi_width,
                       spacing=roi_spacing, num_rings=num_rings)

# get the label array from the ring shaped 3 region of interests(ROI's)
labeled_roi_array = roi.rings(
    edges, (y_center, x_center), img_stack.shape[1:])

# extract the ROI's labels and pixel indices corresponding to those labels
roi_indices, pixel_list = corr.extract_label_indices(labeled_roi_array)
# NOTE: this section previously repeated, verbatim, the ROI definition
# directly above (same constants, same ring edges, same labeled array and
# pixel extraction). Re-running it recomputed identical values into the
# same names, so the redundant copy has been removed.
def overlay_rois(ax, inds, pix_list, img_dim, image):
    """Plot the required ROIs on top of ``image`` on the given axes.

    Parameters
    ----------
    ax : matplotlib axes to draw on
    inds : label values for each listed pixel
    pix_list : flat (raveled) pixel indices belonging to the ROIs
    img_dim : 2-tuple, image dimensions used to reshape the label overlay
    image : 2-D array drawn underneath the ROI labels
    """
    # Flat NaN canvas; only ROI pixels get a (finite) label value, so the
    # rest of the overlay stays transparent.
    tt = np.zeros(img_dim).ravel() * np.nan
    tt[pix_list] = inds
    # Draw the base image first (log scale), then the labels on top.
    # (The original bound both imshow results to an unused variable.)
    ax.imshow(image, interpolation='none', norm=LogNorm())
    ax.imshow(tt.reshape(*img_dim), cmap='Paired',
              interpolation='nearest')
# colors used for the per-ring g2 markers below
roi_names = ['gray', 'orange', 'brown']

tt = np.zeros(img_stack.shape[1:]).ravel()
tt[pixel_list] = roi_indices

# show the rings overlaid on the first frame
fig, ax = plt.subplots()
ax.set_title("NIPA_GEL_250K")
overlay_rois(ax, roi_indices, pixel_list,
             img_stack.shape[1:], img_stack[0])

# g2 one time correlation results for 3 ROI's
g2, lag_steps = corr.multi_tau_auto_corr(
    num_levels, num_bufs, labeled_roi_array, img_stack)

# lag_steps are delays for multiple tau analysis; convert to seconds
lag_time = 0.001
lag_step = lag_steps[:g2.shape[0]]
lags = lag_step*lag_time

# one semilog panel per ring, shared x axis
fig, axes = plt.subplots(num_rings, sharex=True, figsize=(5,10))
axes[num_rings-1].set_xlabel("lags")
for i, roi_color in zip(range(num_rings), roi_names):
    axes[i].set_ylabel("g2")
    axes[i].semilogx(lags, g2[:, i], 'o', markerfacecolor=roi_color, markersize=10)
    # skip the zero-lag point when choosing the y limit
    axes[i].set_ylim(bottom=1, top=np.max(g2[1:, i]))
plt.show()
chen0510566/MissionPlanner | Lib/site-packages/numpy/distutils/command/scons.py | 93 | 24248 | import os
import sys
import os.path
from os.path import join as pjoin, dirname as pdirname
from distutils.errors import DistutilsPlatformError
from distutils.errors import DistutilsExecError, DistutilsSetupError
from numpy.distutils.command.build_ext import build_ext as old_build_ext
from numpy.distutils.ccompiler import CCompiler, new_compiler
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils import log
from numpy.distutils.misc_util import is_bootstrapping, get_cmd
from numpy.distutils.misc_util import get_numpy_include_dirs as _incdir
from numpy.distutils.compat import get_exception
# A few notes:
# - numscons is not mandatory to build numpy, so we cannot import it here.
# Any numscons import has to happen once we check numscons is available and
# is required for the build (call through setupscons.py or native numscons
# build).
# NOTE: numscons is imported lazily inside each helper because it is an
# optional dependency (see the note at the top of the module).

def get_scons_build_dir():
    """Return the top path where everything produced by scons will be put.

    The path is relative to the top setup.py"""
    from numscons import get_scons_build_dir
    return get_scons_build_dir()

def get_scons_pkg_build_dir(pkg):
    """Return the build directory for the given package (foo.bar).

    The path is relative to the top setup.py"""
    from numscons.core.utils import pkg_to_path
    return pjoin(get_scons_build_dir(), pkg_to_path(pkg))

def get_scons_configres_dir():
    """Return the directory holding scons configuration results
    (as reported by numscons).

    The path is relative to the top setup.py"""
    from numscons import get_scons_configres_dir
    return get_scons_configres_dir()

def get_scons_configres_filename():
    """Return the file name used for scons configuration results
    (as reported by numscons)."""
    from numscons import get_scons_configres_filename
    return get_scons_configres_filename()

def get_scons_local_path():
    """This returns the full path where scons.py for scons-local is located."""
    from numscons import get_scons_path
    return get_scons_path()
def _get_top_dir(pkg):
    """Return the relative path (a chain of '..') that climbs from the
    package's scons build directory back to the top setup.py directory."""
    # XXX: this mess is necessary because scons is launched per package, and
    # has no knowledge outside its build dir, which is package dependent. If
    # one day numscons does not launch one process/package, this will be
    # unnecessary.
    from numscons import get_scons_build_dir
    from numscons.core.utils import pkg_to_path
    scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg))
    n = scdir.count(os.sep)

    # n separators means n+1 path components, hence n+1 levels of '..'.
    return os.sep.join([os.pardir for i in range(n+1)])
def get_distutils_libdir(cmd, pkg):
    """Returns the path where distutils install libraries, relatively to the
    scons build directory."""
    return pjoin(_get_top_dir(pkg), cmd.build_lib)

def get_distutils_clibdir(cmd, pkg):
    """Returns the path where distutils put pure C libraries,
    relatively to the scons build directory."""
    return pjoin(_get_top_dir(pkg), cmd.build_clib)

def get_distutils_install_prefix(pkg, inplace):
    """Returns the installation path for the current package.

    For an in-place build this is simply the package's own source path;
    otherwise it is derived from the finalized 'install' command."""
    from numscons.core.utils import pkg_to_path
    if inplace == 1:
        return pkg_to_path(pkg)
    else:
        install_cmd = get_cmd('install').get_finalized_command('install')
        return pjoin(install_cmd.install_libbase, pkg_to_path(pkg))
def get_python_exec_invoc():
    """This returns the python executable from which this file is invocated."""
    # Do we need to take into account the PYTHONPATH, in a cross platform way,
    # that is the string returned can be executed directly on supported
    # platforms, and the sys.path of the executed python should be the same
    # than the caller ? This may not be necessary, since os.system is said to
    # take into accound os.environ. This actually also works for my way of
    # using "local python", using the alias facility of bash.
    return sys.executable

def get_numpy_include_dirs(sconscript_path):
    """Return include dirs for numpy.

    The paths are relatively to the setup.py script path."""
    from numscons import get_scons_build_dir
    scdir = pjoin(get_scons_build_dir(), pdirname(sconscript_path))
    n = scdir.count(os.sep)

    dirs = _incdir()
    rdirs = []
    for d in dirs:
        # Prefix each include dir with enough '..' components to climb from
        # the scons build dir back up to the setup.py directory.
        rdirs.append(pjoin(os.sep.join([os.pardir for i in range(n+1)]), d))
    return rdirs
def dirl_to_str(dirlist):
    """Concatenate a list of directories with the OS path separator.

    example: ['foo/bar', 'bar/foo'] will return 'foo/bar:bar/foo' on POSIX.
    """
    separator = os.pathsep
    return separator.join(dirlist)
def dist2sconscc(compiler):
    """Translate a distutils compiler name to the scons convention (C
    compiler). ``compiler`` should be a CCompiler instance.

    Example:
        --compiler=intel -> intelc"""
    # Known distutils -> scons renames; anything else keeps the name of the
    # underlying compiler executable.
    renames = {'msvc': 'msvc', 'intel': 'intelc'}
    ctype = compiler.compiler_type
    if ctype in renames:
        return renames[ctype]
    return compiler.compiler[0]
def dist2sconsfc(compiler):
"""This converts the name passed to distutils to scons name convention
(Fortran compiler). The argument should be a FCompiler instance.
Example:
--fcompiler=intel -> ifort on linux, ifl on windows"""
if compiler.compiler_type == 'intel':
#raise NotImplementedError('FIXME: intel fortran compiler name ?')
return 'ifort'
elif compiler.compiler_type == 'gnu':
return 'g77'
elif compiler.compiler_type == 'gnu95':
return 'gfortran'
elif compiler.compiler_type == 'sun':
return 'sunf77'
else:
# XXX: Just give up for now, and use generic fortran compiler
return 'fortran'
def dist2sconscxx(compiler):
    """Translate a distutils compiler name to the scons convention
    (C++ compiler). The argument should be a Compiler instance."""
    ctype = compiler.compiler_type
    # msvc keeps its distutils name; everything else is named after the
    # C++ compiler executable.
    return 'msvc' if ctype == 'msvc' else compiler.compiler_cxx[0]
def get_compiler_executable(compiler):
    """Return the name of the C compiler executable for a given CCompiler
    instance.

    NOTE: does NOT work with FCompiler instances."""
    # distutils has no uniform way to expose the compiler name. For msvc the
    # name is hardcoded: initializing the instance just to read compiler.cc
    # would be needlessly costly when all we want is a string.
    if compiler.compiler_type == 'msvc':
        return 'cl.exe'
    return compiler.compiler[0]
def get_f77_compiler_executable(compiler):
    """Return the name of the F77 compiler executable for a given FCompiler
    instance."""
    command = compiler.compiler_f77
    return command[0]
def get_cxxcompiler_executable(compiler):
    """Return the name of the C++ compiler executable for a given CCompiler
    instance.

    NOTE: does NOT work with FCompiler instances."""
    # Same caveat as get_compiler_executable: distutils exposes no common
    # accessor, and msvc's name is hardcoded to avoid a costly initialize().
    if compiler.compiler_type == 'msvc':
        return 'cl.exe'
    return compiler.compiler_cxx[0]
def get_tool_path(compiler):
    """Given a distutils.ccompiler.CCompiler class, returns the directory of
    the toolset related to C compilation."""
    executable = find_executable(get_compiler_executable(compiler))
    if not executable:
        raise DistutilsSetupError("Could not find compiler executable info for scons")
    return pdirname(executable)
def get_f77_tool_path(compiler):
    """Given a distutils.ccompiler.FCompiler class, returns the directory of
    the toolset related to F77 compilation."""
    executable = find_executable(get_f77_compiler_executable(compiler))
    if not executable:
        raise DistutilsSetupError("Could not find F77 compiler executable "
                                  "info for scons")
    return pdirname(executable)
def get_cxx_tool_path(compiler):
    """Given a distutils.ccompiler.CCompiler class, returns the directory of
    the toolset related to C++ compilation."""
    executable = find_executable(get_cxxcompiler_executable(compiler))
    if not executable:
        raise DistutilsSetupError("Could not find compiler executable info for scons")
    return pdirname(executable)
def protect_path(path):
    """Quote *path* (given as a string) so the shell will have no problem
    understanding it (spaces, etc.).

    XXX: naive for now -- does not detect paths that are already quoted,
    for example. Empty/None input yields an empty quoted string.
    """
    if not path:
        return '""'
    return '"' + path + '"'
def parse_package_list(pkglist):
    """Split a comma-separated package list string into a list of names."""
    comma = ","
    return pkglist.split(comma)
def find_common(seq1, seq2):
    """Given two lists, return the indices (into seq1) of the common items.

    Note: does not handle duplicate items.
    """
    # Set membership instead of dict.has_key: has_key was removed in
    # Python 3, and a set gives the same O(1) lookup.
    lookup = set(seq2)
    return [i for i, item in enumerate(seq1) if item in lookup]
def select_packages(sconspkg, pkglist):
    """Given a list of packages in pkglist, return the indices (into
    ``sconspkg``) of the packages which match this list.

    Raises:
        ValueError: if some entry of ``pkglist`` is not in ``sconspkg``.
    """
    common = find_common(sconspkg, pkglist)
    # Every requested package must have been found; use != instead of the
    # original ``not len(a) == len(b)`` anti-idiom.
    if len(common) != len(pkglist):
        msg = "the package list contains a package not found in "\
              "the current list. The current list is %s" % sconspkg
        raise ValueError(msg)
    return common
def check_numscons(minver):
    """Check that we can use numscons.

    minver is a 3 integers tuple which defines the min version.

    Raises RuntimeError when numscons is missing or too old to expose
    version_info, and ValueError when the detected version is too old.
    """
    try:
        import numscons
    except ImportError:
        e = get_exception()
        raise RuntimeError("importing numscons failed (error was %s), using " \
                           "scons within distutils is not possible without "
                           "this package " % str(e))

    try:
        # version_info was added in 0.10.0
        from numscons import version_info
        # Stupid me used string instead of numbers in version_info in
        # dev versions of 0.10.0
        if isinstance(version_info[0], str):
            raise ValueError("Numscons %s or above expected " \
                             "(detected 0.10.0)" % str(minver))
        # Stupid me used list instead of tuple in numscons
        version_info = tuple(version_info)
        if version_info[:3] < minver:
            raise ValueError("Numscons %s or above expected (got %s) "
                             % (str(minver), str(version_info[:3])))
    except ImportError:
        # Only the version_info import failure lands here; the ValueErrors
        # raised above deliberately propagate to the caller.
        raise RuntimeError("You need numscons >= %s to build numpy "\
                           "with numscons (imported numscons path " \
                           "is %s)." % (minver, numscons.__file__))
# XXX: this is a giantic mess. Refactor this at some point.
# XXX: this is a giantic mess. Refactor this at some point.
class scons(old_build_ext):
    """distutils command that drives per-package scons (numscons) builds.

    It translates distutils configuration (compilers, build dirs, options)
    into a numscons command line and invokes one scons process per package.
    """
    # XXX: add an option to the scons command for configuration (auto/force/cache).
    description = "Scons builder"

    # Performance-library related command-line options.
    library_options = [
        ('with-perflib=', None,
         'Specify which performance library to use for BLAS/LAPACK/etc...' \
         'Examples: mkl/atlas/sunper/accelerate'),
        ('with-mkl-lib=', None, 'TODO'),
        ('with-mkl-include=', None, 'TODO'),
        ('with-mkl-libraries=', None, 'TODO'),
        ('with-atlas-lib=', None, 'TODO'),
        ('with-atlas-include=', None, 'TODO'),
        ('with-atlas-libraries=', None, 'TODO')
    ]
    user_options = [
        ('jobs=', 'j', "specify number of worker threads when executing" \
         "scons"),
        ('inplace', 'i', 'If specified, build in place.'),
        ('import-env', 'e', 'If specified, import user environment into scons env["ENV"].'),
        ('bypass', 'b', 'Bypass distutils compiler detection (experimental).'),
        ('scons-tool-path=', None, 'specify additional path '\
         '(absolute) to look for scons tools'),
        ('silent=', None, 'specify whether scons output should less verbose'\
         '(1), silent (2), super silent (3) or not (0, default)'),
        ('log-level=', None, 'specify log level for numscons. Any value ' \
         'valid for the logging python module is valid'),
        ('package-list=', None,
         'If specified, only run scons on the given '\
         'packages (example: --package-list=scipy.cluster). If empty, '\
         'no package is built'),
        ('fcompiler=', None, "specify the Fortran compiler type"),
        ('compiler=', None, "specify the C compiler type"),
        ('cxxcompiler=', None,
         "specify the C++ compiler type (same as C by default)"),
        ('debug', 'g',
         "compile/link with debugging information"),
    ] + library_options

    def initialize_options(self):
        """Set the default value of every command option."""
        old_build_ext.initialize_options(self)
        self.build_clib = None

        self.debug = 0
        self.compiler = None
        self.cxxcompiler = None
        self.fcompiler = None
        self.jobs = None
        self.silent = 0
        self.import_env = 0
        self.scons_tool_path = ''
        # If true, we bypass distutils to find the c compiler altogether. This
        # is to be used in desperate cases (like incompatible visual studio
        # version).
        self._bypass_distutils_cc = False

        # scons compilers
        self.scons_compiler = None
        self.scons_compiler_path = None
        self.scons_fcompiler = None
        self.scons_fcompiler_path = None
        self.scons_cxxcompiler = None
        self.scons_cxxcompiler_path = None

        self.package_list = None
        self.inplace = 0
        self.bypass = 0

        # Only critical things
        self.log_level = 50

        # library options
        self.with_perflib = []
        self.with_mkl_lib = []
        self.with_mkl_include = []
        self.with_mkl_libraries = []
        self.with_atlas_lib = []
        self.with_atlas_include = []
        self.with_atlas_libraries = []

    def _init_ccompiler(self, compiler_type):
        """Detect the C compiler via distutils and record its scons name/path."""
        # XXX: The logic to bypass distutils is ... not so logic.
        if compiler_type == 'msvc':
            self._bypass_distutils_cc = True
        try:
            distutils_compiler = new_compiler(compiler=compiler_type,
                                              verbose=self.verbose,
                                              dry_run=self.dry_run,
                                              force=self.force)
            distutils_compiler.customize(self.distribution)
            # This initialization seems necessary, sometimes, for find_executable to work...
            if hasattr(distutils_compiler, 'initialize'):
                distutils_compiler.initialize()
            self.scons_compiler = dist2sconscc(distutils_compiler)
            self.scons_compiler_path = protect_path(get_tool_path(distutils_compiler))
        except DistutilsPlatformError:
            e = get_exception()
            if not self._bypass_distutils_cc:
                raise e
            else:
                # Fall back to the raw compiler type name when bypassing.
                self.scons_compiler = compiler_type

    def _init_fcompiler(self, compiler_type):
        """Detect the Fortran compiler and record its scons name/path."""
        self.fcompiler = new_fcompiler(compiler = compiler_type,
                                       verbose = self.verbose,
                                       dry_run = self.dry_run,
                                       force = self.force)

        if self.fcompiler is not None:
            self.fcompiler.customize(self.distribution)
            self.scons_fcompiler = dist2sconsfc(self.fcompiler)
            self.scons_fcompiler_path = protect_path(get_f77_tool_path(self.fcompiler))

    def _init_cxxcompiler(self, compiler_type):
        """Detect the C++ compiler and record its scons name/path."""
        cxxcompiler = new_compiler(compiler = compiler_type,
                                   verbose = self.verbose,
                                   dry_run = self.dry_run,
                                   force = self.force)
        if cxxcompiler is not None:
            cxxcompiler.customize(self.distribution, need_cxx = 1)
            cxxcompiler.customize_cmd(self)
            self.cxxcompiler = cxxcompiler.cxx_compiler()
            try:
                get_cxx_tool_path(self.cxxcompiler)
            except DistutilsSetupError:
                # No usable C++ toolchain found; disable C++ support.
                self.cxxcompiler = None

            if self.cxxcompiler:
                self.scons_cxxcompiler = dist2sconscxx(self.cxxcompiler)
                self.scons_cxxcompiler_path = protect_path(get_cxx_tool_path(self.cxxcompiler))

    def finalize_options(self):
        """Collect sconscripts/hooks from the distribution and set up compilers."""
        old_build_ext.finalize_options(self)

        self.sconscripts = []
        self.pre_hooks = []
        self.post_hooks = []
        self.pkg_names = []
        self.pkg_paths = []

        if self.distribution.has_scons_scripts():
            for i in self.distribution.scons_data:
                self.sconscripts.append(i.scons_path)
                self.pre_hooks.append(i.pre_hook)
                self.post_hooks.append(i.post_hook)
                self.pkg_names.append(i.parent_name)
                self.pkg_paths.append(i.pkg_path)
            # This crap is needed to get the build_clib
            # directory
            build_clib_cmd = get_cmd("build_clib").get_finalized_command("build_clib")
            self.build_clib = build_clib_cmd.build_clib

        if not self.cxxcompiler:
            self.cxxcompiler = self.compiler

        # To avoid trouble, just don't do anything if no sconscripts are used.
        # This is useful when for example f2py uses numpy.distutils, because
        # f2py does not pass compiler information to scons command, and the
        # compilation setup below can crash in some situation.
        if len(self.sconscripts) > 0:
            if self.bypass:
                self.scons_compiler = self.compiler
                self.scons_fcompiler = self.fcompiler
                self.scons_cxxcompiler = self.cxxcompiler
            else:
                # Try to get the same compiler than the ones used by distutils: this is
                # non trivial because distutils and scons have totally different
                # conventions on this one (distutils uses PATH from user's environment,
                # whereas scons uses standard locations). The way we do it is once we
                # got the c compiler used, we use numpy.distutils function to get the
                # full path, and add the path to the env['PATH'] variable in env
                # instance (this is done in numpy.distutils.scons module).
                self._init_ccompiler(self.compiler)
                self._init_fcompiler(self.fcompiler)
                self._init_cxxcompiler(self.cxxcompiler)

        if self.package_list:
            self.package_list = parse_package_list(self.package_list)

    def _call_scons(self, scons_exec, sconscript, pkg_name, pkg_path, bootstrapping):
        """Build the scons command line for one package and run it."""
        # XXX: when a scons script is missing, scons only prints warnings, and
        # does not return a failure (status is 0). We have to detect this from
        # distutils (this cannot work for recursive scons builds...)

        # XXX: passing everything at command line may cause some trouble where
        # there is a size limitation ? What is the standard solution in thise
        # case ?

        cmd = [scons_exec, "-f", sconscript, '-I.']
        if self.jobs:
            cmd.append(" --jobs=%d" % int(self.jobs))
        if self.inplace:
            cmd.append("inplace=1")
        cmd.append('scons_tool_path="%s"' % self.scons_tool_path)
        cmd.append('src_dir="%s"' % pdirname(sconscript))
        cmd.append('pkg_path="%s"' % pkg_path)
        cmd.append('pkg_name="%s"' % pkg_name)
        cmd.append('log_level=%s' % self.log_level)
        #cmd.append('distutils_libdir=%s' % protect_path(pjoin(self.build_lib,
        #                                                    pdirname(sconscript))))
        cmd.append('distutils_libdir=%s' %
                     protect_path(get_distutils_libdir(self, pkg_name)))
        cmd.append('distutils_clibdir=%s' %
                     protect_path(get_distutils_clibdir(self, pkg_name)))
        prefix = get_distutils_install_prefix(pkg_name, self.inplace)
        cmd.append('distutils_install_prefix=%s' % protect_path(prefix))

        if not self._bypass_distutils_cc:
            cmd.append('cc_opt=%s' % self.scons_compiler)
            if self.scons_compiler_path:
                cmd.append('cc_opt_path=%s' % self.scons_compiler_path)
        else:
            cmd.append('cc_opt=%s' % self.scons_compiler)

        cmd.append('debug=%s' % self.debug)

        if self.scons_fcompiler:
            cmd.append('f77_opt=%s' % self.scons_fcompiler)
            if self.scons_fcompiler_path:
                cmd.append('f77_opt_path=%s' % self.scons_fcompiler_path)

        if self.scons_cxxcompiler:
            cmd.append('cxx_opt=%s' % self.scons_cxxcompiler)
            if self.scons_cxxcompiler_path:
                cmd.append('cxx_opt_path=%s' % self.scons_cxxcompiler_path)

        cmd.append('include_bootstrap=%s' % dirl_to_str(get_numpy_include_dirs(sconscript)))
        cmd.append('bypass=%s' % self.bypass)
        cmd.append('import_env=%s' % self.import_env)
        if self.silent:
            if int(self.silent) == 2:
                cmd.append('-Q')
            elif int(self.silent) == 3:
                cmd.append('-s')
        cmd.append('silent=%d' % int(self.silent))
        cmd.append('bootstrapping=%d' % bootstrapping)
        cmdstr = ' '.join(cmd)
        if int(self.silent) < 1:
            log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr)
        else:
            log.info("======== Executing scons command for pkg %s =========", pkg_name)
        st = os.system(cmdstr)
        if st:
            #print "status is %d" % st
            msg = "Error while executing scons command."
            msg += " See above for more information.\n"
            msg += """\
If you think it is a problem in numscons, you can also try executing the scons
command with --log-level option for more detailed output of what numscons is
doing, for example --log-level=0; the lowest the level is, the more detailed
the output it."""
            raise DistutilsExecError(msg)

    def run(self):
        """Run scons for every (optionally filtered) package, with hooks."""
        if len(self.sconscripts) < 1:
            # nothing to do, just leave it here.
            return

        check_numscons(minver=(0, 11, 0))

        if self.package_list is not None:
            # Restrict the build to the explicitly requested packages.
            id = select_packages(self.pkg_names, self.package_list)
            sconscripts = [self.sconscripts[i] for i in id]
            pre_hooks = [self.pre_hooks[i] for i in id]
            post_hooks = [self.post_hooks[i] for i in id]
            pkg_names = [self.pkg_names[i] for i in id]
            pkg_paths = [self.pkg_paths[i] for i in id]
        else:
            sconscripts = self.sconscripts
            pre_hooks = self.pre_hooks
            post_hooks = self.post_hooks
            pkg_names = self.pkg_names
            pkg_paths = self.pkg_paths

        if is_bootstrapping():
            bootstrapping = 1
        else:
            bootstrapping = 0

        # Run scons-local through the same python interpreter as this process.
        scons_exec = get_python_exec_invoc()
        scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py'))

        for sconscript, pre_hook, post_hook, pkg_name, pkg_path in zip(sconscripts,
                                                   pre_hooks, post_hooks,
                                                   pkg_names, pkg_paths):
            if pre_hook:
                pre_hook()

            if sconscript:
                self._call_scons(scons_exec, sconscript, pkg_name, pkg_path, bootstrapping)

            if post_hook:
                post_hook(**{'pkg_name': pkg_name, 'scons_cmd' : self})
| gpl-3.0 |
apixandru/intellij-community | plugins/hg4idea/testData/bin/mercurial/hbisect.py | 92 | 9226 | # changelog bisection for mercurial
#
# Copyright 2007 Matt Mackall
# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
#
# Inspired by git bisect, extension skeleton taken from mq.py.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, error
from i18n import _
from node import short, hex
import util
def bisect(changelog, state):
    """find the next node (if any) for testing during a bisect search.
    returns a (nodes, number, good) tuple.

    'nodes' is the final result of the bisect if 'number' is 0.
    Otherwise 'number' indicates the remaining possible candidates for
    the search and 'nodes' contains the next bisect target.
    'good' is True if bisect is searching for a first good changeset, False
    if searching for a first bad one.
    """

    clparents = changelog.parentrevs
    skip = set([changelog.rev(n) for n in state['skip']])

    def buildancestors(bad, good):
        # For each revision, ancestors[rev] is [] when the revision descends
        # from a good rev, None when it is outside the search range, and a
        # list of candidate ancestors once populated by the main loop below.
        # only the earliest bad revision matters
        badrev = min([changelog.rev(n) for n in bad])
        goodrevs = [changelog.rev(n) for n in good]
        goodrev = min(goodrevs)
        # build visit array
        ancestors = [None] * (len(changelog) + 1) # an extra for [-1]

        # set nodes descended from goodrevs
        for rev in goodrevs:
            ancestors[rev] = []
        for rev in changelog.revs(goodrev + 1):
            for prev in clparents(rev):
                if ancestors[prev] == []:
                    ancestors[rev] = []

        # clear good revs from array
        for rev in goodrevs:
            ancestors[rev] = None
        for rev in changelog.revs(len(changelog), goodrev):
            if ancestors[rev] is None:
                for prev in clparents(rev):
                    ancestors[prev] = None

        # badrev not descended from any good rev: the transition goes the
        # other way, signalled by returning None for the ancestor table.
        if ancestors[badrev] is None:
            return badrev, None
        return badrev, ancestors

    good = False
    badrev, ancestors = buildancestors(state['bad'], state['good'])
    if not ancestors: # looking for bad to good transition?
        good = True
        badrev, ancestors = buildancestors(state['good'], state['bad'])
    bad = changelog.node(badrev)
    if not ancestors: # now we're confused
        if len(state['bad']) == 1 and len(state['good']) == 1:
            raise util.Abort(_("starting revisions are not directly related"))
        raise util.Abort(_("inconsistent state, %s:%s is good and bad")
                         % (badrev, short(bad)))

    # build children dict
    children = {}
    visit = util.deque([badrev])
    candidates = []
    while visit:
        rev = visit.popleft()
        if ancestors[rev] == []:
            candidates.append(rev)
            for prev in clparents(rev):
                if prev != -1:
                    if prev in children:
                        children[prev].append(rev)
                    else:
                        children[prev] = [rev]
                    visit.append(prev)

    candidates.sort()
    # have we narrowed it down to one entry?
    # or have all other possible candidates besides 'bad' have been skipped?
    tot = len(candidates)
    unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
    if tot == 1 or not unskipped:
        return ([changelog.node(rev) for rev in candidates], 0, good)
    perfect = tot // 2

    # find the best node to test: the one whose ancestor count splits the
    # candidate set as close to half as possible.
    best_rev = None
    best_len = -1
    poison = set()
    for rev in candidates:
        if rev in poison:
            # poison children
            poison.update(children.get(rev, []))
            continue

        a = ancestors[rev] or [rev]
        ancestors[rev] = None

        x = len(a) # number of ancestors
        y = tot - x # number of non-ancestors
        value = min(x, y) # how good is this test?
        if value > best_len and rev not in skip:
            best_len = value
            best_rev = rev
            if value == perfect: # found a perfect candidate? quit early
                break

        if y < perfect and rev not in skip: # all downhill from here?
            # poison children
            poison.update(children.get(rev, []))
            continue

        # propagate the accumulated ancestor set to each child
        for c in children.get(rev, []):
            if ancestors[c]:
                ancestors[c] = list(set(ancestors[c] + a))
            else:
                ancestors[c] = a + [c]

    assert best_rev is not None
    best_node = changelog.node(best_rev)

    return ([best_node], tot, good)
def load_state(repo):
    """Read the persisted bisection state from .hg/bisect.state.

    Returns a dict mapping each state kind ('current', 'good', 'bad',
    'skip') to a list of binary node ids.  A missing state file yields a
    dict of empty lists.  Raises util.Abort on an unrecognized kind.
    """
    state = {'current': [], 'good': [], 'bad': [], 'skip': []}
    if not os.path.exists(repo.join("bisect.state")):
        return state
    for line in repo.opener("bisect.state"):
        # Each line is "<kind> <node-hex>"; strip the trailing newline.
        kind, node = line[:-1].split()
        # Resolve the node first, matching the original behaviour of
        # looking it up even before validating the kind.
        node = repo.lookup(node)
        if kind not in state:
            raise util.Abort(_("unknown bisect kind %s") % kind)
        state[kind].append(node)
    return state
def save_state(repo, state):
    """Atomically persist *state* (the dict produced by load_state) to
    .hg/bisect.state, one "<kind> <node-hex>" line per entry."""
    # Acquire the working-dir lock *before* opening the state file: the
    # original code opened the file first, which leaked the handle if
    # wlock() raised and left the file creation outside the lock's
    # protection against concurrent writers.
    wlock = repo.wlock()
    try:
        f = repo.opener("bisect.state", "w", atomictemp=True)
        for kind in sorted(state):
            for node in state[kind]:
                f.write("%s %s\n" % (kind, hex(node)))
        # Closing an atomictemp file commits it into place.
        f.close()
    finally:
        wlock.release()
def get(repo, status):
    """
    Return a list of revision(s) that match the given status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``        : csets topologically good/bad
    - ``range``                  : csets taking part in the bisection
    - ``pruned``                 : csets that are goods, bads or skipped
    - ``untested``               : csets whose fate is yet unknown
    - ``ignored``                : csets ignored due to DAG topology
    - ``current``                : the cset currently being bisected
    """
    state = load_state(repo)
    if status in ('good', 'bad', 'skip', 'current'):
        # Explicit states are stored as nodes; translate to rev numbers.
        # NOTE: py2 map() returns a list here, so callers can test membership.
        return map(repo.changelog.rev, state[status])
    else:
        # In the following sets, we do *not* call 'bisect()' with more
        # than one level of recursion, because that can be very, very
        # time consuming. Instead, we always develop the expression as
        # much as possible.

        # 'range' is all csets that make the bisection:
        #   - have a good ancestor and a bad descendant, or conversely
        # that's because the bisection can go either way
        # (local name deliberately shadows the builtin 'range' here)
        range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'

        _t = repo.revs('bisect(good)::bisect(bad)')
        # The sets of topologically good or bad csets
        if len(_t) == 0:
            # Goods are topologically after bads
            goods = 'bisect(good)::'    # Pruned good csets
            bads = '::bisect(bad)'      # Pruned bad csets
        else:
            # Goods are topologically before bads
            goods = '::bisect(good)'    # Pruned good csets
            bads = 'bisect(bad)::'      # Pruned bad csets

        # 'pruned' is all csets whose fate is already known: good, bad, skip
        skips = 'bisect(skip)'                 # Pruned skipped csets
        pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)

        # 'untested' is all csets that are in 'range', but not in 'pruned'
        untested = '( (%s) - (%s) )' % (range, pruned)

        # 'ignored' is all csets that were not used during the bisection
        # due to DAG topology, but may however have had an impact.
        # E.g., a branch merged between bads and goods, but whose branch-
        # point is outside of the range.
        iba = '::bisect(bad) - ::bisect(good)'  # Ignored bads' ancestors
        iga = '::bisect(good) - ::bisect(bad)'  # Ignored goods' ancestors
        ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)

        if status == 'range':
            return repo.revs(range)
        elif status == 'pruned':
            return repo.revs(pruned)
        elif status == 'untested':
            return repo.revs(untested)
        elif status == 'ignored':
            return repo.revs(ignored)
        elif status == "goods":
            return repo.revs(goods)
        elif status == "bads":
            return repo.revs(bads)
        else:
            raise error.ParseError(_('invalid bisect state'))
def label(repo, node):
    """Return the translated bisection status label for *node*, or None
    if the changeset plays no part in the bisection."""
    rev = repo.changelog.rev(node)

    # Explicit states take precedence over states implied by DAG
    # topology; within each group, order matters.
    checks = [
        # i18n: bisect changeset status
        (('good',), _('good')),
        # i18n: bisect changeset status
        (('bad',), _('bad')),
        # i18n: bisect changeset status
        (('skip',), _('skipped')),
        # i18n: bisect changeset status
        (('untested', 'current'), _('untested')),
        # i18n: bisect changeset status
        (('ignored',), _('ignored')),
        # i18n: bisect changeset status
        (('goods',), _('good (implicit)')),
        # i18n: bisect changeset status
        (('bads',), _('bad (implicit)')),
    ]
    for statuses, text in checks:
        if any(rev in get(repo, status) for status in statuses):
            return text
    return None
def shortlabel(label):
    """Return the uppercased first letter of *label*, or None for a
    falsy label (None or empty string)."""
    return label[0].upper() if label else None
| apache-2.0 |
dedalusdev/docker-registry | depends/docker-registry-core/docker_registry/testing/mock_dict.py | 38 | 1431 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Extend Mock class with dictionary behavior.
Call it as:
mocked_dict = MockDict()
mocked_dict.add_dict_methods()'''
import mock
# Keep a direct reference to the original MagicMock initializer so that
# MockDict.__init__ can invoke it explicitly (MagicMock instances
# intercept attribute access, so the usual super() chain is avoided here).
MagicMock__init__ = mock.MagicMock.__init__
class MockDict(mock.MagicMock):
    """A MagicMock that can also behave like a dictionary.

    After calling :meth:`add_dict_methods`, item access, assignment,
    deletion and ``in`` tests are backed by a real dict, which is
    exposed through :attr:`get_dict`.
    """

    def __init__(self, *args, **kwargs):
        # Run the saved, unpatched MagicMock initializer, then attach
        # the backing dictionary.
        MagicMock__init__(self, *args, **kwargs)
        self._mock_dict = {}

    @property
    def get_dict(self):
        """The real dict backing the mocked dictionary behavior."""
        return self._mock_dict

    def add_dict_methods(self):
        """Wire the dunder item methods to the backing dict via side effects."""
        backing = self._mock_dict

        def getitem(key):
            return backing[key]

        def setitem(key, value):
            backing[key] = value

        def delitem(key):
            del backing[key]

        def contains(key):
            return key in backing

        self.__getitem__.side_effect = getitem
        self.__setitem__.side_effect = setitem
        self.__delitem__.side_effect = delitem
        self.__contains__.side_effect = contains
| apache-2.0 |
phazel/pixelated-user-agent | service/pixelated/adapter/mailstore/body_parser.py | 2 | 1493 | #
# Copyright (c) 2015 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from email.parser import Parser
class BodyParser(object):
    """Wraps a raw mail body so it can be decoded via the email parser.

    The content is re-assembled into a minimal MIME message (headers +
    blank line + body) and handed to email.parser.Parser, which then
    undoes any content transfer encoding for us.
    """

    def __init__(self, content, content_type='text/plain; charset="us-ascii"', content_transfer_encoding=None):
        self._content = content
        self._content_type = content_type
        self._content_transfer_encoding = content_transfer_encoding

    def parsed_content(self):
        """Return the decoded body as a unicode string (UTF-8 decoded)."""
        parser = Parser()
        text = 'Content-Type: %s\n' % self._content_type
        if self._content_transfer_encoding is not None:
            text += 'Content-Transfer-Encoding: %s\n' % self._content_transfer_encoding
        # Blank line separates the synthetic headers from the body.
        text += '\n'
        text += self._content
        parsed_body = parser.parsestr(text)
        # get_payload(decode=True) reverses the transfer encoding and
        # yields bytes; decode them as UTF-8.  The original code wrapped
        # the result in a second, redundant unicode() call.
        return unicode(parsed_body.get_payload(decode=True), encoding='utf-8')
| agpl-3.0 |
ahuarte47/QGIS | tests/src/python/test_qgslayoutmapgrid.py | 15 | 26980 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemMapGrid.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 by Nyall Dawson'
__date__ = '20/10/2017'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtCore import QRectF
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.core import (QgsLayoutItemMap,
QgsLayoutItemMapGrid,
QgsRectangle,
QgsLayout,
QgsProperty,
QgsLayoutObject,
QgsFontUtils,
QgsProject)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath, getTestFont
from qgslayoutchecker import QgsLayoutChecker
# Initialise the QGIS application environment once for the whole module.
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutMapGrid(unittest.TestCase):
    """Unit tests for QgsLayoutItemMapGrid rendering and data-defined
    properties.

    Every test builds a fresh layout containing one framed map item
    (see _makeLayoutAndMap), configures the map's grid, and compares the
    rendered layout against a control image (see _checkLayout).  The
    local name ``map`` intentionally shadows the builtin to match the
    QGIS test-suite convention.
    """

    def _makeLayoutAndMap(self):
        """Return (layout, map): a default-initialized layout containing a
        framed 200x100 map item with a red-brown background."""
        layout = QgsLayout(QgsProject.instance())
        layout.initializeDefaults()
        map = QgsLayoutItemMap(layout)
        map.attemptSetSceneRect(QRectF(20, 20, 200, 100))
        map.setFrameEnabled(True)
        map.setBackgroundColor(QColor(150, 100, 100))
        layout.addLayoutItem(map)
        return layout, map

    def _checkLayout(self, control_name, layout, *args):
        """Render *layout* and compare it with the named control image.

        Extra positional args (page number, pixel tolerance) are passed
        through to QgsLayoutChecker.testLayout().  Returns the
        (result, message) pair from the checker.
        """
        checker = QgsLayoutChecker(control_name, layout)
        checker.setControlPathPrefix("composer_mapgrid")
        return checker.testLayout(*args)

    def testGrid(self):
        """Test that we can create a grid for a map."""
        layout, map = self._makeLayoutAndMap()
        self.assertFalse(map.grids().hasEnabledItems())

        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        self.assertTrue(map.grids().hasEnabledItems())
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(True)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setAnnotationFont(getTestFont())
        map.grid().setAnnotationPrecision(0)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        myTestResult, myMessage = self._checkLayout('composermap_grid', layout)
        map.grid().setEnabled(False)
        map.grid().setAnnotationEnabled(False)
        # was a bare assert; assertTrue survives python -O
        self.assertTrue(myTestResult, myMessage)

    def testCrossGrid(self):
        """Render a cross-style grid and compare with the control image."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setStyle(QgsLayoutItemMapGrid.Cross)
        map.grid().setCrossLength(2.0)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(False)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
        map.updateBoundingRect()

        myTestResult, myMessage = self._checkLayout('composermap_crossgrid', layout)
        map.grid().setStyle(QgsLayoutItemMapGrid.Solid)
        map.grid().setEnabled(False)
        map.grid().setAnnotationEnabled(False)
        self.assertTrue(myTestResult, myMessage)

    def testMarkerGrid(self):
        """Render a marker-style grid and compare with the control image."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setStyle(QgsLayoutItemMapGrid.Markers)
        map.grid().setCrossLength(2.0)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(False)
        map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
        map.grid().markerSymbol().symbolLayer(0).setStrokeColor(QColor(0, 0, 0))
        map.updateBoundingRect()

        myTestResult, myMessage = self._checkLayout('composermap_markergrid', layout)
        map.grid().setStyle(QgsLayoutItemMapGrid.Solid)
        map.grid().setEnabled(False)
        map.grid().setAnnotationEnabled(False)
        self.assertTrue(myTestResult, myMessage)

    def testFrameOnly(self):
        """Render a grid frame without grid lines."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setStyle(QgsLayoutItemMapGrid.FrameAnnotationsOnly)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(False)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
        map.grid().setFramePenSize(0.5)
        map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
        map.updateBoundingRect()

        myTestResult, myMessage = self._checkLayout('composermap_gridframeonly', layout)
        map.grid().setStyle(QgsLayoutItemMapGrid.Solid)
        map.grid().setEnabled(False)
        map.grid().setAnnotationEnabled(False)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.NoFrame)
        self.assertTrue(myTestResult, myMessage)

    def testZebraStyle(self):
        """Render a zebra-style grid frame."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(785462.375, 3341423.125,
                                   789262.375, 3343323.125)
        map.setExtent(myRectangle)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setGridLineColor(QColor(0, 0, 0))
        map.grid().setAnnotationFontColor(QColor(0, 0, 0))
        map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
        map.grid().setFrameWidth(10)
        map.grid().setFramePenSize(1)
        map.grid().setGridLineWidth(0.5)
        map.grid().setFramePenColor(QColor(255, 100, 0, 200))
        map.grid().setFrameFillColor1(QColor(50, 90, 50, 100))
        map.grid().setFrameFillColor2(QColor(200, 220, 100, 60))
        map.grid().setEnabled(True)
        map.updateBoundingRect()

        myTestResult, myMessage = self._checkLayout('composermap_zebrastyle', layout, 0, 100)
        self.assertTrue(myTestResult, myMessage)

    def testZebraStyleSides(self):
        """Render zebra frames on progressively more sides of the map."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setGridLineColor(QColor(0, 0, 0))
        map.grid().setAnnotationFontColor(QColor(0, 0, 0))
        map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
        map.grid().setFrameWidth(10)
        map.grid().setFramePenSize(1)
        map.grid().setGridLineWidth(0.5)
        map.grid().setFramePenColor(QColor(0, 0, 0))
        map.grid().setFrameFillColor1(QColor(0, 0, 0))
        map.grid().setFrameFillColor2(QColor(255, 255, 255))
        map.grid().setEnabled(True)

        # left side only
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameLeft, True)
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameRight, False)
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameTop, False)
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameBottom, False)
        map.updateBoundingRect()
        myTestResult, myMessage = self._checkLayout('composermap_zebrastyle_left', layout, 0, 100)
        self.assertTrue(myTestResult, myMessage)

        # left + top
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameTop, True)
        map.updateBoundingRect()
        myTestResult, myMessage = self._checkLayout('composermap_zebrastyle_lefttop', layout, 0, 100)
        self.assertTrue(myTestResult, myMessage)

        # left + top + right
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameRight, True)
        map.updateBoundingRect()
        myTestResult, myMessage = self._checkLayout('composermap_zebrastyle_lefttopright', layout, 0, 100)
        self.assertTrue(myTestResult, myMessage)

        # restore the remaining flag and disable the frame; no further render
        map.grid().setFrameSideFlag(QgsLayoutItemMapGrid.FrameBottom, True)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.NoFrame)

    def testInteriorTicks(self):
        """Render an interior-ticks frame with annotations only."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationFontColor(QColor(0, 0, 0))
        map.grid().setBlendMode(QPainter.CompositionMode_SourceOver)
        # the original also set Zebra first, but it was immediately
        # overridden by InteriorTicks below, so the call was dropped
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.InteriorTicks)
        map.grid().setFrameWidth(10)
        map.grid().setFramePenSize(1)
        map.grid().setFramePenColor(QColor(0, 0, 0))
        map.grid().setEnabled(True)
        map.grid().setStyle(QgsLayoutItemMapGrid.FrameAnnotationsOnly)
        map.updateBoundingRect()

        myTestResult, myMessage = self._checkLayout('composermap_interiorticks', layout, 0, 100)
        self.assertTrue(myTestResult, myMessage)

    def testExpressionContext(self):
        """Grid expression context should inherit the map's variables."""
        layout, map = self._makeLayoutAndMap()
        map.setExtent(QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125))
        map.setScale(1000)

        # grid expression context should inherit from map, so variables like
        # @map_scale can be used
        context = map.grid().createExpressionContext()
        self.assertAlmostEqual(context.variable('map_scale'), 1000, 5)
        self.assertEqual(context.variable('grid_number'), 0)
        self.assertEqual(context.variable('grid_axis'), 'x')
        self.assertEqual(context.variable('item_uuid'), map.uuid())

    def testDataDefinedEnabled(self):
        """Data defined 'enabled' property should toggle grid rendering."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(True)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setAnnotationFont(getTestFont())
        map.grid().setAnnotationPrecision(0)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridEnabled, QgsProperty.fromValue(True))
        map.grid().refresh()
        myTestResult, myMessage = self._checkLayout('composermap_grid', layout)
        self.assertTrue(myTestResult, myMessage)

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridEnabled, QgsProperty.fromValue(False))
        map.grid().refresh()
        myTestResult, myMessage = self._checkLayout('composermap_datadefined_disabled', layout)
        self.assertTrue(myTestResult, myMessage)

    def testDataDefinedIntervalOffset(self):
        """Data defined interval/offset should override the fixed values."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(False)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridIntervalX, QgsProperty.fromValue(1500))
        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridIntervalY, QgsProperty.fromValue(2500))
        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridOffsetX, QgsProperty.fromValue(500))
        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridOffsetY, QgsProperty.fromValue(250))
        map.grid().refresh()

        myTestResult, myMessage = self._checkLayout('composermap_datadefined_intervaloffset', layout)
        self.assertTrue(myTestResult, myMessage)

    def testDataDefinedFrameSize(self):
        """Data defined frame size and margin should override fixed values."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(False)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
        map.grid().setFrameWidth(10)
        map.grid().setFramePenSize(1)
        map.grid().setFramePenColor(QColor(0, 0, 0))
        map.grid().setFrameFillColor1(QColor(0, 0, 0))
        map.grid().setFrameFillColor2(QColor(255, 255, 255))
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridFrameSize, QgsProperty.fromValue(20))
        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridFrameMargin, QgsProperty.fromValue(10))
        map.grid().refresh()

        # original passed '' 'composermap_datadefined_framesizemargin' — an
        # accidental adjacent-string concatenation; use the single literal
        myTestResult, myMessage = self._checkLayout('composermap_datadefined_framesizemargin', layout)
        self.assertTrue(myTestResult, myMessage)

    def testDataDefinedCrossSize(self):
        """Data defined cross size should override the fixed cross length."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setStyle(QgsLayoutItemMapGrid.Cross)
        map.grid().setCrossLength(2.0)
        map.grid().setAnnotationEnabled(False)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridCrossSize, QgsProperty.fromValue(4))
        map.grid().refresh()

        myTestResult, myMessage = self._checkLayout('composermap_datadefined_crosssize', layout)
        self.assertTrue(myTestResult, myMessage)

    def testDataDefinedFrameThickness(self):
        """Data defined frame line thickness should override the fixed value."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(False)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setFrameStyle(QgsLayoutItemMapGrid.Zebra)
        map.grid().setFrameWidth(10)
        map.grid().setFramePenSize(1)
        map.grid().setFramePenColor(QColor(0, 0, 0))
        map.grid().setFrameFillColor1(QColor(0, 0, 0))
        map.grid().setFrameFillColor2(QColor(255, 255, 255))
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridFrameLineThickness, QgsProperty.fromValue(4))
        map.grid().refresh()

        myTestResult, myMessage = self._checkLayout('composermap_datadefined_framethickness', layout)
        self.assertTrue(myTestResult, myMessage)

    def testDataDefinedAnnotationDistance(self):
        """Data defined label distance should override the fixed value."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setIntervalX(2000)
        map.grid().setIntervalY(2000)
        map.grid().setAnnotationEnabled(True)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setAnnotationFont(getTestFont('Bold', 20))
        map.grid().setAnnotationPrecision(0)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()

        map.grid().dataDefinedProperties().setProperty(QgsLayoutObject.MapGridLabelDistance, QgsProperty.fromValue(10))
        map.grid().refresh()

        myTestResult, myMessage = self._checkLayout('composermap_datadefined_annotationdistance', layout)
        self.assertTrue(myTestResult, myMessage)

    def testDynamicInterval(self):
        """Dynamic (page-size based) intervals should track scale changes."""
        layout, map = self._makeLayoutAndMap()
        myRectangle = QgsRectangle(781662.375, 3339523.125,
                                   793062.375, 3345223.125)
        map.setExtent(myRectangle)
        map.grid().setEnabled(True)
        map.grid().setUnits(QgsLayoutItemMapGrid.DynamicPageSizeBased)
        map.grid().setMinimumIntervalWidth(50)
        map.grid().setMaximumIntervalWidth(100)
        map.grid().setAnnotationEnabled(True)
        map.grid().setGridLineColor(QColor(0, 255, 0))
        map.grid().setGridLineWidth(0.5)
        map.grid().setAnnotationFont(getTestFont('Bold', 20))
        map.grid().setAnnotationPrecision(0)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Left)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDisplay(QgsLayoutItemMapGrid.HideAll, QgsLayoutItemMapGrid.Top)
        map.grid().setAnnotationPosition(QgsLayoutItemMapGrid.OutsideMapFrame, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Right)
        map.grid().setAnnotationDirection(QgsLayoutItemMapGrid.Horizontal, QgsLayoutItemMapGrid.Bottom)
        map.grid().setAnnotationFontColor(QColor(255, 0, 0, 150))
        map.grid().setBlendMode(QPainter.CompositionMode_Overlay)
        map.updateBoundingRect()
        map.grid().refresh()

        myTestResult, myMessage = self._checkLayout('composermap_dynamic_5_10', layout)
        self.assertTrue(myTestResult, myMessage)

        map.setScale(map.scale() * 1.1)
        myTestResult, myMessage = self._checkLayout('composermap_dynamic_5_10_2', layout)
        self.assertTrue(myTestResult, myMessage)

        map.setScale(map.scale() * 1.8)
        myTestResult, myMessage = self._checkLayout('composermap_dynamic_5_10_3', layout)
        self.assertTrue(myTestResult, myMessage)

        map.grid().setMinimumIntervalWidth(10)
        map.grid().setMaximumIntervalWidth(40)
        map.grid().refresh()
        myTestResult, myMessage = self._checkLayout('composermap_dynamic_5_10_4', layout)
        self.assertTrue(myTestResult, myMessage)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
hanselke/erpnext-1 | erpnext/accounts/doctype/pos_profile/pos_profile.py | 36 | 2115 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import cint
from frappe.model.document import Document
class POSProfile(Document):
	"""Point-of-Sale profile: per-user (or global) sales defaults for a company."""

	def validate(self):
		self.check_for_duplicate()
		self.validate_all_link_fields()

	def check_for_duplicate(self):
		"""Disallow a second profile for the same user (or a second global
		profile) within one company."""
		res = frappe.db.sql("""select name, user from `tabPOS Profile`
			where ifnull(user, '') = %s and name != %s and company = %s""",
			(self.user, self.name, self.company))
		if res:
			if res[0][1]:
				msgprint(_("POS Profile {0} already created for user: {1} and company {2}").format(res[0][0],
					res[0][1], self.company), raise_exception=1)
			else:
				msgprint(_("Global POS Profile {0} already created for company {1}").format(res[0][0],
					self.company), raise_exception=1)

	def validate_all_link_fields(self):
		"""Ensure every linked account, cost center and warehouse belongs to
		this profile's company."""
		accounts = {"Account": [self.cash_bank_account, self.income_account,
			self.expense_account], "Cost Center": [self.cost_center],
			"Warehouse": [self.warehouse]}

		for link_dt, dn_list in accounts.items():
			for link_dn in dn_list:
				if link_dn and not frappe.db.exists({"doctype": link_dt,
						"company": self.company, "name": link_dn}):
					frappe.throw(_("{0} does not belong to Company {1}").format(link_dn, self.company))

	def on_update(self):
		self.set_defaults()

	def on_trash(self):
		# Recompute defaults as if this profile no longer existed.
		self.set_defaults(include_current_pos=False)

	def set_defaults(self, include_current_pos=True):
		"""Recompute the 'is_pos' default for every user that has a profile.

		When include_current_pos is False, this profile is excluded (used
		while it is being deleted).
		"""
		frappe.defaults.clear_default("is_pos")

		# Use a parameterized query rather than interpolating self.name into
		# the SQL string: the old code's replace("'", "\'") was a no-op (the
		# escape sequence \' is just ') and left the query breakable — and
		# injectable — via quotes in the profile name.
		if not include_current_pos:
			pos_view_users = frappe.db.sql_list("""select user
				from `tabPOS Profile` where name != %s""", (self.name,))
		else:
			pos_view_users = frappe.db.sql_list("""select user
				from `tabPOS Profile`""")

		for user in pos_view_users:
			if user:
				# profile bound to a specific user
				frappe.defaults.set_user_default("is_pos", 1, user)
			else:
				# global profile (no user set)
				frappe.defaults.set_global_default("is_pos", 1)
@frappe.whitelist()
def get_series():
	"""Return the naming-series options configured for Sales Invoice,
	or an empty string when none are defined."""
	naming_series_field = frappe.get_meta("Sales Invoice").get_field("naming_series")
	return naming_series_field.options or ""
| agpl-3.0 |
ckirby/django | tests/null_fk_ordering/tests.py | 381 | 2012 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Author, Comment, Forum, Post, SystemInfo
class NullFkOrderingTests(TestCase):

    def test_ordering_across_null_fk(self):
        """
        Regression test for #7512
        ordering across nullable Foreign Keys shouldn't exclude results
        """
        tom = Author.objects.create(name='Tom Jones')
        bob = Author.objects.create(name='Bob Smith')
        Article.objects.create(title='No author on this article')
        Article.objects.create(author=tom, title='This article written by Tom Jones')
        Article.objects.create(author=bob, title='This article written by Bob Smith')

        # Databases disagree about where NULLs sort in an ordering, so rather
        # than comparing the ordered rows directly we just verify that no row
        # was dropped.
        self.assertEqual(len(list(Article.objects.all())), 3)

        info = SystemInfo.objects.create(system_name='System Info')
        forum = Forum.objects.create(system_info=info, forum_name='First forum')
        post = Post.objects.create(forum=forum, title='First Post')
        Comment.objects.create(post=post, comment_text='My first comment')
        Comment.objects.create(comment_text='My second comment')

        other_info = SystemInfo.objects.create(system_name='More System Info')
        other_forum = Forum.objects.create(system_info=other_info, forum_name='Second forum')
        other_post = Post.objects.create(forum=other_forum, title='Second Post')
        Comment.objects.create(comment_text='Another first comment')
        Comment.objects.create(post=other_post, comment_text='Another second comment')

        # Again only the count is checked: before the fix the list came back
        # short because comments whose post FK is NULL were omitted.
        self.assertEqual(len(list(Comment.objects.all())), 4)
| bsd-3-clause |
naoliv/osmose-backend | plugins/__init__.py | 6 | 1392 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Etienne Chové <chove@crans.org> 2009 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
########################################################################### | gpl-3.0 |
taedori81/wagtail | wagtail/wagtailredirects/tests.py | 6 | 8401 | from django.test import TestCase
from django.test.client import Client
from wagtail.wagtailredirects import models
from wagtail.tests.utils import WagtailTestUtils
from django.core.urlresolvers import reverse
class TestRedirects(TestCase):
    """Behavioural tests for Redirect.normalise_path and redirect serving."""

    def test_path_normalisation(self):
        """normalise_path must be insensitive to scheme/host/slashes/fragments
        and query-parameter order, but case- and extension-sensitive."""
        # Shortcut to normalise function (to keep things tidy)
        normalise_path = models.Redirect.normalise_path

        # Create a path
        path = normalise_path('/Hello/world.html?foo=Bar&Baz=quux2')

        # Test against equivalent paths
        self.assertEqual(path, normalise_path('/Hello/world.html?foo=Bar&Baz=quux2')) # The exact same URL
        self.assertEqual(path, normalise_path('http://mywebsite.com:8000/Hello/world.html?foo=Bar&Baz=quux2')) # Scheme, hostname and port ignored
        self.assertEqual(path, normalise_path('Hello/world.html?foo=Bar&Baz=quux2')) # Leading slash can be omitted
        self.assertEqual(path, normalise_path('Hello/world.html/?foo=Bar&Baz=quux2')) # Trailing slashes are ignored
        self.assertEqual(path, normalise_path('/Hello/world.html?foo=Bar&Baz=quux2#cool')) # Fragments are ignored
        self.assertEqual(path, normalise_path('/Hello/world.html?Baz=quux2&foo=Bar')) # Order of query string parameters are ignored

        # Test against different paths
        self.assertNotEqual(path, normalise_path('/hello/world.html?foo=Bar&Baz=quux2')) # 'hello' is lowercase
        self.assertNotEqual(path, normalise_path('/Hello/world?foo=Bar&Baz=quux2')) # No '.html'
        self.assertNotEqual(path, normalise_path('/Hello/world.html?foo=bar&Baz=Quux2')) # Query string parameters have wrong case
        self.assertNotEqual(path, normalise_path('/Hello/world.html?foo=Bar&baz=quux2')) # ditto
        self.assertNotEqual(path, normalise_path('/Hello/WORLD.html?foo=Bar&Baz=quux2')) # 'WORLD' is uppercase
        self.assertNotEqual(path, normalise_path('/Hello/world.htm?foo=Bar&Baz=quux2')) # '.htm' is not the same as '.html'

        # Normalise some rubbish to make sure it doesn't crash
        normalise_path('This is not a URL')
        normalise_path('//////hello/world')
        normalise_path('!#@%$*')
        normalise_path('C:\\Program Files (x86)\\Some random program\\file.txt')

    def test_basic_redirect(self):
        """A permanent redirect (the default) must answer 301 with a Location."""
        # Get a client
        c = Client()

        # Create a redirect
        redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto')
        redirect.save()

        # Navigate to it
        r = c.get('/redirectme/')

        # Check that we were redirected
        self.assertEqual(r.status_code, 301)
        self.assertTrue(r.has_header('Location'))

    def test_temporary_redirect(self):
        """With is_permanent=False the redirect must answer 302 instead of 301."""
        # Get a client
        c = Client()

        # Create a redirect
        redirect = models.Redirect(old_path='/redirectme', redirect_link='/redirectto', is_permanent=False)
        redirect.save()

        # Navigate to it
        r = c.get('/redirectme/')

        # Check that we were redirected temporarily
        self.assertEqual(r.status_code, 302)
        self.assertTrue(r.has_header('Location'))
class TestRedirectsIndexView(TestCase, WagtailTestUtils):
    """Tests for the redirects index (listing) view."""

    def setUp(self):
        self.login()

    def get(self, params={}):
        # Helper: GET the index view with optional query parameters.
        url = reverse('wagtailredirects_index')
        return self.client.get(url, params)

    def test_simple(self):
        resp = self.get()
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailredirects/index.html')

    def test_search(self):
        resp = self.get({'q': "Hello"})
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context['query_string'], "Hello")

    def test_pagination(self):
        # Every page value, valid or not, should render without error.
        for page in ('0', '1', '-1', '9999', 'Not a page'):
            resp = self.get({'p': page})
            self.assertEqual(resp.status_code, 200)
class TestRedirectsAddView(TestCase, WagtailTestUtils):
    """Tests for the 'add redirect' admin view."""

    def setUp(self):
        self.login()

    def get(self, params={}):
        # GET the add-redirect form.
        return self.client.get(reverse('wagtailredirects_add_redirect'), params)

    def post(self, post_data={}):
        # POST the add-redirect form.
        return self.client.post(reverse('wagtailredirects_add_redirect'), post_data)

    def test_simple(self):
        resp = self.get()
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailredirects/add.html')

    def test_add(self):
        resp = self.post({
            'old_path': '/test',
            'is_permanent': 'on',
            'redirect_link': 'http://www.test.com/',
        })

        # A successful submission redirects back to the index...
        self.assertRedirects(resp, reverse('wagtailredirects_index'))

        # ...and persists exactly one matching redirect.
        created = models.Redirect.objects.filter(old_path='/test')
        self.assertEqual(created.count(), 1)
        self.assertEqual(created.first().redirect_link, 'http://www.test.com/')

    def test_add_validation_error(self):
        # Missing old_path: the form should re-render instead of redirecting.
        resp = self.post({
            'old_path': '',
            'is_permanent': 'on',
            'redirect_link': 'http://www.test.com/',
        })
        self.assertEqual(resp.status_code, 200)
class TestRedirectsEditView(TestCase, WagtailTestUtils):
    """Tests for the 'edit redirect' admin view."""

    def setUp(self):
        # A redirect to operate on.
        self.redirect = models.Redirect(old_path='/test', redirect_link='http://www.test.com/')
        self.redirect.save()

        self.login()

    def get(self, params={}, redirect_id=None):
        target = redirect_id or self.redirect.id
        return self.client.get(reverse('wagtailredirects_edit_redirect', args=(target, )), params)

    def post(self, post_data={}, redirect_id=None):
        target = redirect_id or self.redirect.id
        return self.client.post(reverse('wagtailredirects_edit_redirect', args=(target, )), post_data)

    def test_simple(self):
        resp = self.get()
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailredirects/edit.html')

    def test_nonexistant_redirect(self):
        # Unknown primary keys should 404.
        self.assertEqual(self.get(redirect_id=100000).status_code, 404)

    def test_edit(self):
        resp = self.post({
            'old_path': '/test',
            'is_permanent': 'on',
            'redirect_link': 'http://www.test.com/ive-been-edited',
        })

        # A successful edit redirects back to the index...
        self.assertRedirects(resp, reverse('wagtailredirects_index'))

        # ...and the stored redirect carries the new link.
        edited = models.Redirect.objects.filter(old_path='/test')
        self.assertEqual(edited.count(), 1)
        self.assertEqual(edited.first().redirect_link, 'http://www.test.com/ive-been-edited')

    def test_edit_validation_error(self):
        # Missing old_path: the form should re-render instead of redirecting.
        resp = self.post({
            'old_path': '',
            'is_permanent': 'on',
            'redirect_link': 'http://www.test.com/ive-been-edited',
        })
        self.assertEqual(resp.status_code, 200)
class TestRedirectsDeleteView(TestCase, WagtailTestUtils):
    """Tests for the 'delete redirect' admin view."""

    def setUp(self):
        # A redirect to operate on.
        self.redirect = models.Redirect(old_path='/test', redirect_link='http://www.test.com/')
        self.redirect.save()

        self.login()

    def get(self, params={}, redirect_id=None):
        target = redirect_id or self.redirect.id
        return self.client.get(reverse('wagtailredirects_delete_redirect', args=(target, )), params)

    def post(self, post_data={}, redirect_id=None):
        target = redirect_id or self.redirect.id
        return self.client.post(reverse('wagtailredirects_delete_redirect', args=(target, )), post_data)

    def test_simple(self):
        resp = self.get()
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'wagtailredirects/confirm_delete.html')

    def test_nonexistant_redirect(self):
        # Unknown primary keys should 404.
        self.assertEqual(self.get(redirect_id=100000).status_code, 404)

    def test_delete(self):
        resp = self.post({
            'hello': 'world'
        })

        # A successful delete redirects back to the index...
        self.assertRedirects(resp, reverse('wagtailredirects_index'))

        # ...and the redirect row is gone.
        remaining = models.Redirect.objects.filter(old_path='/test')
        self.assertEqual(remaining.count(), 0)
| bsd-3-clause |
kalwar/openelisglobal-core | liquibase/OE2.8/testCatalogCI_LNSP/scripts/testCodes.py | 18 | 2104 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Parallel lists: row i of each list describes the same test entry.
analyze_code = []
billing_code = []
test_name = []
sample_type = []
# Seeded with '' so blank descriptions count as already handled.
handled_descriptions = ['']
# Input files: one value per line, aligned by line number across the files.
# assumes all four files have at least as many lines as testName.txt — TODO confirm
test_file = open("testName.txt")
sample_file = open("sampleType.txt")
analyze_file = open("analyzeCode.txt")
billing_file = open("billingCode.txt")
# Generated SQL statements are written here.
result = open("output/testCodeResult.sql",'w')
def esc_char(name):
    """Quote *name* for use as a SQL string literal.

    Uses PostgreSQL dollar-quoting when the value contains a single quote,
    plain single quotes otherwise.
    """
    return "$$" + name + "$$" if "'" in name else "'" + name + "'"
# Load each input file into its matching list (one stripped value per line),
# closing each file as soon as it is consumed.
for source_file, target_list in ((test_file, test_name),
                                 (sample_file, sample_type),
                                 (analyze_file, analyze_code),
                                 (billing_file, billing_code)):
    for line in source_file:
        target_list.append(line.strip())
    source_file.close()

# Emit two INSERTs (billing code + analyzer code) per distinct
# "name(sample type)" description. Names of length <= 1 are skipped.
for row, name in enumerate(test_name):
    if len(name) > 1:
        description = esc_char(name + "(" + sample_type[row] + ")")
        if description.strip() not in handled_descriptions:
            handled_descriptions.append(description.strip())
            result.write("INSERT INTO test_code( test_id, code_type_id, value, lastupdated) \n\t")
            result.write("VALUES ( (select id from clinlims.test where description = " + description + " ), ")
            result.write("(select id from clinlims.test_code_type where schema_name = 'billingCode'), '" + billing_code[row] + "', now() );\n")
            result.write("INSERT INTO test_code( test_id, code_type_id, value, lastupdated) \n\t")
            result.write("VALUES ( (select id from clinlims.test where description = " + description + " ), ")
            result.write("(select id from clinlims.test_code_type where schema_name = 'analyzeCode'), '" + analyze_code[row] + "', now() );\n")
result.close()

# Parenthesised so the script runs under both Python 2 and Python 3.
print("Done check testCodeResult.sql for values")
# VALUES ( (select id from clinlims.test where description = 'Test de VIH' ), (select id from clinlims.test_code_type where schema_name = 'billingCode'), 'B120', now() );
| mpl-2.0 |
jaruba/chromium.src | tools/telemetry/PRESUBMIT.py | 15 | 1727 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Paths excluded from the pylint canned check (currently none).
PYLINT_BLACKLIST = []
# Pylint messages suppressed repo-wide: R0923 (interface-not-implemented),
# R0201 (no-self-use), E1101 (no-member).
PYLINT_DISABLED_WARNINGS = ['R0923', 'R0201', 'E1101']
def _CommonChecks(input_api, output_api):
  """Checks run on both upload and commit: a (disabled) stale-docs check
  plus pylint over the telemetry sources. Returns a list of presubmit
  results."""
  results = []

  # TODO(nduca): This should call update_docs.IsUpdateDocsNeeded().
  # Disabled due to crbug.com/255326.
  if False:
    update_docs_path = os.path.join(
        input_api.PresubmitLocalPath(), 'update_docs')
    assert os.path.exists(update_docs_path)
    results.append(output_api.PresubmitError(
        'Docs are stale. Please run:\n' +
        '$ %s' % os.path.abspath(update_docs_path)))

  # Run pylint with the repo-wide blacklist / disabled-warning lists above.
  results.extend(input_api.canned_checks.RunPylint(
      input_api, output_api,
      black_list=PYLINT_BLACKLIST,
      disabled_warnings=PYLINT_DISABLED_WARNINGS))
  return results
def GetPathsToPrepend(input_api):
  """Paths that must be importable while checks run: this directory plus the
  bundled third_party/typ library."""
  local_path = input_api.PresubmitLocalPath()
  typ_path = os.path.join(local_path, os.path.pardir,
                          os.path.pardir, 'third_party', 'typ')
  return [local_path, typ_path]
def RunWithPrependedPath(prepended_path, fn, *args):
  """Invoke fn(*args) with *prepended_path* entries ahead of sys.path,
  restoring the original sys.path afterwards."""
  saved_path = sys.path
  try:
    sys.path = prepended_path + saved_path
    return fn(*args)
  finally:
    sys.path = saved_path
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point for upload; runs the common checks with the
  telemetry/typ paths prepended to sys.path."""
  def go():
    return list(_CommonChecks(input_api, output_api))
  return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point for commit; runs the common checks with the
  telemetry/typ paths prepended to sys.path."""
  def go():
    return list(_CommonChecks(input_api, output_api))
  return RunWithPrependedPath(GetPathsToPrepend(input_api), go)
| bsd-3-clause |
damdam-s/OpenUpgrade | addons/payment_ogone/models/ogone.py | 34 | 19078 | # -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
from lxml import etree, objectify
from pprint import pformat
import time
from urllib import urlencode
import urllib2
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.addons.payment_ogone.data import ogone
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class PaymentAcquirerOgone(osv.Model):
    """payment.acquirer extension for the Ogone gateway: endpoint URLs,
    credential columns, SHA signature computation and form values."""
    _inherit = 'payment.acquirer'

    def _get_ogone_urls(self, cr, uid, environment, context=None):
        """ Ogone URLS:
         - standard order: POST address for form-based

         :param environment: Ogone environment segment ('test' or 'prod')
         @TDETODO: complete me
        """
        return {
            'ogone_standard_order_url': 'https://secure.ogone.com/ncol/%s/orderstandard_utf8.asp' % (environment,),
            'ogone_direct_order_url': 'https://secure.ogone.com/ncol/%s/orderdirect_utf8.asp' % (environment,),
            'ogone_direct_query_url': 'https://secure.ogone.com/ncol/%s/querydirect_utf8.asp' % (environment,),
            'ogone_afu_agree_url': 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (environment,),
        }

    def _get_providers(self, cr, uid, context=None):
        # Register 'ogone' in the provider selection list.
        providers = super(PaymentAcquirerOgone, self)._get_providers(cr, uid, context=context)
        providers.append(['ogone', 'Ogone'])
        return providers

    _columns = {
        'ogone_pspid': fields.char('PSPID', required_if_provider='ogone'),
        'ogone_userid': fields.char('API User ID', required_if_provider='ogone'),
        'ogone_password': fields.char('API User Password', required_if_provider='ogone'),
        'ogone_shakey_in': fields.char('SHA Key IN', size=32, required_if_provider='ogone'),
        'ogone_shakey_out': fields.char('SHA Key OUT', size=32, required_if_provider='ogone'),
    }

    def _ogone_generate_shasign(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in / shakey out configured.
        :param string inout: 'in' (openerp contacting ogone) or 'out' (ogone
                             contacting openerp). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values
        :return string: shasign (hex SHA-1 digest)
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'ogone'
        key = getattr(acquirer, 'ogone_shakey_' + inout)

        def filter_key(key):
            # Incoming signatures cover every parameter; outgoing ones only
            # the whitelisted SHA-OUT fields below.
            if inout == 'in':
                return True
            else:
                # SHA-OUT keys
                # source https://viveum.v-psp.com/Ncol/Viveum_e-Com-BAS_EN.pdf
                keys = [
                    'AAVADDRESS',
                    'AAVCHECK',
                    'AAVMAIL',
                    'AAVNAME',
                    'AAVPHONE',
                    'AAVZIP',
                    'ACCEPTANCE',
                    'ALIAS',
                    'AMOUNT',
                    'BIC',
                    'BIN',
                    'BRAND',
                    'CARDNO',
                    'CCCTY',
                    'CN',
                    'COMPLUS',
                    'CREATION_STATUS',
                    'CURRENCY',
                    'CVCCHECK',
                    'DCC_COMMPERCENTAGE',
                    'DCC_CONVAMOUNT',
                    'DCC_CONVCCY',
                    'DCC_EXCHRATE',
                    'DCC_EXCHRATESOURCE',
                    'DCC_EXCHRATETS',
                    'DCC_INDICATOR',
                    'DCC_MARGINPERCENTAGE',
                    'DCC_VALIDHOURS',
                    'DIGESTCARDNO',
                    'ECI',
                    'ED',
                    'ENCCARDNO',
                    'FXAMOUNT',
                    'FXCURRENCY',
                    'IBAN',
                    'IP',
                    'IPCTY',
                    'NBREMAILUSAGE',
                    'NBRIPUSAGE',
                    'NBRIPUSAGE_ALLTX',
                    'NBRUSAGE',
                    'NCERROR',
                    'NCERRORCARDNO',
                    'NCERRORCN',
                    'NCERRORCVC',
                    'NCERRORED',
                    'ORDERID',
                    'PAYID',
                    'PM',
                    'SCO_CATEGORY',
                    'SCORING',
                    'STATUS',
                    'SUBBRAND',
                    'SUBSCRIPTION_ID',
                    'TRXDATE',
                    'VC'
                ]
                return key.upper() in keys

        # Signature input: KEY=VALUE<shakey> pairs, uppercased keys, sorted,
        # empty values skipped — per Ogone's SHA signing scheme.
        items = sorted((k.upper(), v) for k, v in values.items())
        sign = ''.join('%s=%s%s' % (k, v, key) for k, v in items if v and filter_key(k))
        sign = sign.encode("utf-8")
        shasign = sha1(sign).hexdigest()
        return shasign

    def ogone_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the POST values (incl. SHASIGN) for the Ogone standard-order form."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)

        ogone_tx_values = dict(tx_values)
        temp_ogone_tx_values = {
            'PSPID': acquirer.ogone_pspid,
            'ORDERID': tx_values['reference'],
            # Amount is sent in minor units (cents).
            # NOTE(review): int(float_round(x, 2) * 100) can truncate by one
            # cent for some float values; consider rounding before int() —
            # confirm before changing.
            'AMOUNT': '%d' % int(float_round(tx_values['amount'], 2) * 100),
            'CURRENCY': tx_values['currency'] and tx_values['currency'].name or '',
            'LANGUAGE': partner_values['lang'],
            'CN': partner_values['name'],
            'EMAIL': partner_values['email'],
            'OWNERZIP': partner_values['zip'],
            'OWNERADDRESS': partner_values['address'],
            'OWNERTOWN': partner_values['city'],
            'OWNERCTY': partner_values['country'] and partner_values['country'].code or '',
            'OWNERTELNO': partner_values['phone'],
            # Return endpoints handled by OgoneController.
            'ACCEPTURL': '%s' % urlparse.urljoin(base_url, OgoneController._accept_url),
            'DECLINEURL': '%s' % urlparse.urljoin(base_url, OgoneController._decline_url),
            'EXCEPTIONURL': '%s' % urlparse.urljoin(base_url, OgoneController._exception_url),
            'CANCELURL': '%s' % urlparse.urljoin(base_url, OgoneController._cancel_url),
        }
        if ogone_tx_values.get('return_url'):
            # Passed through Ogone's PARAMPLUS so it comes back in the feedback.
            temp_ogone_tx_values['PARAMPLUS'] = 'return_url=%s' % ogone_tx_values.pop('return_url')
        shasign = self._ogone_generate_shasign(acquirer, 'in', temp_ogone_tx_values)
        temp_ogone_tx_values['SHASIGN'] = shasign
        ogone_tx_values.update(temp_ogone_tx_values)
        return partner_values, ogone_tx_values

    def ogone_get_form_action_url(self, cr, uid, id, context=None):
        """URL the payment form posts to (standard order, env-dependent)."""
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_ogone_urls(cr, uid, acquirer.environment, context=context)['ogone_standard_order_url']
class PaymentTxOgone(osv.Model):
    """payment.transaction extension for Ogone: form-feedback validation and
    the experimental server-to-server (DirectLink) flow."""
    _inherit = 'payment.transaction'
    # ogone status codes, see https://secure.ogone.com/ncol/paymentinfos1.asp
    _ogone_valid_tx_status = [5, 9]
    _ogone_wait_tx_status = [41, 50, 51, 52, 55, 56, 91, 92, 99]
    _ogone_pending_tx_status = [46]  # 3DS HTML response
    _ogone_cancel_tx_status = [1]

    _columns = {
        'ogone_3ds': fields.boolean('3DS Activated'),
        'ogone_3ds_html': fields.html('3DS HTML'),
        'ogone_complus': fields.char('Complus'),
        'ogone_payid': fields.char('PayID', help='Payment ID, generated by Ogone')
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _ogone_form_get_tx_from_data(self, cr, uid, data, context=None):
        """ Given a data dict coming from ogone, verify it and find the related
        transaction record. Raises ValidationError on missing fields, unknown
        or ambiguous reference, or SHA signature mismatch. """
        reference, pay_id, shasign = data.get('orderID'), data.get('PAYID'), data.get('SHASIGN')
        if not reference or not pay_id or not shasign:
            error_msg = 'Ogone: received data with missing reference (%s) or pay_id (%s) or shashign (%s)' % (reference, pay_id, shasign)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        # find tx -> @TDENOTE use paytid ?
        tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Ogone: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)

        # verify shasign against the SHA-OUT key
        shasign_check = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'out', data)
        if shasign_check.upper() != shasign.upper():
            error_msg = 'Ogone: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        return tx

    def _ogone_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (name, received, expected) tuples for feedback
        values that do not match the stored transaction."""
        invalid_parameters = []

        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
        if tx.acquirer_reference and data.get('PAYID') != tx.acquirer_reference:
            invalid_parameters.append(('PAYID', data.get('PAYID'), tx.acquirer_reference))
        # check what is bought
        if float_compare(float(data.get('amount', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
        if data.get('currency') != tx.currency_id.name:
            invalid_parameters.append(('currency', data.get('currency'), tx.currency_id.name))

        return invalid_parameters

    def _ogone_form_validate(self, cr, uid, tx, data, context=None):
        """Map the Ogone STATUS code onto the transaction state.
        Returns True on success, False on error; cancel/pending return None
        (falsy), matching the original behaviour."""
        if tx.state == 'done':
            _logger.warning('Ogone: trying to validate an already validated tx (ref %s)' % tx.reference)
            return True

        status = int(data.get('STATUS', '0'))
        if status in self._ogone_valid_tx_status:
            tx.write({
                'state': 'done',
                'date_validate': data['TRXDATE'],
                'acquirer_reference': data['PAYID'],
            })
            return True
        elif status in self._ogone_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'acquirer_reference': data.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status:
            tx.write({
                'state': 'pending',
                'acquirer_reference': data.get('PAYID'),
            })
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': data.get('NCERROR'),
                'error_code': data.get('NCERRORPLUS'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERRORPLUS')),
            }
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': data.get('PAYID'),
            })
            return False

    # --------------------------------------------------
    # S2S RELATED METHODS
    # --------------------------------------------------

    def ogone_s2s_create_alias(self, cr, uid, id, values, context=None):
        """ Create an alias at Ogone via batch.

        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)
        assert tx.type == 'server2server', 'Calling s2s dedicated method for a %s acquirer' % tx.type
        alias = 'OPENERP-%d-%d' % (tx.partner_id.id, tx.id)

        expiry_date = '%s%s' % (values['expiry_date_mm'], values['expiry_date_yy'][2:])
        line = 'ADDALIAS;%(alias)s;%(holder_name)s;%(number)s;%(expiry_date)s;%(brand)s;%(pspid)s'
        line = line % dict(values, alias=alias, expiry_date=expiry_date, pspid=tx.acquirer_id.ogone_pspid)

        tx_data = {
            'FILE_REFERENCE': 'OPENERP-NEW-ALIAS-%s' % time.time(),  # something unique,
            'TRANSACTION_CODE': 'ATR',
            'OPERATION': 'SAL',
            'NB_PAYMENTS': 1,  # even if we do not actually have any payment, ogone want it to not be 0
            'FILE': line,
            'REPLY_TYPE': 'XML',
            'PSPID': tx.acquirer_id.ogone_pspid,
            'USERID': tx.acquirer_id.ogone_userid,
            'PSWD': tx.acquirer_id.ogone_password,
            'PROCESS_MODE': 'CHECKANDPROCESS',
        }

        # TODO: fix URL computation
        request = urllib2.Request(tx.acquirer_id.ogone_afu_agree_url, urlencode(tx_data))
        result = urllib2.urlopen(request).read()

        try:
            tree = objectify.fromstring(result)
        except etree.XMLSyntaxError:
            _logger.exception('Invalid xml response from ogone')
            return None

        # Two possible error locations in the batch reply: global PARAMS_ERROR
        # or a per-line FORMAT_CHECK_ERROR.
        error_code = error_str = None
        if hasattr(tree, 'PARAMS_ERROR'):
            error_code = tree.NCERROR.text
            error_str = 'PARAMS ERROR: %s' % (tree.PARAMS_ERROR.text or '',)
        else:
            node = tree.FORMAT_CHECK
            error_node = getattr(node, 'FORMAT_CHECK_ERROR', None)
            if error_node is not None:
                error_code = error_node.NCERROR.text
                error_str = 'CHECK ERROR: %s' % (error_node.ERROR.text or '',)

        if error_code:
            error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
            error = '%s\n\n%s: %s' % (error_str, error_code, error_msg)
            _logger.error(error)
            raise Exception(error)  # TODO specific exception

        tx.write({'partner_reference': alias})
        return True

    def ogone_s2s_generate_values(self, cr, uid, id, custom_values, context=None):
        """ Generate valid Ogone values for a s2s tx.

        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)
        tx_data = {
            'PSPID': tx.acquirer_id.ogone_pspid,
            'USERID': tx.acquirer_id.ogone_userid,
            'PSWD': tx.acquirer_id.ogone_password,
            'OrderID': tx.reference,
            'amount': '%d' % int(float_round(tx.amount, 2) * 100),  # tde check amount or str * 100 ?
            'CURRENCY': tx.currency_id.name,
            'LANGUAGE': tx.partner_lang,
            'OPERATION': 'SAL',
            'ECI': 2,  # Recurring (from MOTO)
            'ALIAS': tx.partner_reference,
            'RTIMEOUT': 30,
        }
        if custom_values.get('ogone_cvc'):
            tx_data['CVC'] = custom_values.get('ogone_cvc')
        if custom_values.pop('ogone_3ds', None):
            tx_data.update({
                'FLAG3D': 'Y',  # YEAH!!
            })
        if custom_values.get('ogone_complus'):
            tx_data['COMPLUS'] = custom_values.get('ogone_complus')
        if custom_values.get('ogone_accept_url'):
            pass
        shasign = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'in', tx_data)
        tx_data['SHASIGN'] = shasign
        return tx_data

    def ogone_s2s_feedback(self, cr, uid, data, context=None):
        """
        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        pass

    def ogone_s2s_execute(self, cr, uid, id, values, context=None):
        """Send a DirectLink order and poll until it is resolved.

        .. versionadded:: pre-v8 saas-3
        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)

        tx_data = self.ogone_s2s_generate_values(cr, uid, id, values, context=context)
        _logger.info('Generated Ogone s2s data %s', pformat(tx_data))  # debug

        request = urllib2.Request(tx.acquirer_id.ogone_direct_order_url, urlencode(tx_data))
        result = urllib2.urlopen(request).read()
        _logger.info('Contacted Ogone direct order; result %s', result)  # debug

        tree = objectify.fromstring(result)
        payid = tree.get('PAYID')

        query_direct_data = dict(
            PSPID=tx.acquirer_id.ogone_pspid,
            USERID=tx.acquirer_id.ogone_userid,
            PSWD=tx.acquirer_id.ogone_password,
            ID=payid,
        )
        query_direct_url = 'https://secure.ogone.com/ncol/%s/querydirect.asp' % (tx.acquirer_id.environment,)

        tries = 2
        tx_done = False
        tx_status = False
        # BUGFIX: was "while not tx_done or tries > 0", which kept looping
        # (re-parsing the reply, and sleeping on WAIT) even after the
        # transaction was resolved; the final "not tx_done and tries == 0"
        # check below shows "and" was intended.
        while not tx_done and tries > 0:
            try:
                tree = objectify.fromstring(result)
            except etree.XMLSyntaxError:
                # invalid response from ogone
                _logger.exception('Invalid xml response from ogone')
                raise

            # Status lists shared with the form flow (see class attributes);
            # reference: https://secure.ogone.com/ncol/paymentinfos1.asp
            # any other status is an error.
            status = tree.get('STATUS')
            if status == '':
                status = None
            else:
                status = int(status)

            if status in self._ogone_valid_tx_status:
                tx_status = True
                tx_done = True
            elif status in self._ogone_pending_tx_status:
                # 3DS: Ogone returns the challenge page base64-encoded.
                html = str(tree.HTML_ANSWER)
                tx_data.update(ogone_3ds_html=html.decode('base64'))
                tx_status = False
                tx_done = True
            elif status in self._ogone_wait_tx_status:
                # NOTE(review): this sleeps 1500 *seconds* (25 min) between
                # polls; Ogone integrations usually wait ~1.5s — looks like a
                # milliseconds mix-up. Confirm before changing.
                time.sleep(1500)

                request = urllib2.Request(query_direct_url, urlencode(query_direct_data))
                result = urllib2.urlopen(request).read()
                _logger.debug('Contacted Ogone query direct; result %s', result)

            else:
                error_code = tree.get('NCERROR')
                if not ogone.retryable(error_code):
                    error_str = tree.get('NCERRORPLUS')
                    error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
                    error = 'ERROR: %s\n\n%s: %s' % (error_str, error_code, error_msg)
                    _logger.info(error)
                    raise Exception(error)

            tries = tries - 1

        if not tx_done and tries == 0:
            raise Exception('Cannot get transaction status...')

        return tx_status
| agpl-3.0 |
CodeMath/jinrockets | BluePrint/lib/bs4/builder/_lxml.py | 446 | 8661 | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(markup, try_encodings, is_html)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
    """Handle an lxml start-tag event, translating namespaces for the soup.

    :param name: Tag name, possibly in Clark notation ('{uri}local').
    :param attrs: Attribute mapping from lxml (may be an immutable dictproxy).
    :param nsmap: prefix -> namespace-URI mapping newly declared on this tag.
    """
    # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
    attrs = dict(attrs)
    nsprefix = None
    # Invert each namespace map as it comes in.
    # BUGFIX: the 'push a None placeholder' branch must only run when this
    # tag declares *no* new namespaces; without the len(nsmap) == 0 guard,
    # any namespace map declared after the first one was silently dropped.
    if len(nsmap) == 0 and len(self.nsmaps) > 1:
        # There are no new namespaces for this tag, but
        # non-default namespaces are in play, so we need a
        # separate tag stack to know when they end.
        self.nsmaps.append(None)
    elif len(nsmap) > 0:
        # A new namespace mapping has come into play.
        inverted_nsmap = dict((value, key) for key, value in nsmap.items())
        self.nsmaps.append(inverted_nsmap)
        # Also treat the namespace mapping as a set of attributes on the
        # tag, so we can recreate it later.
        attrs = attrs.copy()
        for prefix, namespace in nsmap.items():
            attribute = NamespacedAttribute(
                "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
            attrs[attribute] = namespace
    # Namespaces are in play. Find any attributes that came in
    # from lxml with namespaces attached to their names, and
    # turn them into NamespacedAttribute objects.
    new_attrs = {}
    for attr, value in attrs.items():
        namespace, attr = self._getNsTag(attr)
        if namespace is None:
            new_attrs[attr] = value
        else:
            nsprefix = self._prefix_for_namespace(namespace)
            attr = NamespacedAttribute(nsprefix, attr, namespace)
            new_attrs[attr] = value
    attrs = new_attrs
    namespace, name = self._getNsTag(name)
    nsprefix = self._prefix_for_namespace(namespace)
    self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
    """Handle an lxml end-tag event: close the tag in the soup and pop any
    namespace scope the matching start tag introduced."""
    self.soup.endData()
    completed_tag = self.soup.tagStack[-1]  # NOTE(review): unused local
    namespace, name = self._getNsTag(name)
    nsprefix = None
    if namespace is not None:
        # Find the innermost prefix declared for this namespace.
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                nsprefix = inverted_nsmap[namespace]
                break
    self.soup.handle_endtag(name, nsprefix)
    if len(self.nsmaps) > 1:
        # This tag, or one of its parents, introduced a namespace
        # mapping, so pop it off the stack.
        self.nsmaps.pop()
def pi(self, target, data):
    """Ignore XML processing instructions."""
    pass
def data(self, content):
    """Forward character data from lxml to the soup."""
    self.soup.handle_data(content)
def doctype(self, name, pubid, system):
    """Build a Doctype object from the DOCTYPE event and attach it to the soup."""
    self.soup.endData()
    doctype = Doctype.for_name_and_ids(name, pubid, system)
    self.soup.object_was_parsed(doctype)
def comment(self, content):
    "Handle comments as Comment objects."
    self.soup.endData()
    self.soup.handle_data(content)
    # Close the buffered text as a Comment node rather than plain text.
    self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """Tree builder that drives lxml's HTML parser (as opposed to XML)."""

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    def default_parser(self, encoding):
        # Return the parser *class*; parser_for() instantiates it.
        return etree.HTMLParser

    def feed(self, markup):
        """Parse `markup` in one shot with lxml's HTML parser.

        :raises ParserRejectedMarkup: if lxml cannot handle the document.
        """
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        # 'except E as e' works on Python 2.6+ and Python 3; the previous
        # 'except (...), e' form is a syntax error on Python 3.
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| mit |
c86j224s/snippet | Python_asyncio_binary_echo/pyclient2/Lib/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py | 8 | 15371 | """
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
from ..packages import six
import sys
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI always works.
HAS_SNI = True

# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}

# TLS 1.1/1.2 are registered only when both the stdlib and the local
# pyOpenSSL build expose the corresponding constants.
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD

if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD

try:
    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
    # ssl.PROTOCOL_SSLv3 is absent on builds compiled without SSLv3 support.
    pass

# Translation tables between stdlib CERT_* constants and OpenSSL VERIFY_* flags.
_stdlib_to_openssl_verify = {
    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
    ssl.CERT_REQUIRED:
        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict(
    (v, k) for k, v in _stdlib_to_openssl_verify.items()
)

# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384

# Originals saved so extract_from_urllib3() can undo the monkey-patching.
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext

log = logging.getLogger(__name__)
def inject_into_urllib3():
    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
    _validate_dependencies_met()
    # Swap urllib3's SSLContext and SNI flags for the PyOpenSSL versions.
    util.ssl_.SSLContext = PyOpenSSLContext
    util.HAS_SNI = HAS_SNI
    util.ssl_.HAS_SNI = HAS_SNI
    util.IS_PYOPENSSL = True
    util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
    'Undo monkey-patching by :func:`inject_into_urllib3`.'
    # Restore the originals captured at import time.
    util.ssl_.SSLContext = orig_util_SSLContext
    util.HAS_SNI = orig_util_HAS_SNI
    util.ssl_.HAS_SNI = orig_util_HAS_SNI
    util.IS_PYOPENSSL = False
    util.ssl_.IS_PYOPENSSL = False
def _validate_dependencies_met():
    """
    Verifies that PyOpenSSL's package-level dependencies have been met.
    Throws `ImportError` if they are not met.
    """
    # Method added in `cryptography==1.1`; not available in older versions
    from cryptography.x509.extensions import Extensions
    if getattr(Extensions, "get_extension_for_class", None) is None:
        raise ImportError("'cryptography' module missing required functionality. "
                          "Try upgrading to v1.3.4 or newer.")

    # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
    # attribute is only present on those versions.
    from OpenSSL.crypto import X509
    x509 = X509()
    if getattr(x509, "_x509", None) is None:
        raise ImportError("'pyOpenSSL' module missing required functionality. "
                          "Try upgrading to v0.14 or newer.")
def _dnsname_to_stdlib(name):
    """
    Converts a dNSName SubjectAlternativeName field to the form used by the
    standard library on the given Python version.

    Cryptography produces a dNSName as a unicode string that was idna-decoded
    from ASCII bytes. We need to idna-encode that string to get it back, and
    then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
    uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
    """
    def idna_encode(name):
        """
        Borrowed wholesale from the Python Cryptography Project. It turns out
        that we can't just safely call `idna.encode`: it can explode for
        wildcard names. This avoids that problem.
        """
        from pip._vendor import idna
        # Strip a wildcard ('*.') or leading-dot prefix before encoding,
        # then re-attach it as raw ASCII bytes.
        for prefix in [u'*.', u'.']:
            if name.startswith(prefix):
                name = name[len(prefix):]
                return prefix.encode('ascii') + idna.encode(name)
        return idna.encode(name)

    name = idna_encode(name)
    if sys.version_info >= (3, 0):
        name = name.decode('utf-8')
    return name
def get_subj_alt_name(peer_cert):
    """
    Given an PyOpenSSL certificate, provides all the subject alternative names.

    Returns a list of ('DNS', name) / ('IP Address', addr) string pairs, or
    an empty list when the SAN extension is absent or unreadable.
    """
    # Pass the cert to cryptography, which has much better APIs for this.
    if hasattr(peer_cert, "to_cryptography"):
        cert = peer_cert.to_cryptography()
    else:
        # This is technically using private APIs, but should work across all
        # relevant versions before PyOpenSSL got a proper API for this.
        cert = _Certificate(openssl_backend, peer_cert._x509)

    # We want to find the SAN extension. Ask Cryptography to locate it (it's
    # faster than looping in Python)
    try:
        ext = cert.extensions.get_extension_for_class(
            x509.SubjectAlternativeName
        ).value
    except x509.ExtensionNotFound:
        # No such extension, return the empty list.
        return []
    except (x509.DuplicateExtension, x509.UnsupportedExtension,
            x509.UnsupportedGeneralNameType, UnicodeError) as e:
        # A problem has been found with the quality of the certificate. Assume
        # no SAN field is present.
        log.warning(
            "A problem was encountered with the certificate that prevented "
            "urllib3 from finding the SubjectAlternativeName field. This can "
            "affect certificate validation. The error was %s",
            e,
        )
        return []

    # We want to return dNSName and iPAddress fields. We need to cast the IPs
    # back to strings because the match_hostname function wants them as
    # strings.
    # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
    # decoded. This is pretty frustrating, but that's what the standard library
    # does with certificates, and so we need to attempt to do the same.
    names = [
        ('DNS', _dnsname_to_stdlib(name))
        for name in ext.get_values_for_type(x509.DNSName)
    ]
    names.extend(
        ('IP Address', str(name))
        for name in ext.get_values_for_type(x509.IPAddress)
    )
    return names
class WrappedSocket(object):
    '''API-compatibility wrapper for Python OpenSSL's Connection-class.

    Presents (part of) the stdlib ssl.SSLSocket interface on top of a
    pyOpenSSL Connection and the underlying raw socket.
    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
    collector of pypy.
    '''

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection          # OpenSSL.SSL.Connection
        self.socket = socket                  # underlying raw socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self._makefile_refs = 0               # outstanding makefile() handles
        self._closed = False

    def fileno(self):
        return self.socket.fileno()

    # Copy-pasted from Python 3.5 source code
    def _decref_socketios(self):
        if self._makefile_refs > 0:
            self._makefile_refs -= 1
        if self._closed:
            self.close()

    def recv(self, *args, **kwargs):
        """Receive decrypted data; returns b'' on clean or tolerated EOF."""
        try:
            data = self.connection.recv(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            # (-1, 'Unexpected EOF') is how OpenSSL reports a ragged EOF.
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return b''
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError as e:
            # A clean TLS shutdown from the peer is treated as EOF.
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return b''
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            # Not enough TLS data buffered yet: wait for the socket and retry.
            rd = util.wait_for_read(self.socket, self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv(*args, **kwargs)
        else:
            return data

    def recv_into(self, *args, **kwargs):
        """Like recv(), but reads into a caller-supplied buffer; returns a count."""
        try:
            return self.connection.recv_into(*args, **kwargs)
        except OpenSSL.SSL.SysCallError as e:
            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                return 0
            else:
                raise SocketError(str(e))
        except OpenSSL.SSL.ZeroReturnError as e:
            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                return 0
            else:
                raise
        except OpenSSL.SSL.WantReadError:
            rd = util.wait_for_read(self.socket, self.socket.gettimeout())
            if not rd:
                raise timeout('The read operation timed out')
            else:
                return self.recv_into(*args, **kwargs)

    def settimeout(self, timeout):
        # Delegates to the raw socket. Note the parameter shadows the
        # imported `timeout` exception within this method only.
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        """Send `data`, retrying on WantWriteError; returns bytes sent."""
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                wr = util.wait_for_write(self.socket, self.socket.gettimeout())
                if not wr:
                    raise timeout()
                continue
            except OpenSSL.SSL.SysCallError as e:
                raise SocketError(str(e))

    def sendall(self, data):
        # OpenSSL will only write 16K at a time, so send in
        # SSL_WRITE_BLOCKSIZE chunks until everything is out.
        total_sent = 0
        while total_sent < len(data):
            sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
            total_sent += sent

    def shutdown(self):
        # FIXME rethrow compatible exceptions should we ever use this
        self.connection.shutdown()

    def close(self):
        """Close the TLS connection unless makefile() handles still exist."""
        if self._makefile_refs < 1:
            try:
                self._closed = True
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        """Return the peer certificate as a stdlib-shaped dict, or DER bytes
        when binary_form is true; falsy when no certificate was presented."""
        x509 = self.connection.get_peer_certificate()

        if not x509:
            return x509

        if binary_form:
            return OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                x509)

        return {
            'subject': (
                (('commonName', x509.get_subject().CN),),
            ),
            'subjectAltName': get_subj_alt_name(x509)
        }

    def _reuse(self):
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
if _fileobject:  # Platform-specific: Python 2
    def makefile(self, mode, bufsize=-1):
        # Count outstanding file objects so close() can defer (see pypy note).
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
else:  # Platform-specific: Python 3
    makefile = backport_makefile

# Attach the version-appropriate makefile implementation to WrappedSocket.
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
    """
    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
    for translating the interface of the standard library ``SSLContext`` object
    to calls into PyOpenSSL.
    """

    def __init__(self, protocol):
        # `protocol` is a stdlib ssl.PROTOCOL_* constant; map it to the
        # corresponding OpenSSL *_METHOD value.
        self.protocol = _openssl_versions[protocol]
        self._ctx = OpenSSL.SSL.Context(self.protocol)
        self._options = 0
        self.check_hostname = False

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, value):
        # Mirror the value locally so the getter avoids a PyOpenSSL call.
        self._options = value
        self._ctx.set_options(value)

    @property
    def verify_mode(self):
        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]

    @verify_mode.setter
    def verify_mode(self, value):
        self._ctx.set_verify(
            _stdlib_to_openssl_verify[value],
            _verify_callback
        )

    def set_default_verify_paths(self):
        self._ctx.set_default_verify_paths()

    def set_ciphers(self, ciphers):
        # PyOpenSSL expects the cipher list as bytes.
        if isinstance(ciphers, six.text_type):
            ciphers = ciphers.encode('utf-8')
        self._ctx.set_cipher_list(ciphers)

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        if cafile is not None:
            cafile = cafile.encode('utf-8')
        if capath is not None:
            capath = capath.encode('utf-8')
        self._ctx.load_verify_locations(cafile, capath)
        if cadata is not None:
            # cadata is in-memory certificate data; feed it via a file-like.
            self._ctx.load_verify_locations(BytesIO(cadata))

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        self._ctx.use_certificate_file(certfile)
        if password is not None:
            self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
        self._ctx.use_privatekey_file(keyfile or certfile)

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap `sock` in TLS, perform the handshake, return a WrappedSocket.

        NOTE(review): server_side, do_handshake_on_connect and
        suppress_ragged_eofs are accepted for SSLContext signature
        compatibility but ignored here -- the connection is always put in
        client mode, the handshake always runs, and WrappedSocket is built
        with its defaults.
        """
        cnx = OpenSSL.SSL.Connection(self._ctx, sock)

        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
            server_hostname = server_hostname.encode('utf-8')

        if server_hostname is not None:
            # Send the target hostname via SNI.
            cnx.set_tlsext_host_name(server_hostname)

        cnx.set_connect_state()

        while True:
            try:
                cnx.do_handshake()
            except OpenSSL.SSL.WantReadError:
                # Handshake needs more data from the peer; wait and retry.
                rd = util.wait_for_read(sock, sock.gettimeout())
                if not rd:
                    raise timeout('select timed out')
                continue
            except OpenSSL.SSL.Error as e:
                raise ssl.SSLError('bad handshake: %r' % e)
            break

        return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
| apache-2.0 |
bboot/doorman | tts.py | 2 | 3924 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around a TTS system."""
import functools
import logging
import os
import subprocess
import tempfile
import wave
import numpy as np
from scipy import signal
import i18n
# Path to a tmpfs directory to avoid SD card wear
TMP_DIR = '/run/user/%d' % os.getuid()

# Expected sample rate from the TTS tool
SAMPLE_RATE = 16000

# Parameters for the equalization filter. These remove low-frequency sound
# from the result, avoiding resonance on the speaker and making the TTS easier
# to understand. Calculated with:
#   python3 src/tts.py --hpf-order 4 --hpf-freq-hz 1400 --hpf-gain-db 8
# FILTER_A holds the denominator (a) and FILTER_B the gain-scaled numerator
# (b) coefficients consumed by scipy.signal.lfilter in create_eq_filter().
FILTER_A = np.array([1., -3.28274474, 4.09441957, -2.29386174, 0.48627065])
FILTER_B = np.array([1.75161639, -7.00646555, 10.50969833, -7.00646555, 1.75161639])

# Module-level logger for TTS diagnostics.
logger = logging.getLogger('tts')
def print_eq_coefficients(hpf_order, hpf_freq_hz, hpf_gain_db):
    """Calculate and print the coefficients of the equalization filter.

    Emits FILTER_A / FILTER_B lines ready to paste into this module; the
    linear gain derived from hpf_gain_db is folded into the numerator (b)
    coefficients only.
    """
    numerator, denominator = signal.butter(
        hpf_order, hpf_freq_hz / SAMPLE_RATE, 'highpass')
    linear_gain = 10 ** (hpf_gain_db / 20)
    print('FILTER_A = np.%r' % denominator)
    print('FILTER_B = np.%r' % (numerator * linear_gain))
def create_eq_filter():
    """Return a callable applying the module's high-pass EQ (FILTER_B /
    FILTER_A via scipy.signal.lfilter) to a numpy array of samples."""
    return functools.partial(signal.lfilter, FILTER_B, FILTER_A)
def create_say(player):
    """Return a one-argument function say(words) bound to `player`, using the
    default EQ filter and the current i18n language code."""
    language = i18n.get_language_code()
    eq = create_eq_filter()
    return functools.partial(say, player, eq_filter=eq, lang=language)
def say(player, words, eq_filter=None, lang='en-US'):
    """Say the given words with TTS.

    Synthesizes `words` with pico2wave into a temp WAV file, optionally
    runs the samples through `eq_filter`, then plays them.

    :param player: Audio player exposing play_bytes(data, sample_rate).
    :param words: Text to synthesize.
    :param eq_filter: Optional callable applied to the int16 sample array.
    :param lang: pico2wave language code.
    """
    try:
        (fd, raw_wav) = tempfile.mkstemp(suffix='.wav', dir=TMP_DIR)
    except IOError:
        # TMP_DIR (tmpfs) may be unavailable; fall back to the system default
        # temp directory even though that may touch the SD card.
        logger.exception('Using fallback directory for TTS output')
        (fd, raw_wav) = tempfile.mkstemp(suffix='.wav')
    os.close(fd)
    try:
        subprocess.call(['pico2wave', '-l', lang, '-w', raw_wav, words])
        with wave.open(raw_wav, 'rb') as f:
            raw_bytes = f.readframes(f.getnframes())
    finally:
        os.unlink(raw_wav)
    # Deserialize and apply equalization filter
    eq_audio = np.frombuffer(raw_bytes, dtype=np.int16)
    if eq_filter:
        eq_audio = eq_filter(eq_audio)
    # Clip to the int16 range and serialize. tobytes() replaces tostring(),
    # which is deprecated and removed in NumPy 2.0; output is identical.
    int16_info = np.iinfo(np.int16)
    eq_audio = np.clip(eq_audio, int16_info.min, int16_info.max)
    eq_bytes = eq_audio.astype(np.int16).tobytes()
    player.play_bytes(eq_bytes, sample_rate=SAMPLE_RATE)
def main():
    """Command-line entry point: speak any given words and/or print
    equalization filter coefficients for the given high-pass parameters."""
    import argparse
    import audio

    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description='Test TTS wrapper')
    parser.add_argument('words', nargs='*', help='Words to say')
    parser.add_argument('--hpf-order', type=int, help='Order of high-pass filter')
    parser.add_argument('--hpf-freq-hz', type=int, help='Corner frequency of high-pass filter')
    parser.add_argument('--hpf-gain-db', type=int, help='High-frequency gain of filter')
    args = parser.parse_args()

    if args.words:
        words = ' '.join(args.words)
        player = audio.Player()
        create_say(player)(words)

    if args.hpf_order:
        print_eq_coefficients(args.hpf_order, args.hpf_freq_hz, args.hpf_gain_db)
if __name__ == '__main__':
main()
| apache-2.0 |
tplusx/ns3-gpsr | src/visualizer/bindings/modulegen__gcc_LP64.py | 30 | 325182 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Pybindgen error handler that reports wrapper-generation failures as
    warnings instead of aborting."""
    def handle_error(self, wrapper, exception, traceback_):
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        # Returning True presumably marks the error as handled so generation
        # continues -- per pybindgen's ErrorHandler contract; TODO confirm.
        return True
# Install the lenient handler so generation continues past broken wrappers.
pybindgen.settings.error_handler = ErrorHandler()

import sys
def module_init():
    """Create and return the root pybindgen Module for the ns3 visualizer
    bindings, rooted in the ::ns3 C++ namespace."""
    root_module = Module('ns.visualizer', cpp_namespace='::ns3')
    return root_module
def register_types(module):
    """Register every C++ type (class, enum, container) used by this module.

    Generated by pybindgen: most types are re-exported from the core,
    network and internet modules (``import_from_module=...``); only the
    ``ns3::PyViz`` visualizer classes are defined by this module itself.
    Registration order matters — parents and outer classes must be
    registered before the types that reference them.
    """
    root_module = module.get_root()
    ## address.h (module 'network'): ns3::Address [class]
    module.add_class('Address', import_from_module='ns.network')
    ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
    module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    ## buffer.h (module 'network'): ns3::Buffer [class]
    module.add_class('Buffer', import_from_module='ns.network')
    ## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
    ## packet.h (module 'network'): ns3::ByteTagIterator [class]
    module.add_class('ByteTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
    module.add_class('ByteTagList', import_from_module='ns.network')
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
    module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
    ## callback.h (module 'core'): ns3::CallbackBase [class]
    module.add_class('CallbackBase', import_from_module='ns.core')
    ## event-id.h (module 'core'): ns3::EventId [class]
    module.add_class('EventId', import_from_module='ns.core')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    module.add_class('Ipv4Address', import_from_module='ns.network')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
    root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
    module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
    ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
    module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
    module.add_class('Ipv4Mask', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    module.add_class('Ipv6Address', import_from_module='ns.network')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
    root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
    module.add_class('Ipv6Prefix', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    module.add_class('Mac48Address', import_from_module='ns.network')
    ## mac48-address.h (module 'network'): ns3::Mac48Address [class]
    root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
    ## object-base.h (module 'core'): ns3::ObjectBase [class]
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    ## object.h (module 'core'): ns3::ObjectDeleter [struct]
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    ## object-factory.h (module 'core'): ns3::ObjectFactory [class]
    module.add_class('ObjectFactory', import_from_module='ns.core')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
    module.add_class('PacketMetadata', import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
    module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
    module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
    ## packet.h (module 'network'): ns3::PacketTagIterator [class]
    module.add_class('PacketTagIterator', import_from_module='ns.network')
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
    module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
    module.add_class('PacketTagList', import_from_module='ns.network')
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
    module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
    # --- Types defined by this (visualizer) module itself ---
    ## pyviz.h (module 'visualizer'): ns3::PyViz [class]
    module.add_class('PyViz')
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureMode [enumeration]
    module.add_enum('PacketCaptureMode', ['PACKET_CAPTURE_DISABLED', 'PACKET_CAPTURE_FILTER_HEADERS_OR', 'PACKET_CAPTURE_FILTER_HEADERS_AND'], outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample [struct]
    module.add_class('LastPacketsSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics [struct]
    module.add_class('NetDeviceStatistics', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NodeStatistics [struct]
    module.add_class('NodeStatistics', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions [struct]
    module.add_class('PacketCaptureOptions', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketDropSample [struct]
    module.add_class('PacketDropSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample [struct]
    module.add_class('PacketSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::RxPacketSample [struct]
    module.add_class('RxPacketSample', parent=root_module['ns3::PyViz::PacketSample'], outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample [struct]
    module.add_class('TransmissionSample', outer_class=root_module['ns3::PyViz'])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TxPacketSample [struct]
    module.add_class('TxPacketSample', parent=root_module['ns3::PyViz::PacketSample'], outer_class=root_module['ns3::PyViz'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simulator.h (module 'core'): ns3::Simulator [class]
    module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
    ## tag.h (module 'network'): ns3::Tag [class]
    module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
    module.add_class('TagBuffer', import_from_module='ns.network')
    ## type-id.h (module 'core'): ns3::TypeId [class]
    module.add_class('TypeId', import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    ## empty.h (module 'core'): ns3::empty [class]
    module.add_class('empty', import_from_module='ns.core')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
    module.add_class('int64x64_t', import_from_module='ns.core')
    ## chunk.h (module 'network'): ns3::Chunk [class]
    module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
    ## header.h (module 'network'): ns3::Header [class]
    module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class]
    module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration]
    module.add_enum('DscpType', ['DscpDefault', 'CS1', 'AF11', 'AF12', 'AF13', 'CS2', 'AF21', 'AF22', 'AF23', 'CS3', 'AF31', 'AF32', 'AF33', 'CS4', 'AF41', 'AF42', 'AF43', 'CS5', 'EF', 'CS6', 'CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration]
    module.add_enum('EcnType', ['NotECT', 'ECT1', 'ECT0', 'CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
    ## object.h (module 'core'): ns3::Object [class]
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    ## object.h (module 'core'): ns3::Object::AggregateIterator [class]
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    ## socket.h (module 'network'): ns3::Socket [class]
    module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
    module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
    module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
    ## socket.h (module 'network'): ns3::SocketAddressTag [class]
    module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
    module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
    module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
    ## nstime.h (module 'core'): ns3::Time [class]
    module.add_class('Time', import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
    module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    ## nstime.h (module 'core'): ns3::Time [class]
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    ## trailer.h (module 'network'): ns3::Trailer [class]
    module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
    ## attribute.h (module 'core'): ns3::AttributeAccessor [class]
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    ## attribute.h (module 'core'): ns3::AttributeChecker [class]
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    ## attribute.h (module 'core'): ns3::AttributeValue [class]
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    ## callback.h (module 'core'): ns3::CallbackChecker [class]
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## callback.h (module 'core'): ns3::CallbackImplBase [class]
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    ## callback.h (module 'core'): ns3::CallbackValue [class]
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## channel.h (module 'network'): ns3::Channel [class]
    module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## event-impl.h (module 'core'): ns3::EventImpl [class]
    module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    ## ipv4.h (module 'internet'): ns3::Ipv4 [class]
    module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
    module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
    module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class]
    module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
    ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration]
    module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol [class]
    module.add_class('Ipv4L4Protocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::RxStatus [enumeration]
    module.add_enum('RxStatus', ['RX_OK', 'RX_CSUM_FAILED', 'RX_ENDPOINT_CLOSED', 'RX_ENDPOINT_UNREACH'], outer_class=root_module['ns3::Ipv4L4Protocol'], import_from_module='ns.internet')
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
    module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
    module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class]
    module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class]
    module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class]
    module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
    module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
    module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
    module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
    module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
    module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
    module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    ## net-device.h (module 'network'): ns3::NetDevice [class]
    module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
    module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
    ## nix-vector.h (module 'network'): ns3::NixVector [class]
    module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    ## node.h (module 'network'): ns3::Node [class]
    module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
    module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
    module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
    module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    ## packet.h (module 'network'): ns3::Packet [class]
    module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    ## nstime.h (module 'core'): ns3::TimeChecker [class]
    module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## nstime.h (module 'core'): ns3::TimeValue [class]
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## type-id.h (module 'core'): ns3::TypeIdChecker [class]
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    ## type-id.h (module 'core'): ns3::TypeIdValue [class]
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    ## address.h (module 'network'): ns3::AddressChecker [class]
    module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
    ## address.h (module 'network'): ns3::AddressValue [class]
    module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
    # --- STL container instantiations used in wrapped signatures ---
    module.add_container('std::vector< ns3::PyViz::RxPacketSample >', 'ns3::PyViz::RxPacketSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::TxPacketSample >', 'ns3::PyViz::TxPacketSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::PacketSample >', 'ns3::PyViz::PacketSample', container_type='vector')
    module.add_container('std::set< ns3::TypeId >', 'ns3::TypeId', container_type='set')
    module.add_container('std::vector< ns3::PyViz::TransmissionSample >', 'ns3::PyViz::TransmissionSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::PacketDropSample >', 'ns3::PyViz::PacketDropSample', container_type='vector')
    module.add_container('std::vector< ns3::PyViz::NetDeviceStatistics >', 'ns3::PyViz::NetDeviceStatistics', container_type='vector')
    module.add_container('std::vector< std::string >', 'std::string', container_type='vector')
    module.add_container('std::set< unsigned int >', 'unsigned int', container_type='set')
    module.add_container('std::vector< ns3::PyViz::NodeStatistics >', 'ns3::PyViz::NodeStatistics', container_type='vector')
    module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type='map')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
    """Registration hook for the ns3::FatalImpl namespace.

    No types are currently exposed from this namespace; the hook exists so
    the generated registration structure stays uniform across namespaces.
    """
    module.get_root()
def register_methods(root_module):
    """Register the methods of every wrapped class on *root_module*.

    Dispatches to one ``register_Ns3*_methods`` helper per class previously
    added by the type-registration pass; each helper receives the class
    wrapper looked up by its fully qualified C++ name.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PyViz_methods(root_module, root_module['ns3::PyViz'])
    register_Ns3PyVizLastPacketsSample_methods(root_module, root_module['ns3::PyViz::LastPacketsSample'])
    register_Ns3PyVizNetDeviceStatistics_methods(root_module, root_module['ns3::PyViz::NetDeviceStatistics'])
    register_Ns3PyVizNodeStatistics_methods(root_module, root_module['ns3::PyViz::NodeStatistics'])
    register_Ns3PyVizPacketCaptureOptions_methods(root_module, root_module['ns3::PyViz::PacketCaptureOptions'])
    register_Ns3PyVizPacketDropSample_methods(root_module, root_module['ns3::PyViz::PacketDropSample'])
    register_Ns3PyVizPacketSample_methods(root_module, root_module['ns3::PyViz::PacketSample'])
    register_Ns3PyVizRxPacketSample_methods(root_module, root_module['ns3::PyViz::RxPacketSample'])
    register_Ns3PyVizTransmissionSample_methods(root_module, root_module['ns3::PyViz::TransmissionSample'])
    register_Ns3PyVizTxPacketSample_methods(root_module, root_module['ns3::PyViz::TxPacketSample'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4L4Protocol_methods(root_module, root_module['ns3::Ipv4L4Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Register comparison operators, constructors and member functions of ns3::Address on *cls*."""
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible',
                   'bool',
                   [param('uint8_t', 'type'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid',
                   'bool',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('uint8_t', 'type')],
                   is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register',
                   'uint8_t',
                   [],
                   is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and member functions of ns3::AttributeConstructionList on *cls*."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and public data members of ns3::AttributeConstructionList::Item on *cls*."""
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register constructors and member functions of ns3::Buffer on *cls*."""
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'bool',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'bool',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy',
                   'ns3::Buffer',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset',
                   'int32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register constructors and member functions of ns3::Buffer::Iterator on *cls*."""
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register constructors and member functions of ns3::ByteTagIterator on *cls*."""
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagIterator::Item',
                   [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register constructors and member functions of ns3::ByteTagIterator::Item on *cls*."""
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register constructors and member functions of ns3::ByteTagList on *cls*."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add',
                   'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
                   'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register constructors and member functions of ns3::ByteTagList::Iterator on *cls*."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagList::Iterator::Item',
                   [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register constructors and public data members of ns3::ByteTagList::Iterator::Item on *cls*."""
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackBase (callback.h, module 'core')."""
    # public constructors: copy and default
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<CallbackImplBase> GetImpl() const
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # protected constructor from an implementation pointer
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    # protected static std::string Demangle(std::string const & mangled)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
def register_Ns3EventId_methods(root_module, cls):
    """Register Python bindings for ns3::EventId (event-id.h, module 'core')."""
    # comparison operators
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # constructors: copy, default, and full (impl, ts, context, uid)
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    # member functions
    cls.add_method('Cancel', 'void', [])
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Address (ipv4-address.h, module 'network')."""
    # operators
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # constructors: copy, default, from uint32_t, from C string
    cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'address')])
    cls.add_constructor([param('char const *', 'address')])
    # const member functions
    cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    # static helpers
    cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True)
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True)
    cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True)
    # predicates
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True)
    cls.add_method('IsLocalMulticast', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    # I/O and mutators
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    cls.add_method('Set', 'void', [param('char const *', 'address')])
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4InterfaceAddress (ipv4-interface-address.h, module 'internet')."""
    # operators
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # constructors: default, (local, mask), copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')])
    cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')])
    # getters
    cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True)
    cls.add_method('IsSecondary', 'bool', [], is_const=True)
    # setters
    cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')])
    cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')])
    cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')])
    cls.add_method('SetPrimary', 'void', [])
    cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')])
    cls.add_method('SetSecondary', 'void', [])
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network')."""
    # operators
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # constructors: copy, default, from uint32_t, from C string
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'mask')])
    cls.add_constructor([param('char const *', 'mask')])
    # accessors
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    # predicates
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    # I/O and mutator
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (ipv6-address.h, module 'network')."""
    # operators
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # constructors: default, C string, raw bytes, copy, from pointer
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'address')])
    cls.add_constructor([param('uint8_t *', 'address')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    # prefix combination (non-const in the C++ API)
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    # static conversion / construction helpers
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    # predicates
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    cls.add_method('IsAny', 'bool', [], is_const=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    # autoconfiguration helpers
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    # I/O and mutators
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix (ipv6-address.h, module 'network')."""
    # operators
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # constructors: default, raw bytes, C string, prefix length, copy, from pointer
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t *', 'prefix')])
    cls.add_constructor([param('char const *', 'prefix')])
    cls.add_constructor([param('uint8_t', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    # accessors
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    # predicates
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    # I/O
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
def register_Ns3Mac48Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac48Address (mac48-address.h, module 'network')."""
    # operators
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # constructors: copy, default, from C string
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('char const *', 'str')])
    # static helpers
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    # raw-buffer accessors
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    # multicast mapping (IPv4 and IPv6 overloads)
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    # predicates
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core')."""
    # constructors: default and copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # attribute access
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    # pure-virtual run-time type information
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # trace source (dis)connection
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # protected construction hooks
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectDeleter (object.h, module 'core')."""
    # constructors: default and copy
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object * object)
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory (object-factory.h, module 'core')."""
    cls.add_output_stream_operator()
    # constructors: copy, default, from a TypeId name string
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('std::string', 'typeId')])
    # Ptr<Object> Create() const
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    # attribute setter
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # SetTypeId overloads: TypeId, C string, std::string
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketMetadata (packet-metadata.h, module 'network') on *cls*.

    Auto-generated: constructors plus header/trailer add/remove, fragment
    creation, serialization, iteration, and the static Enable hooks.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketMetadata::Item (packet-metadata.h, module 'network') on *cls*.

    Auto-generated: constructors and the public instance attributes of the
    metadata item record (current, sizes, trim counters, fragment flag, tid).
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketMetadata::ItemIterator (packet-metadata.h, module 'network') on *cls*.

    Auto-generated: constructors plus the HasNext/Next iteration pair.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketMetadata::Item',
                   [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketTagIterator (packet.h, module 'network') on *cls*.

    Auto-generated: copy constructor plus the HasNext/Next iteration pair.
    """
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::PacketTagIterator::Item',
                   [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketTagIterator::Item (packet.h, module 'network') on *cls*.

    Auto-generated: copy constructor plus the GetTag/GetTypeId accessors.
    """
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketTagList (packet-tag-list.h, module 'network') on *cls*.

    Auto-generated: constructors plus Add/Head/Peek/Remove/RemoveAll.
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PacketTagList::TagData (packet-tag-list.h, module 'network') on *cls*.

    Auto-generated: constructors plus the public fields of the intrusive
    linked-list node (count, data buffer, next pointer, tid).
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3PyViz_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors, sampling/statistics getters, the static
    LineClipping and Pause helpers, device/trace registration methods, and
    the capture/run configuration entry points.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PyViz(ns3::PyViz const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PyViz() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample ns3::PyViz::GetLastPackets(uint32_t nodeId) const [member function]
    cls.add_method('GetLastPackets',
                   'ns3::PyViz::LastPacketsSample',
                   [param('uint32_t', 'nodeId')],
                   is_const=True)
    ## pyviz.h (module 'visualizer'): std::vector<ns3::PyViz::NodeStatistics,std::allocator<ns3::PyViz::NodeStatistics> > ns3::PyViz::GetNodesStatistics() const [member function]
    cls.add_method('GetNodesStatistics',
                   'std::vector< ns3::PyViz::NodeStatistics >',
                   [],
                   is_const=True)
    ## pyviz.h (module 'visualizer'): std::vector<ns3::PyViz::PacketDropSample,std::allocator<ns3::PyViz::PacketDropSample> > ns3::PyViz::GetPacketDropSamples() const [member function]
    cls.add_method('GetPacketDropSamples',
                   'std::vector< ns3::PyViz::PacketDropSample >',
                   [],
                   is_const=True)
    ## pyviz.h (module 'visualizer'): std::vector<std::string, std::allocator<std::string> > ns3::PyViz::GetPauseMessages() const [member function]
    cls.add_method('GetPauseMessages',
                   'std::vector< std::string >',
                   [],
                   is_const=True)
    ## pyviz.h (module 'visualizer'): std::vector<ns3::PyViz::TransmissionSample,std::allocator<ns3::PyViz::TransmissionSample> > ns3::PyViz::GetTransmissionSamples() const [member function]
    cls.add_method('GetTransmissionSamples',
                   'std::vector< ns3::PyViz::TransmissionSample >',
                   [],
                   is_const=True)
    ## pyviz.h (module 'visualizer'): static void ns3::PyViz::LineClipping(double boundsX1, double boundsY1, double boundsX2, double boundsY2, double & lineX1, double & lineY1, double & lineX2, double & lineY2) [member function]
    # NOTE: direction=3 marks the line coordinates as in/out parameters for pybindgen.
    cls.add_method('LineClipping',
                   'void',
                   [param('double', 'boundsX1'), param('double', 'boundsY1'), param('double', 'boundsX2'), param('double', 'boundsY2'), param('double &', 'lineX1', direction=3), param('double &', 'lineY1', direction=3), param('double &', 'lineX2', direction=3), param('double &', 'lineY2', direction=3)],
                   is_static=True)
    ## pyviz.h (module 'visualizer'): static void ns3::PyViz::Pause(std::string const & message) [member function]
    cls.add_method('Pause',
                   'void',
                   [param('std::string const &', 'message')],
                   is_static=True)
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::RegisterCsmaLikeDevice(std::string const & deviceTypeName) [member function]
    cls.add_method('RegisterCsmaLikeDevice',
                   'void',
                   [param('std::string const &', 'deviceTypeName')])
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::RegisterDropTracePath(std::string const & tracePath) [member function]
    cls.add_method('RegisterDropTracePath',
                   'void',
                   [param('std::string const &', 'tracePath')])
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::RegisterPointToPointLikeDevice(std::string const & deviceTypeName) [member function]
    cls.add_method('RegisterPointToPointLikeDevice',
                   'void',
                   [param('std::string const &', 'deviceTypeName')])
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::RegisterWifiLikeDevice(std::string const & deviceTypeName) [member function]
    cls.add_method('RegisterWifiLikeDevice',
                   'void',
                   [param('std::string const &', 'deviceTypeName')])
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::SetNodesOfInterest(std::set<unsigned int, std::less<unsigned int>, std::allocator<unsigned int> > nodes) [member function]
    cls.add_method('SetNodesOfInterest',
                   'void',
                   [param('std::set< unsigned int >', 'nodes')])
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::SetPacketCaptureOptions(uint32_t nodeId, ns3::PyViz::PacketCaptureOptions options) [member function]
    cls.add_method('SetPacketCaptureOptions',
                   'void',
                   [param('uint32_t', 'nodeId'), param('ns3::PyViz::PacketCaptureOptions', 'options')])
    ## pyviz.h (module 'visualizer'): void ns3::PyViz::SimulatorRunUntil(ns3::Time time) [member function]
    cls.add_method('SimulatorRunUntil',
                   'void',
                   [param('ns3::Time', 'time')])
    return
def register_Ns3PyVizLastPacketsSample_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::LastPacketsSample (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the three packet-sample vectors.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample::LastPacketsSample() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample::LastPacketsSample(ns3::PyViz::LastPacketsSample const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::LastPacketsSample const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample::lastDroppedPackets [variable]
    cls.add_instance_attribute('lastDroppedPackets', 'std::vector< ns3::PyViz::PacketSample >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample::lastReceivedPackets [variable]
    cls.add_instance_attribute('lastReceivedPackets', 'std::vector< ns3::PyViz::RxPacketSample >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::LastPacketsSample::lastTransmittedPackets [variable]
    cls.add_instance_attribute('lastTransmittedPackets', 'std::vector< ns3::PyViz::TxPacketSample >', is_const=False)
    return
def register_Ns3PyVizNetDeviceStatistics_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::NetDeviceStatistics (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the rx/tx byte and packet counters.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics::NetDeviceStatistics(ns3::PyViz::NetDeviceStatistics const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::NetDeviceStatistics const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics::NetDeviceStatistics() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics::receivedBytes [variable]
    cls.add_instance_attribute('receivedBytes', 'uint64_t', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics::receivedPackets [variable]
    cls.add_instance_attribute('receivedPackets', 'uint32_t', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics::transmittedBytes [variable]
    cls.add_instance_attribute('transmittedBytes', 'uint64_t', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NetDeviceStatistics::transmittedPackets [variable]
    cls.add_instance_attribute('transmittedPackets', 'uint32_t', is_const=False)
    return
def register_Ns3PyVizNodeStatistics_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::NodeStatistics (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the nodeId and per-device statistics fields.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NodeStatistics::NodeStatistics() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NodeStatistics::NodeStatistics(ns3::PyViz::NodeStatistics const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::NodeStatistics const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NodeStatistics::nodeId [variable]
    cls.add_instance_attribute('nodeId', 'uint32_t', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::NodeStatistics::statistics [variable]
    cls.add_instance_attribute('statistics', 'std::vector< ns3::PyViz::NetDeviceStatistics >', is_const=False)
    return
def register_Ns3PyVizPacketCaptureOptions_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::PacketCaptureOptions (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the headers/mode/numLastPackets fields.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions::PacketCaptureOptions() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions::PacketCaptureOptions(ns3::PyViz::PacketCaptureOptions const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::PacketCaptureOptions const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions::headers [variable]
    cls.add_instance_attribute('headers', 'std::set< ns3::TypeId >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions::mode [variable]
    cls.add_instance_attribute('mode', 'ns3::PyViz::PacketCaptureMode', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketCaptureOptions::numLastPackets [variable]
    cls.add_instance_attribute('numLastPackets', 'uint32_t', is_const=False)
    return
def register_Ns3PyVizPacketDropSample_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::PacketDropSample (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the bytes and transmitter fields.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketDropSample::PacketDropSample() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketDropSample::PacketDropSample(ns3::PyViz::PacketDropSample const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::PacketDropSample const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketDropSample::bytes [variable]
    cls.add_instance_attribute('bytes', 'uint32_t', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketDropSample::transmitter [variable]
    cls.add_instance_attribute('transmitter', 'ns3::Ptr< ns3::Node >', is_const=False)
    return
def register_Ns3PyVizPacketSample_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::PacketSample (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the device/packet/time fields.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample::PacketSample() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample::PacketSample(ns3::PyViz::PacketSample const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::PacketSample const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample::device [variable]
    cls.add_instance_attribute('device', 'ns3::Ptr< ns3::NetDevice >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample::packet [variable]
    cls.add_instance_attribute('packet', 'ns3::Ptr< ns3::Packet >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::PacketSample::time [variable]
    cls.add_instance_attribute('time', 'ns3::Time', is_const=False)
    return
def register_Ns3PyVizRxPacketSample_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::RxPacketSample (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the 'from' MAC address field.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::RxPacketSample::RxPacketSample() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::RxPacketSample::RxPacketSample(ns3::PyViz::RxPacketSample const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::RxPacketSample const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::RxPacketSample::from [variable]
    cls.add_instance_attribute('from', 'ns3::Mac48Address', is_const=False)
    return
def register_Ns3PyVizTransmissionSample_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::TransmissionSample (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus bytes/channel/receiver/transmitter fields.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample::TransmissionSample() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample::TransmissionSample(ns3::PyViz::TransmissionSample const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::TransmissionSample const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample::bytes [variable]
    cls.add_instance_attribute('bytes', 'uint32_t', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample::channel [variable]
    cls.add_instance_attribute('channel', 'ns3::Ptr< ns3::Channel >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample::receiver [variable]
    cls.add_instance_attribute('receiver', 'ns3::Ptr< ns3::Node >', is_const=False)
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TransmissionSample::transmitter [variable]
    cls.add_instance_attribute('transmitter', 'ns3::Ptr< ns3::Node >', is_const=False)
    return
def register_Ns3PyVizTxPacketSample_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::PyViz::TxPacketSample (pyviz.h, module 'visualizer') on *cls*.

    Auto-generated: constructors plus the 'to' MAC address field.
    """
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TxPacketSample::TxPacketSample() [constructor]
    cls.add_constructor([])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TxPacketSample::TxPacketSample(ns3::PyViz::TxPacketSample const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PyViz::TxPacketSample const &', 'arg0')])
    ## pyviz.h (module 'visualizer'): ns3::PyViz::TxPacketSample::to [variable]
    cls.add_instance_attribute('to', 'ns3::Mac48Address', is_const=False)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register pybindgen wrappers for the SimpleRefCount<Object, ObjectBase, ObjectDeleter> instantiation (simple-ref-count.h, module 'core') on *cls*.

    Auto-generated: constructors plus the static Cleanup method.
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Simulator (simulator.h, module 'core') on *cls*.

    Auto-generated: copy constructor plus the static simulator control API
    (event cancel/remove, time queries, implementation/scheduler setup,
    Stop overloads). Next and RunOneEvent are wrapped as deprecated.
    """
    ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
    cls.add_method('Cancel',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
    cls.add_method('Destroy',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
    cls.add_method('GetDelayLeft',
                   'ns3::Time',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
    cls.add_method('GetImplementation',
                   'ns3::Ptr< ns3::SimulatorImpl >',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
    cls.add_method('GetMaximumSimulationTime',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
    cls.add_method('IsFinished',
                   'bool',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Time',
                   [],
                   is_static=True, deprecated=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
    cls.add_method('Now',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
    cls.add_method('Remove',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::RunOneEvent() [member function]
    cls.add_method('RunOneEvent',
                   'void',
                   [],
                   is_static=True, deprecated=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
    cls.add_method('SetImplementation',
                   'void',
                   [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
    cls.add_method('SetScheduler',
                   'void',
                   [param('ns3::ObjectFactory', 'schedulerFactory')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
    cls.add_method('Stop',
                   'void',
                   [param('ns3::Time const &', 'time')],
                   is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::Tag (tag.h, module 'network') on *cls*.

    Auto-generated: constructors plus the pure-virtual serialization
    interface (Deserialize/Serialize/GetSerializedSize/Print) and the
    static GetTypeId.
    """
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register pybindgen wrappers for ns3::TagBuffer (tag-buffer.h, module 'network') on *cls*.

    Auto-generated: constructors, CopyFrom/TrimAtEnd, and the fixed-width
    Read*/Write* accessors for U8..U64, double, and raw byte blocks.
    """
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom',
                   'void',
                   [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble',
                   'double',
                   [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd',
                   'void',
                   [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble',
                   'void',
                   [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register the Python bindings for ns3::TypeId (declared in type-id.h, module 'core')."""
    # Comparison and stream-output operators, in the generator's emission order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: by-name, default, and copy.
    cls.add_constructor([param('char const *', 'name')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    # Attribute / trace-source registration methods (AddAttribute has two overloads;
    # the second takes an extra uint32_t flags argument).
    cls.add_method('AddAttribute', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'),
                    param('ns3::AttributeValue const &', 'initialValue'),
                    param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddAttribute', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'),
                    param('uint32_t', 'flags'),
                    param('ns3::AttributeValue const &', 'initialValue'),
                    param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    cls.add_method('AddTraceSource', 'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'),
                    param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    # Attribute introspection.
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation',
                   [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFullName', 'std::string',
                   [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    # Constructor-callback accessor (nine ns3::empty slots pad the callback template).
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [], is_const=True)
    # Simple const accessors.
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    # Static registry lookups.
    cls.add_method('GetRegistered', 'ns3::TypeId',
                   [param('uint32_t', 'i')], is_static=True)
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    # Trace-source introspection.
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation',
                   [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    # Predicates and documentation control.
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    cls.add_method('HasParent', 'bool', [], is_const=True)
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    cls.add_method('IsChildOf', 'bool',
                   [param('ns3::TypeId', 'other')], is_const=True)
    # Name-based lookups.
    cls.add_method('LookupAttributeByName', 'bool',
                   [param('std::string', 'name'),
                    param('ns3::TypeId::AttributeInformation *', 'info')],
                   is_const=True)
    cls.add_method('LookupByName', 'ns3::TypeId',
                   [param('std::string', 'name')], is_static=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')], is_const=True)
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    # Mutators.
    cls.add_method('SetAttributeInitialValue', 'bool',
                   [param('uint32_t', 'i'),
                    param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    cls.add_method('SetGroupName', 'ns3::TypeId',
                   [param('std::string', 'groupName')])
    cls.add_method('SetParent', 'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    cls.add_method('SetUid', 'void',
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register bindings for the ns3::TypeId::AttributeInformation struct (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public data members, exposed as mutable instance attributes; the loop
    # preserves the generator's registration order.
    for field_name, cpp_type in (
            ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('flags', 'uint32_t'),
            ('help', 'std::string'),
            ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
            ('name', 'std::string'),
            ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >')):
        cls.add_instance_attribute(field_name, cpp_type, is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register bindings for the ns3::TypeId::TraceSourceInformation struct (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public data members as mutable instance attributes, in registration order.
    for field_name, cpp_type in (
            ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
            ('help', 'std::string'),
            ('name', 'std::string')):
        cls.add_instance_attribute(field_name, cpp_type, is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register bindings for ns3::empty (empty.h, module 'core') — only default and copy construction."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register bindings for ns3::int64x64_t (int64x64-double.h, module 'core')."""
    i64x64 = root_module['ns3::int64x64_t']
    # Right-hand operand types accepted by the binary arithmetic operators,
    # in exactly the order the generator emitted them.
    rhs_types = (
        'long long unsigned int const',
        'long unsigned int const',
        'unsigned int const',
        'short unsigned int const',
        'unsigned char const',
        'long long int const',
        'long int const',
        'int const',
        'short int const',
        'signed char const',
        'double const',
    )
    # '*' and '+': all plain-numeric RHS types, then int64x64_t itself.
    for op in ('*', '+'):
        for rhs in rhs_types:
            cls.add_binary_numeric_operator(op, i64x64, i64x64, param(rhs, 'right'))
        cls.add_binary_numeric_operator(op, i64x64, i64x64,
                                        param('ns3::int64x64_t const &', 'right'))
    # '-': numeric RHS types, then unary negation, then the int64x64_t RHS
    # (this interleaving mirrors the original registration order).
    for rhs in rhs_types:
        cls.add_binary_numeric_operator('-', i64x64, i64x64, param(rhs, 'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('-', i64x64, i64x64,
                                    param('ns3::int64x64_t const &', 'right'))
    # '/': numeric RHS types, then int64x64_t itself.
    for rhs in rhs_types:
        cls.add_binary_numeric_operator('/', i64x64, i64x64, param(rhs, 'right'))
    cls.add_binary_numeric_operator('/', i64x64, i64x64,
                                    param('ns3::int64x64_t const &', 'right'))
    # Comparison, in-place arithmetic, and stream-output operators.
    for op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(op)
    for op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(op, param('ns3::int64x64_t const &', 'right'))
    cls.add_output_stream_operator()
    for op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(op)
    # Constructors: default, one per plain numeric type, (hi, lo) pair, copy.
    cls.add_constructor([])
    for ctor_type in ('double', 'int', 'long int', 'long long int',
                      'unsigned int', 'long unsigned int', 'long long unsigned int'):
        cls.add_constructor([param(ctor_type, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Const accessors for the stored value.
    for name, ret in (('GetDouble', 'double'),
                      ('GetHigh', 'int64_t'),
                      ('GetLow', 'uint64_t')):
        cls.add_method(name, ret, [], is_const=True)
    # Static Invert and in-place MulByInvert.
    cls.add_method('Invert', 'ns3::int64x64_t',
                   [param('uint64_t', 'v')], is_static=True)
    cls.add_method('MulByInvert', 'void',
                   [param('ns3::int64x64_t const &', 'o')])
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register bindings for the abstract ns3::Chunk base class (chunk.h, module 'network')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    # Pure-virtual deserialization entry point.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Pure-virtual pretty-printer.
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register bindings for the abstract ns3::Header base class (header.h, module 'network')."""
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    # Pure-virtual serialization interface implemented by concrete headers.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Ipv4Header_methods(root_module, cls):
    """Register bindings for ns3::Ipv4Header (ipv4-header.h, module 'internet')."""
    # Copy and default constructors (copy first, matching the generator's order).
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    cls.add_constructor([])
    # Virtual deserialization override.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    # DSCP/ECN pretty-printing helpers.
    cls.add_method('DscpTypeToString', 'std::string',
                   [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True)
    cls.add_method('EcnTypeToString', 'std::string',
                   [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True)
    cls.add_method('EnableChecksum', 'void', [])
    # Const accessors for the header fields.
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True)
    cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True)
    cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
    cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Boolean predicates.
    cls.add_method('IsChecksumOk', 'bool', [], is_const=True)
    cls.add_method('IsDontFragment', 'bool', [], is_const=True)
    cls.add_method('IsLastFragment', 'bool', [], is_const=True)
    # Virtual print/serialize overrides.
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    # Mutators for the header fields.
    cls.add_method('SetDestination', 'void',
                   [param('ns3::Ipv4Address', 'destination')])
    cls.add_method('SetDontFragment', 'void', [])
    cls.add_method('SetDscp', 'void',
                   [param('ns3::Ipv4Header::DscpType', 'dscp')])
    cls.add_method('SetEcn', 'void',
                   [param('ns3::Ipv4Header::EcnType', 'ecn')])
    cls.add_method('SetFragmentOffset', 'void',
                   [param('uint16_t', 'offsetBytes')])
    cls.add_method('SetIdentification', 'void',
                   [param('uint16_t', 'identification')])
    cls.add_method('SetLastFragment', 'void', [])
    cls.add_method('SetMayFragment', 'void', [])
    cls.add_method('SetMoreFragments', 'void', [])
    cls.add_method('SetPayloadSize', 'void',
                   [param('uint16_t', 'size')])
    cls.add_method('SetProtocol', 'void',
                   [param('uint8_t', 'num')])
    cls.add_method('SetSource', 'void',
                   [param('ns3::Ipv4Address', 'source')])
    cls.add_method('SetTos', 'void',
                   [param('uint8_t', 'tos')])
    cls.add_method('SetTtl', 'void',
                   [param('uint8_t', 'ttl')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register bindings for ns3::Object (object.h, module 'core')."""
    cls.add_constructor([])
    # Aggregation and lifecycle API.
    cls.add_method('AggregateObject', 'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    cls.add_method('Dispose', 'void', [])
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [],
                   is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Start', 'void', [])
    # Copy construction is protected in C++; mirror that visibility here.
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    # Protected virtual hooks overridden by subclasses.
    cls.add_method('DoDispose', 'void', [],
                   visibility='protected', is_virtual=True)
    cls.add_method('DoStart', 'void', [],
                   visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register bindings for ns3::Object::AggregateIterator (object.h, module 'core')."""
    # Copy and default constructors (copy first, matching the generator's order).
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    cls.add_constructor([])
    # Java-style iteration pair: HasNext / Next.
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<AttributeAccessor, ...> instantiation (simple-ref-count.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    # Static Cleanup hook.
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::AttributeChecker (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, '
            'ns3::DefaultDeleter< ns3::AttributeChecker > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::AttributeValue (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, '
            'ns3::DefaultDeleter< ns3::AttributeValue > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::CallbackImplBase (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, '
            'ns3::DefaultDeleter< ns3::CallbackImplBase > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::EventImpl (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, '
            'ns3::DefaultDeleter< ns3::EventImpl > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::Ipv4MulticastRoute (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, '
            'ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::Ipv4Route (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, '
            'ns3::DefaultDeleter< ns3::Ipv4Route > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::NixVector (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, '
            'ns3::DefaultDeleter< ns3::NixVector > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::OutputStreamWrapper (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, '
            'ns3::DefaultDeleter< ns3::OutputStreamWrapper > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::Packet (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::Packet, ns3::empty, '
            'ns3::DefaultDeleter< ns3::Packet > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount base of ns3::TraceSourceAccessor (simple-ref-count.h, module 'core')."""
    base = ('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, '
            'ns3::DefaultDeleter< ns3::TraceSourceAccessor > >')
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param(base + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Socket_methods(root_module, cls):
    """Register Python bindings for ns3::Socket (socket.h, module 'network').

    NOTE(review): this appears to be pybindgen-generated registration code.
    Several names (Bind, Recv, RecvFrom, Send, SendTo) are registered as
    C++ overload sets; the order of the add_method calls below is kept
    exactly as generated, since it determines overload dispatch order.
    """
    ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Socket const &', 'arg0')])
    ## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
    cls.add_constructor([])
    ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function]
    cls.add_method('Bind',
                   'int',
                   [param('ns3::Address const &', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Bind() [member function]
    cls.add_method('Bind',
                   'int',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function]
    cls.add_method('BindToNetDevice',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')],
                   is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Close() [member function]
    cls.add_method('Close',
                   'int',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function]
    cls.add_method('Connect',
                   'int',
                   [param('ns3::Address const &', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function]
    cls.add_method('CreateSocket',
                   'ns3::Ptr< ns3::Socket >',
                   [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')],
                   is_static=True)
    ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function]
    cls.add_method('GetAllowBroadcast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function]
    cls.add_method('GetBoundNetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [])
    ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function]
    cls.add_method('GetErrno',
                   'ns3::Socket::SocketErrno',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
    cls.add_method('GetRxAvailable',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
    cls.add_method('GetSockName',
                   'int',
                   [param('ns3::Address &', 'address')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
    cls.add_method('GetSocketType',
                   'ns3::Socket::SocketType',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
    cls.add_method('GetTxAvailable',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
    cls.add_method('Listen',
                   'int',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
    cls.add_method('Recv',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
    cls.add_method('Recv',
                   'ns3::Ptr< ns3::Packet >',
                   [])
    ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
    cls.add_method('Recv',
                   'int',
                   [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::Address &', 'fromAddress')])
    ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom',
                   'int',
                   [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
    ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
    cls.add_method('Send',
                   'int',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Send',
                   'int',
                   [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
    cls.add_method('Send',
                   'int',
                   [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
    ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
    cls.add_method('SendTo',
                   'int',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
    cls.add_method('SendTo',
                   'int',
                   [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
    ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
    cls.add_method('SetAcceptCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
    ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
    cls.add_method('SetAllowBroadcast',
                   'bool',
                   [param('bool', 'allowBroadcast')],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
    cls.add_method('SetCloseCallbacks',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
    ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
    cls.add_method('SetConnectCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
    ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
    cls.add_method('SetDataSentCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
    ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
    cls.add_method('SetRecvCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
    ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function]
    cls.add_method('SetRecvPktInfo',
                   'void',
                   [param('bool', 'flag')])
    ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function]
    cls.add_method('SetSendCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
    ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function]
    cls.add_method('ShutdownRecv',
                   'int',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function]
    cls.add_method('ShutdownSend',
                   'int',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function]
    cls.add_method('NotifyConnectionFailed',
                   'void',
                   [],
                   visibility='protected')
    ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function]
    cls.add_method('NotifyConnectionRequest',
                   'bool',
                   [param('ns3::Address const &', 'from')],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function]
    cls.add_method('NotifyConnectionSucceeded',
                   'void',
                   [],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function]
    cls.add_method('NotifyDataRecv',
                   'void',
                   [],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function]
    cls.add_method('NotifyDataSent',
                   'void',
                   [param('uint32_t', 'size')],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function]
    cls.add_method('NotifyErrorClose',
                   'void',
                   [],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function]
    cls.add_method('NotifyNewConnectionCreated',
                   'void',
                   [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function]
    cls.add_method('NotifyNormalClose',
                   'void',
                   [],
                   visibility='protected')
    ## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function]
    cls.add_method('NotifySend',
                   'void',
                   [param('uint32_t', 'spaceAvailable')],
                   visibility='protected')
    return
def register_Ns3SocketAddressTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketAddressTag (socket.h, module 'network')."""
    # Constructors: copy constructor first, then the default one.
    cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')])
    cls.add_constructor([])
    # void Deserialize(ns3::TagBuffer i) [virtual]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_virtual=True)
    # ns3::Address GetAddress() const
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True)
    # ns3::TypeId GetInstanceTypeId() const [virtual]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    # uint32_t GetSerializedSize() const [virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream & os) const [virtual]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const [virtual]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_const=True, is_virtual=True)
    # void SetAddress(ns3::Address addr)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')])
    return
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketIpTtlTag (socket.h, module 'network')."""
    # Constructors: copy constructor first, then the default one.
    cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
    cls.add_constructor([])
    # void Deserialize(ns3::TagBuffer i) [virtual]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_virtual=True)
    # ns3::TypeId GetInstanceTypeId() const [virtual]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    # uint32_t GetSerializedSize() const [virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    # uint8_t GetTtl() const
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream & os) const [virtual]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const [virtual]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_const=True, is_virtual=True)
    # void SetTtl(uint8_t ttl)
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
    """Register Python bindings for ns3::SocketSetDontFragmentTag (socket.h, module 'network')."""
    # Constructors: copy constructor first, then the default one.
    cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
    cls.add_constructor([])
    # void Deserialize(ns3::TagBuffer i) [virtual]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_virtual=True)
    # void Disable()
    cls.add_method('Disable', 'void', [])
    # void Enable()
    cls.add_method('Enable', 'void', [])
    # ns3::TypeId GetInstanceTypeId() const [virtual]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [],
                   is_const=True, is_virtual=True)
    # uint32_t GetSerializedSize() const [virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # bool IsEnabled() const
    cls.add_method('IsEnabled', 'bool', [], is_const=True)
    # void Print(std::ostream & os) const [virtual]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    # void Serialize(ns3::TagBuffer i) const [virtual]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register Python bindings for ns3::Time (nstime.h, module 'core').

    NOTE(review): this appears to be pybindgen-generated registration code.
    The constructors and the static 'From' factory form overload sets;
    the generated registration order is preserved byte-for-byte because
    it determines overload dispatch order.
    """
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'timeUnit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register bindings for ns3::TraceSourceAccessor (trace-source-accessor.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual connect/disconnect hooks, each const; the *WithoutContext
    # variants drop the std::string context argument.
    pv = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Connect', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   **pv)
    cls.add_method('ConnectWithoutContext', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   **pv)
    cls.add_method('Disconnect', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   **pv)
    cls.add_method('DisconnectWithoutContext', 'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   **pv)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register bindings for ns3::Trailer (trailer.h, module 'network')."""
    # Trailers are printable via operator<<.
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # Serialization interface; all but GetTypeId are pure virtual.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register bindings for ns3::AttributeAccessor (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual, const accessor interface.
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register bindings for ns3::AttributeChecker (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # Validation / copying of attribute values (pure virtual, const).
    pv = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')], **pv)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   **pv)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], **pv)
    # CreateValidValue is concrete (const only, not virtual in the binding).
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    # Type-introspection helpers.
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], **pv)
    cls.add_method('GetValueTypeName', 'std::string', [], **pv)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], **pv)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register bindings for ns3::AttributeValue (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Cloning plus string (de)serialization against a checker; all pure virtual.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register bindings for ns3::CallbackChecker (callback.h, module 'core'): only constructors."""
    cls.add_constructor([])  # default
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])  # copy
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackImplBase (callback.h, module 'core')."""
    cls.add_constructor([])  # default
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])  # copy
    # Equality test between callback implementations (pure virtual, const).
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register bindings for ns3::CallbackValue (callback.h, module 'core')."""
    # Copy, default, and from-CallbackBase constructors.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # Concrete overrides of the AttributeValue interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # Setter taking the callback by value.
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3Channel_methods(root_module, cls):
    """Register bindings for ns3::Channel (channel.h, module 'network')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Channel const &', 'arg0')])
    cls.add_constructor([])
    # Device enumeration is pure virtual; GetId is concrete.
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetNDevices', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeValue (attribute.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # AttributeValue overrides; all declared private in the C++ class.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register bindings for ns3::EventImpl (event-impl.h, module 'core')."""
    # Copy and default constructors.
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    # Public event-lifecycle operations.
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # Notify is the protected pure-virtual hook subclasses implement.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3Ipv4_methods(root_module, cls):
    """Register bindings for the abstract ns3::Ipv4 API (ipv4.h, module 'internet')."""
    # Shorthand kwargs for the two recurring pure-virtual flavours.
    pv = dict(is_pure_virtual=True, is_virtual=True)
    pcv = dict(is_pure_virtual=True, is_const=True, is_virtual=True)
    # Copy and default constructors.
    cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
    cls.add_constructor([])
    # Interface/address management.
    cls.add_method('AddAddress', 'bool',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], **pv)
    cls.add_method('AddInterface', 'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')], **pv)
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress',
                   [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], **pcv)
    cls.add_method('GetInterfaceForAddress', 'int32_t',
                   [param('ns3::Ipv4Address', 'address')], **pcv)
    cls.add_method('GetInterfaceForDevice', 'int32_t',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device')], **pcv)
    cls.add_method('GetInterfaceForPrefix', 'int32_t',
                   [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], **pcv)
    # Per-interface queries.
    cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], **pcv)
    cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], **pcv)
    cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], **pcv)
    cls.add_method('GetNInterfaces', 'uint32_t', [], **pcv)
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'interface')], **pv)
    cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], **pcv)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Insert', 'void',
                   [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], **pv)
    cls.add_method('IsDestinationAddress', 'bool',
                   [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], **pcv)
    cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'interface')], **pcv)
    cls.add_method('IsUp', 'bool', [param('uint32_t', 'interface')], **pcv)
    cls.add_method('RemoveAddress', 'bool',
                   [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], **pv)
    cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
                   **pv)
    # Datagram transmission.
    cls.add_method('Send', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
                   **pv)
    # Per-interface mutators.
    cls.add_method('SetDown', 'void', [param('uint32_t', 'interface')], **pv)
    cls.add_method('SetForwarding', 'void',
                   [param('uint32_t', 'interface'), param('bool', 'val')], **pv)
    cls.add_method('SetMetric', 'void',
                   [param('uint32_t', 'interface'), param('uint16_t', 'metric')], **pv)
    cls.add_method('SetRoutingProtocol', 'void',
                   [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], **pv)
    cls.add_method('SetUp', 'void', [param('uint32_t', 'interface')], **pv)
    # Wildcard-interface constant.
    cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
    # Attribute accessors declared private in the C++ class.
    cls.add_method('GetIpForward', 'bool', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('GetWeakEsModel', 'bool', [],
                   is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    cls.add_method('SetIpForward', 'void', [param('bool', 'forward')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')],
                   is_pure_virtual=True, visibility='private', is_virtual=True)
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressChecker (ipv4-address.h, module 'network'): only constructors."""
    cls.add_constructor([])  # default
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])  # copy
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressValue (ipv4-address.h, module 'network')."""
    # Default, copy, and from-address constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # Concrete AttributeValue overrides.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Value accessors.
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4L3Protocol_methods(root_module, cls):
    """Register bindings for ns3::Ipv4L3Protocol (ipv4-l3-protocol.h, module 'internet'),
    the concrete implementation of the ns3::Ipv4 interface."""
    # Shorthand kwargs for the recurring virtual-override flavours.
    v = dict(is_virtual=True)
    cv = dict(is_const=True, is_virtual=True)
    # Default constructor only (class is non-copyable).
    cls.add_constructor([])
    # Interface/address management (overrides of ns3::Ipv4).
    cls.add_method('AddAddress', 'bool',
                   [param('uint32_t', 'i'), param('ns3::Ipv4InterfaceAddress', 'address')], **v)
    cls.add_method('AddInterface', 'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')], **v)
    # Raw-socket management (concrete, not part of the Ipv4 interface).
    cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [])
    cls.add_method('DeleteRawSocket', 'void',
                   [param('ns3::Ptr< ns3::Socket >', 'socket')])
    # Lookups.
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress',
                   [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], **cv)
    cls.add_method('GetInterface', 'ns3::Ptr< ns3::Ipv4Interface >',
                   [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetInterfaceForAddress', 'int32_t',
                   [param('ns3::Ipv4Address', 'addr')], **cv)
    cls.add_method('GetInterfaceForDevice', 'int32_t',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device')], **cv)
    cls.add_method('GetInterfaceForPrefix', 'int32_t',
                   [param('ns3::Ipv4Address', 'addr'), param('ns3::Ipv4Mask', 'mask')], **cv)
    cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'i')], **cv)
    cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'i')], **cv)
    cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], **cv)
    cls.add_method('GetNInterfaces', 'uint32_t', [], **cv)
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')], **v)
    cls.add_method('GetProtocol', 'ns3::Ptr< ns3::Ipv4L4Protocol >',
                   [param('int', 'protocolNumber')], is_const=True)
    cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], **cv)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # L4 protocol registry.
    cls.add_method('Insert', 'void',
                   [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')], **v)
    cls.add_method('IsDestinationAddress', 'bool',
                   [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], **cv)
    cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'i')], **cv)
    cls.add_method('IsUp', 'bool', [param('uint32_t', 'i')], **cv)
    # Packet ingress from a NetDevice.
    cls.add_method('Receive', 'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')])
    cls.add_method('Remove', 'void',
                   [param('ns3::Ptr< ns3::Ipv4L4Protocol >', 'protocol')])
    cls.add_method('RemoveAddress', 'bool',
                   [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], **v)
    cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address',
                   [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
                   **v)
    # Packet egress.
    cls.add_method('Send', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
                   **v)
    cls.add_method('SendWithHeader', 'void',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')])
    # Mutators.
    cls.add_method('SetDefaultTtl', 'void', [param('uint8_t', 'ttl')])
    cls.add_method('SetDown', 'void', [param('uint32_t', 'i')], **v)
    cls.add_method('SetForwarding', 'void',
                   [param('uint32_t', 'i'), param('bool', 'val')], **v)
    cls.add_method('SetMetric', 'void',
                   [param('uint32_t', 'i'), param('uint16_t', 'metric')], **v)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_method('SetRoutingProtocol', 'void',
                   [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], **v)
    cls.add_method('SetUp', 'void', [param('uint32_t', 'i')], **v)
    # IP protocol number constant.
    cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True)
    # ns3::Object lifecycle hooks (protected in C++).
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    # Attribute accessors declared private in the C++ class.
    cls.add_method('GetIpForward', 'bool', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('GetWeakEsModel', 'bool', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('SetIpForward', 'void', [param('bool', 'forward')],
                   visibility='private', is_virtual=True)
    cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3Ipv4L4Protocol_methods(root_module, cls):
    """Register Python bindings for the ns3::Ipv4L4Protocol abstract base class.

    Auto-generated pybindgen registration: each ``cls.add_*`` call mirrors one
    C++ declaration from ipv4-l4-protocol.h (module 'internet'); the preceding
    '##' comment is the scanned C++ signature.  Do not edit by hand — regenerate
    with the ns-3 API scanner instead.
    """
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::Ipv4L4Protocol() [constructor]
    cls.add_constructor([])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::Ipv4L4Protocol(ns3::Ipv4L4Protocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4L4Protocol const &', 'arg0')])
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv4Address,ns3::Ipv4Address,unsigned char,ns3::Ptr<ns3::Ipv4Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::Ipv4L4Protocol::GetDownTarget() const [member function]
    cls.add_method('GetDownTarget',
                   'ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): int ns3::Ipv4L4Protocol::GetProtocolNumber() const [member function]
    cls.add_method('GetProtocolNumber',
                   'int',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4L4Protocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-l4-protocol.h (module 'internet'): ns3::Ipv4L4Protocol::RxStatus ns3::Ipv4L4Protocol::Receive(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::Ipv4Interface> incomingInterface) [member function]
    cls.add_method('Receive',
                   'ns3::Ipv4L4Protocol::RxStatus',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): void ns3::Ipv4L4Protocol::ReceiveIcmp(ns3::Ipv4Address icmpSource, uint8_t icmpTtl, uint8_t icmpType, uint8_t icmpCode, uint32_t icmpInfo, ns3::Ipv4Address payloadSource, ns3::Ipv4Address payloadDestination, uint8_t const * payload) [member function]
    cls.add_method('ReceiveIcmp',
                   'void',
                   [param('ns3::Ipv4Address', 'icmpSource'), param('uint8_t', 'icmpTtl'), param('uint8_t', 'icmpType'), param('uint8_t', 'icmpCode'), param('uint32_t', 'icmpInfo'), param('ns3::Ipv4Address', 'payloadSource'), param('ns3::Ipv4Address', 'payloadDestination'), param('uint8_t const *', 'payload')],
                   is_virtual=True)
    ## ipv4-l4-protocol.h (module 'internet'): void ns3::Ipv4L4Protocol::SetDownTarget(ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv4Address,ns3::Ipv4Address,unsigned char,ns3::Ptr<ns3::Ipv4Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetDownTarget',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker constructors (ipv4-address.h, module 'network')."""
    # Default constructor: ns3::Ipv4MaskChecker::Ipv4MaskChecker()
    cls.add_constructor([])
    # Copy constructor: ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const &)
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4MaskValue (ipv4-address.h, module 'network').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Mask',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4MulticastRoute (ipv4-route.h, module 'internet').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor]
    cls.add_constructor([])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function]
    cls.add_method('GetGroup',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function]
    cls.add_method('GetOrigin',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetOutputTtl(uint32_t oif) [member function]
    cls.add_method('GetOutputTtl',
                   'uint32_t',
                   [param('uint32_t', 'oif')],
                   deprecated=True)
    ## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function]
    cls.add_method('GetOutputTtlMap',
                   'std::map< unsigned int, unsigned int >',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function]
    cls.add_method('GetParent',
                   'uint32_t',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function]
    cls.add_method('SetGroup',
                   'void',
                   [param('ns3::Ipv4Address const', 'group')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function]
    cls.add_method('SetOrigin',
                   'void',
                   [param('ns3::Ipv4Address const', 'origin')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function]
    cls.add_method('SetOutputTtl',
                   'void',
                   [param('uint32_t', 'oif'), param('uint32_t', 'ttl')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function]
    cls.add_method('SetParent',
                   'void',
                   [param('uint32_t', 'iif')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable]
    cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable]
    cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
    return
def register_Ns3Ipv4Route_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Route (ipv4-route.h, module 'internet').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    # Wraps the C++ operator<< for this class (pybindgen output-stream support).
    cls.add_output_stream_operator()
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor]
    cls.add_constructor([])
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function]
    cls.add_method('GetDestination',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function]
    cls.add_method('GetGateway',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function]
    cls.add_method('GetOutputDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function]
    cls.add_method('GetSource',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function]
    cls.add_method('SetDestination',
                   'void',
                   [param('ns3::Ipv4Address', 'dest')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function]
    cls.add_method('SetGateway',
                   'void',
                   [param('ns3::Ipv4Address', 'gw')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function]
    cls.add_method('SetOutputDevice',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
    ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function]
    cls.add_method('SetSource',
                   'void',
                   [param('ns3::Ipv4Address', 'src')])
    return
def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
    """Register Python bindings for the ns3::Ipv4RoutingProtocol abstract base class.

    Auto-generated pybindgen registration (ipv4-routing-protocol.h, module
    'internet'); each '##' comment is the scanned C++ signature for the call
    that follows.  The long ns3::Callback type strings must match the scanned
    template instantiations exactly.  Do not edit by hand.
    """
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor]
    cls.add_constructor([])
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
    ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyAddAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceDown',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
    cls.add_method('NotifyInterfaceUp',
                   'void',
                   [param('uint32_t', 'interface')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('NotifyRemoveAddress',
                   'void',
                   [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function]
    cls.add_method('PrintRoutingTable',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function]
    cls.add_method('RouteInput',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
    cls.add_method('RouteOutput',
                   'ns3::Ptr< ns3::Ipv4Route >',
                   [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
                   is_pure_virtual=True, is_virtual=True)
    ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
    cls.add_method('SetIpv4',
                   'void',
                   [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker constructors (ipv6-address.h, module 'network')."""
    # Default constructor: ns3::Ipv6AddressChecker::Ipv6AddressChecker()
    cls.add_constructor([])
    # Copy constructor: ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const &)
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6AddressValue (ipv6-address.h, module 'network').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Address',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker constructors (ipv6-address.h, module 'network')."""
    # Default constructor: ns3::Ipv6PrefixChecker::Ipv6PrefixChecker()
    cls.add_constructor([])
    # Copy constructor: ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const &)
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6PrefixValue (ipv6-address.h, module 'network').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Prefix',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker constructors (mac48-address.h, module 'network')."""
    # Default constructor: ns3::Mac48AddressChecker::Mac48AddressChecker()
    cls.add_constructor([])
    # Copy constructor: ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const &)
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register Python bindings for ns3::Mac48AddressValue (mac48-address.h, module 'network').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Mac48Address',
                   [],
                   is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register Python bindings for the ns3::NetDevice abstract base class.

    Auto-generated pybindgen registration (net-device.h, module 'network');
    each '##' comment is the scanned C++ signature for the call that follows.
    Nearly every method is pure virtual — concrete devices override them.
    Do not edit by hand.
    """
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register Python bindings for ns3::NixVector (nix-vector.h, module 'network').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    # Wraps the C++ operator<< for this class (pybindgen output-stream support).
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex',
                   'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount',
                   'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')],
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex',
                   'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits',
                   'uint32_t',
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register Python bindings for ns3::Node (node.h, module 'network').

    Auto-generated pybindgen registration; each '##' comment is the scanned
    C++ signature for the call that follows.  Do not edit by hand.
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoStart() [member function]
    cls.add_method('DoStart',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register the bindings of ns3::ObjectFactoryChecker (object-factory.h,
    module 'core'): only its default and copy constructors."""
    constructor_signatures = (
        # ns3::ObjectFactoryChecker() [constructor]
        [],
        # ns3::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
        [param('ns3::ObjectFactoryChecker const &', 'arg0')],
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register the bindings of ns3::ObjectFactoryValue (object-factory.h,
    module 'core'), the AttributeValue wrapper around ns3::ObjectFactory:
    constructors plus the Copy/serialize/Get/Set accessors."""
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::ObjectFactory',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register the bindings of ns3::OutputStreamWrapper (output-stream-wrapper.h,
    module 'network'): construction from a filename or an existing ostream,
    plus GetStream()."""
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream',
                   'std::ostream *',
                   [])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register the bindings of ns3::Packet (packet.h, module 'network'):
    constructors, header/trailer manipulation, byte/packet tags,
    serialization, and metadata accessors.  The ## comments below quote the
    original C++ declarations each registration mirrors."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   deprecated=True, is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return
def register_Ns3TimeChecker_methods(root_module, cls):
    """Register the bindings of ns3::TimeChecker (nstime.h, module 'core'):
    only its default and copy constructors."""
    constructor_signatures = (
        # ns3::TimeChecker() [constructor]
        [],
        # ns3::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
        [param('ns3::TimeChecker const &', 'arg0')],
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register the bindings of ns3::TimeValue (nstime.h, module 'core'),
    the AttributeValue wrapper around ns3::Time: constructors plus the
    Copy/serialize/Get/Set accessors."""
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register the bindings of ns3::TypeIdChecker (type-id.h, module 'core'):
    only its default and copy constructors."""
    constructor_signatures = (
        # ns3::TypeIdChecker() [constructor]
        [],
        # ns3::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
        [param('ns3::TypeIdChecker const &', 'arg0')],
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register the bindings of ns3::TypeIdValue (type-id.h, module 'core'),
    the AttributeValue wrapper around ns3::TypeId: constructors plus the
    Copy/serialize/Get/Set accessors."""
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register the bindings of ns3::AddressChecker (address.h, module
    'network'): only its default and copy constructors."""
    constructor_signatures = (
        # ns3::AddressChecker() [constructor]
        [],
        # ns3::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
        [param('ns3::AddressChecker const &', 'arg0')],
    )
    for signature in constructor_signatures:
        cls.add_constructor(signature)
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register the bindings of ns3::AddressValue (address.h, module
    'network'), the AttributeValue wrapper around ns3::Address:
    constructors plus the Copy/serialize/Get/Set accessors."""
    ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
    cls.add_constructor([param('ns3::Address const &', 'value')])
    ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Address',
                   [],
                   is_const=True)
    ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Address const &', 'value')])
    return
def register_functions(root_module):
    """Register free functions on the root module and its submodules."""
    # Only the ns3::FatalImpl submodule currently has free functions to bind.
    fatal_impl_submodule = root_module.get_submodule('FatalImpl')
    register_functions_ns3_FatalImpl(fatal_impl_submodule, root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """Register free functions of the ns3::FatalImpl submodule.

    Deliberate no-op: no free functions are bound for this submodule; the
    helper exists for structural symmetry with the other register_functions_*
    helpers.
    """
    return None
def main():
    """Drive the pybindgen code generation: build the module description
    (types, methods, free functions) and emit the generated binding source
    to stdout.  FileCodeSink, module_init, register_types and
    register_methods are defined earlier in this generated file."""
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)
if __name__ == '__main__':
    main()
| gpl-2.0 |
nbetcher/latte-2.6.35-crc | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys

# Require exactly one argument: the object file whose unwind info to check.
if len(sys.argv) != 2:
    # print() with a single parenthesised argument is valid under both
    # Python 2 and Python 3 (the original py2-only print statement is not).
    print("Usage: %s FILE" % sys.argv[0])
    sys.exit(2)

# Allow the readelf binary to be overridden through the environment.
readelf = os.getenv("READELF", "readelf")

# Matches a function header line from `readelf -u`, capturing the symbol
# name and the [start-end) hex address range.  Raw strings avoid the
# invalid-escape SyntaxWarning for \[ on modern Python.
start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches a region descriptor line, capturing the region length in slots.
rlen_pattern = re.compile(r".*rlen=([0-9]+)")
def check_func(func, slots, rlen_sum):
    """Verify one function's unwind info.

    The number of instruction slots spanned by the function must equal the
    sum of the region lengths readelf reported for it.  On a mismatch the
    global error counter is incremented and a diagnostic is printed; when
    ``func`` is falsy (no symbol name captured) the module-level ``start``
    and ``end`` addresses of the current function are used as the name.
    """
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func:
            # Fall back to the address range of the function being checked.
            func = "[%#x-%#x]" % (start, end)
        # print() with one argument works under both Python 2 and 3,
        # unlike the original py2-only print statement.
        print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0

# Walk the unwind-table dump produced by `readelf -u`.  Each function
# header line starts a new accumulation; rlen= lines add region lengths.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # Flush the previous function first.  The very first call is a
        # harmless no-op: func is False and slots == rlen_sum == 0.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        # int() handles arbitrarily large values on both Python 2 and 3;
        # the py2-only long()/0L spellings are gone.
        start = int(m.group(2), 16)
        end = int(m.group(3), 16)
        # An ia64 bundle is 16 bytes and holds 3 instruction slots; use
        # floor division so the result stays integral on Python 3 too.
        slots = 3 * (end - start) // 16
        rlen_sum = 0
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += int(m.group(1))
# Flush the final function.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print("No errors detected in %u functions." % num_funcs)
else:
    if num_errors > 1:
        err = "errors"
    else:
        err = "error"
    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
    sys.exit(1)
| gpl-2.0 |
IKholopov/HackUPC2017 | hackupc/env/lib/python3.5/site-packages/social/tests/backends/test_deezer.py | 9 | 1231 | import json
from social.tests.backends.oauth import OAuth2Test
class DeezerOAuth2Test(OAuth2Test):
    """Exercise the Deezer OAuth2 backend against canned HTTP responses."""
    # Dotted path of the backend under test.
    backend_path = 'social.backends.deezer.DeezerOAuth2'
    # Endpoint the backend queries for the authenticated user's profile.
    user_data_url = 'http://api.deezer.com/user/me'
    # Username the pipeline should derive from user_data_body below.
    expected_username = 'foobar'
    # Canned token response; Deezer returns form-encoded text, not JSON.
    access_token_body = 'access_token=foobar&expires=0'
    # Canned /user/me payload mirroring Deezer's user object fields.
    user_data_body = json.dumps({
        'id': '1',
        'name': 'foobar',
        'lastname': '',
        'firstname': '',
        'status': 0,
        'birthday': '1970-01-01',
        'inscription_date': '2015-01-01',
        'gender': 'M',
        'link': 'https://www.deezer.com/profile/1',
        'picture': 'https://api.deezer.com/user/1/image',
        'picture_small': 'https://cdns-images.dzcdn.net/images/user//56x56-000000-80-0-0.jpg',
        'picture_medium': 'https://cdns-images.dzcdn.net/images/user//250x250-000000-80-0-0.jpg',
        'picture_big': 'https://cdns-images.dzcdn.net/images/user//500x500-000000-80-0-0.jpg',
        'country': 'FR',
        'lang': 'FR',
        'is_kid': False,
        'tracklist': 'https://api.deezer.com/user/1/flow',
        'type': 'user'
    })
    def test_login(self):
        """The full login flow succeeds with the canned responses."""
        self.do_login()
    def test_partial_pipeline(self):
        """Resuming a partial auth pipeline also succeeds."""
        self.do_partial_pipeline()
| apache-2.0 |
gg7/diamond | src/collectors/chronyd/test/testchronyd.py | 31 | 3260 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from chronyd import ChronydCollector
##########################################################################
class TestChronydCollector(CollectorTestCase):
    """Tests for ChronydCollector, driven by canned fixture files patched
    into the collector's get_output() method."""

    def setUp(self):
        # Collector built with an empty per-test config override.
        config = get_collector_config('ChronydCollector', {
        })
        self.collector = ChronydCollector(config, {})

    def _collect_with_fixture(self, fixture_name):
        """Run collect() with get_output() patched to return the named
        fixture's contents.

        Using patch.object as a context manager guarantees the patch is
        undone even if collect() raises, unlike the manual start()/stop()
        pairs this replaces.
        """
        with patch.object(
                ChronydCollector,
                'get_output',
                Mock(return_value=self.getFixture(fixture_name).getvalue())):
            self.collector.collect()

    def test_import(self):
        self.assertTrue(ChronydCollector)

    @patch.object(Collector, 'publish')
    def test_should_work_with_ip_addresses(self, publish_mock):
        self._collect_with_fixture('fedora')
        # Dots in source IPs are expected to be rewritten to underscores.
        metrics = {
            '178_251_120_16.offset_ms': -7e-05,
            '85_12_29_43.offset_ms': -0.785,
            '85_234_197_3.offset_ms': 0.08,
            '85_255_214_66.offset_ms': 0.386,
        }
        self.setDocExample(
            collector=self.collector.__class__.__name__,
            metrics=metrics,
            defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_should_work_with_fqdns(self, publish_mock):
        self._collect_with_fixture('fqdn')
        metrics = {
            'adm-dns-resolver-001.offset_ms': 0.000277,
            'adm-dns-resolver-002.offset_ms': 0.456,
        }
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_check_invalid_unit(self, publish_mock):
        # Lines with an unrecognised unit are expected to be skipped;
        # only the valid entry is published.
        self._collect_with_fixture('bad_unit')
        metrics = {
            'adm-dns-resolver-002.offset_ms': 0.456,
        }
        self.assertPublishedMany(publish_mock, metrics)

    @patch.object(Collector, 'publish')
    def test_huge_values(self, publish_mock):
        self._collect_with_fixture('huge_vals')
        metrics = {
            'server1.offset_ms': 8735472000000,
            'server2.offset_ms': -1009152000000,
        }
        self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit |
mtelahun/purchase-workflow | product_supplierinfo_discount/__init__.py | 11 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
| agpl-3.0 |
wevoice/wesub | apps/messages/signalhandlers.py | 6 | 1113 | # Amara, universalsubtitles.org
#
# Copyright (C) 2014 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.dispatch import receiver
from django.db.models.signals import post_save, post_delete
from auth.models import CustomUser as User
from messages.models import Message
@receiver(post_save, sender=Message)
@receiver(post_delete, sender=Message)
def on_message_saved(sender, instance, **kwargs):
    """Invalidate the cached CustomUser entry for the user referenced by
    ``instance.user_id`` whenever a Message is saved or deleted.

    NOTE: despite the name, this handler fires on post_delete as well as
    post_save (both receivers are attached above).
    """
    User.cache.invalidate_by_pk(instance.user_id)
| agpl-3.0 |
DinoCow/airflow | tests/providers/apache/sqoop/hooks/test_sqoop.py | 7 | 14202 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import collections
import json
import unittest
from io import StringIO
from unittest.mock import call, patch
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.apache.sqoop.hooks.sqoop import SqoopHook
from airflow.utils import db
class TestSqoopHook(unittest.TestCase):
_config = {
'conn_id': 'sqoop_test',
'num_mappers': 22,
'verbose': True,
'properties': {'mapred.map.max.attempts': '1'},
'hcatalog_database': 'hive_database',
'hcatalog_table': 'hive_table',
}
_config_export = {
'table': 'domino.export_data_to',
'export_dir': '/hdfs/data/to/be/exported',
'input_null_string': '\\n',
'input_null_non_string': '\\t',
'staging_table': 'database.staging',
'clear_staging_table': True,
'enclosed_by': '"',
'escaped_by': '\\',
'input_fields_terminated_by': '|',
'input_lines_terminated_by': '\n',
'input_optionally_enclosed_by': '"',
'batch': True,
'relaxed_isolation': True,
'extra_export_options': collections.OrderedDict(
[('update-key', 'id'), ('update-mode', 'allowinsert'), ('fetch-size', 1)]
),
}
_config_import = {
'target_dir': '/hdfs/data/target/location',
'append': True,
'file_type': 'parquet',
'split_by': '\n',
'direct': True,
'driver': 'com.microsoft.jdbc.sqlserver.SQLServerDriver',
'extra_import_options': {
'hcatalog-storage-stanza': "\"stored as orcfile\"",
'show': '',
'fetch-size': 1,
},
}
_config_json = {
'namenode': 'http://0.0.0.0:50070/',
'job_tracker': 'http://0.0.0.0:50030/',
'libjars': '/path/to/jars',
'files': '/path/to/files',
'archives': '/path/to/archives',
}
def setUp(self):
db.merge_conn(
Connection(
conn_id='sqoop_test',
conn_type='sqoop',
schema='schema',
host='rmdbs',
port=5050,
extra=json.dumps(self._config_json),
)
)
@patch('subprocess.Popen')
def test_popen(self, mock_popen):
# Given
mock_popen.return_value.stdout = StringIO('stdout')
mock_popen.return_value.stderr = StringIO('stderr')
mock_popen.return_value.returncode = 0
mock_popen.return_value.communicate.return_value = [
StringIO('stdout\nstdout'),
StringIO('stderr\nstderr'),
]
# When
hook = SqoopHook(conn_id='sqoop_test')
hook.export_table(**self._config_export)
# Then
self.assertEqual(
mock_popen.mock_calls[0],
call(
[
'sqoop',
'export',
'-fs',
self._config_json['namenode'],
'-jt',
self._config_json['job_tracker'],
'-libjars',
self._config_json['libjars'],
'-files',
self._config_json['files'],
'-archives',
self._config_json['archives'],
'--connect',
'rmdbs:5050/schema',
'--input-null-string',
self._config_export['input_null_string'],
'--input-null-non-string',
self._config_export['input_null_non_string'],
'--staging-table',
self._config_export['staging_table'],
'--clear-staging-table',
'--enclosed-by',
self._config_export['enclosed_by'],
'--escaped-by',
self._config_export['escaped_by'],
'--input-fields-terminated-by',
self._config_export['input_fields_terminated_by'],
'--input-lines-terminated-by',
self._config_export['input_lines_terminated_by'],
'--input-optionally-enclosed-by',
self._config_export['input_optionally_enclosed_by'],
'--batch',
'--relaxed-isolation',
'--export-dir',
self._config_export['export_dir'],
'--update-key',
'id',
'--update-mode',
'allowinsert',
'--fetch-size',
str(self._config_export['extra_export_options'].get('fetch-size')),
'--table',
self._config_export['table'],
],
stderr=-2,
stdout=-1,
),
)
def test_submit_none_mappers(self):
"""
Test to check that if value of num_mappers is None, then it shouldn't be in the cmd built.
"""
_config_without_mappers = self._config.copy()
_config_without_mappers['num_mappers'] = None
hook = SqoopHook(**_config_without_mappers)
cmd = ' '.join(hook._prepare_command())
self.assertNotIn('--num-mappers', cmd)
def test_submit(self):
"""
Tests to verify that from connection extra option the options are added to the Sqoop command.
"""
hook = SqoopHook(**self._config)
cmd = ' '.join(hook._prepare_command())
# Check if the config has been extracted from the json
if self._config_json['namenode']:
self.assertIn("-fs {}".format(self._config_json['namenode']), cmd)
if self._config_json['job_tracker']:
self.assertIn("-jt {}".format(self._config_json['job_tracker']), cmd)
if self._config_json['libjars']:
self.assertIn("-libjars {}".format(self._config_json['libjars']), cmd)
if self._config_json['files']:
self.assertIn("-files {}".format(self._config_json['files']), cmd)
if self._config_json['archives']:
self.assertIn("-archives {}".format(self._config_json['archives']), cmd)
self.assertIn("--hcatalog-database {}".format(self._config['hcatalog_database']), cmd)
self.assertIn("--hcatalog-table {}".format(self._config['hcatalog_table']), cmd)
# Check the regulator stuff passed by the default constructor
if self._config['verbose']:
self.assertIn("--verbose", cmd)
if self._config['num_mappers']:
self.assertIn("--num-mappers {}".format(self._config['num_mappers']), cmd)
for key, value in self._config['properties'].items():
self.assertIn(f"-D {key}={value}", cmd)
# We don't have the sqoop binary available, and this is hard to mock,
# so just accept an exception for now.
with self.assertRaises(OSError):
hook.export_table(**self._config_export)
with self.assertRaises(OSError):
hook.import_table(table='schema.table', target_dir='/sqoop/example/path')
with self.assertRaises(OSError):
hook.import_query(query='SELECT * FROM sometable', target_dir='/sqoop/example/path')
def test_export_cmd(self):
"""
Tests to verify the hook export command is building correct Sqoop export command.
"""
hook = SqoopHook()
# The subprocess requires an array but we build the cmd by joining on a space
cmd = ' '.join(
hook._export_cmd(
self._config_export['table'],
self._config_export['export_dir'],
input_null_string=self._config_export['input_null_string'],
input_null_non_string=self._config_export['input_null_non_string'],
staging_table=self._config_export['staging_table'],
clear_staging_table=self._config_export['clear_staging_table'],
enclosed_by=self._config_export['enclosed_by'],
escaped_by=self._config_export['escaped_by'],
input_fields_terminated_by=self._config_export['input_fields_terminated_by'],
input_lines_terminated_by=self._config_export['input_lines_terminated_by'],
input_optionally_enclosed_by=self._config_export['input_optionally_enclosed_by'],
batch=self._config_export['batch'],
relaxed_isolation=self._config_export['relaxed_isolation'],
extra_export_options=self._config_export['extra_export_options'],
)
)
self.assertIn("--input-null-string {}".format(self._config_export['input_null_string']), cmd)
self.assertIn("--input-null-non-string {}".format(self._config_export['input_null_non_string']), cmd)
self.assertIn("--staging-table {}".format(self._config_export['staging_table']), cmd)
self.assertIn("--enclosed-by {}".format(self._config_export['enclosed_by']), cmd)
self.assertIn("--escaped-by {}".format(self._config_export['escaped_by']), cmd)
self.assertIn(
"--input-fields-terminated-by {}".format(self._config_export['input_fields_terminated_by']), cmd
)
self.assertIn(
"--input-lines-terminated-by {}".format(self._config_export['input_lines_terminated_by']), cmd
)
self.assertIn(
"--input-optionally-enclosed-by {}".format(self._config_export['input_optionally_enclosed_by']),
cmd,
)
# these options are from the extra export options
self.assertIn("--update-key id", cmd)
self.assertIn("--update-mode allowinsert", cmd)
if self._config_export['clear_staging_table']:
self.assertIn("--clear-staging-table", cmd)
if self._config_export['batch']:
self.assertIn("--batch", cmd)
if self._config_export['relaxed_isolation']:
self.assertIn("--relaxed-isolation", cmd)
if self._config_export['extra_export_options']:
self.assertIn("--update-key", cmd)
self.assertIn("--update-mode", cmd)
self.assertIn("--fetch-size", cmd)
def test_import_cmd(self):
"""
Tests to verify the hook import command is building correct Sqoop import command.
"""
hook = SqoopHook()
# The subprocess requires an array but we build the cmd by joining on a space
cmd = ' '.join(
hook._import_cmd(
self._config_import['target_dir'],
append=self._config_import['append'],
file_type=self._config_import['file_type'],
split_by=self._config_import['split_by'],
direct=self._config_import['direct'],
driver=self._config_import['driver'],
extra_import_options=None,
)
)
if self._config_import['append']:
self.assertIn('--append', cmd)
if self._config_import['direct']:
self.assertIn('--direct', cmd)
self.assertIn('--target-dir {}'.format(self._config_import['target_dir']), cmd)
self.assertIn('--driver {}'.format(self._config_import['driver']), cmd)
self.assertIn('--split-by {}'.format(self._config_import['split_by']), cmd)
# these are from extra options, but not passed to this cmd import command
self.assertNotIn('--show', cmd)
self.assertNotIn('hcatalog-storage-stanza \"stored as orcfile\"', cmd)
cmd = ' '.join(
hook._import_cmd(
target_dir=None,
append=self._config_import['append'],
file_type=self._config_import['file_type'],
split_by=self._config_import['split_by'],
direct=self._config_import['direct'],
driver=self._config_import['driver'],
extra_import_options=self._config_import['extra_import_options'],
)
)
self.assertNotIn('--target-dir', cmd)
# these checks are from the extra import options
self.assertIn('--show', cmd)
self.assertIn('hcatalog-storage-stanza \"stored as orcfile\"', cmd)
self.assertIn('--fetch-size', cmd)
def test_get_export_format_argument(self):
"""
Tests to verify the hook get format function is building
correct Sqoop command with correct format type.
"""
hook = SqoopHook()
self.assertIn("--as-avrodatafile", hook._get_export_format_argument('avro'))
self.assertIn("--as-parquetfile", hook._get_export_format_argument('parquet'))
self.assertIn("--as-sequencefile", hook._get_export_format_argument('sequence'))
self.assertIn("--as-textfile", hook._get_export_format_argument('text'))
with self.assertRaises(AirflowException):
hook._get_export_format_argument('unknown')
def test_cmd_mask_password(self):
"""
Tests to verify the hook masking function will correctly mask a user password in Sqoop command.
"""
hook = SqoopHook()
self.assertEqual(hook.cmd_mask_password(['--password', 'supersecret']), ['--password', 'MASKED'])
cmd = ['--target', 'targettable']
self.assertEqual(hook.cmd_mask_password(cmd), cmd)
| apache-2.0 |
auready/django | tests/fixtures_regress/models.py | 72 | 8193 | from django.contrib.auth.models import User
from django.db import models
class Animal(models.Model):
    # Simple concrete model exercised by the basic fixture round-trip tests.
    name = models.CharField(max_length=150)
    latin_name = models.CharField(max_length=150)
    count = models.IntegerField()
    weight = models.FloatField()
    # use a non-default name for the default manager
    specimens = models.Manager()
    def __str__(self):
        return self.name
class Plant(models.Model):
    name = models.CharField(max_length=150)
    class Meta:
        # For testing when upper case letter in app name; regression for #4057
        db_table = "Fixtures_regress_plant"
class Stuff(models.Model):
    # Both fields are nullable so fixtures may omit them.
    name = models.CharField(max_length=20, null=True)
    owner = models.ForeignKey(User, models.SET_NULL, null=True)
    def __str__(self):
        # NOTE(review): assumes name is not None; concatenation raises otherwise.
        return self.name + ' is owned by ' + str(self.owner)
class Absolute(models.Model):
    name = models.CharField(max_length=40)
class Parent(models.Model):
    name = models.CharField(max_length=10)
    class Meta:
        ordering = ('id',)
class Child(Parent):
    # Multi-table inheritance child of Parent.
    data = models.CharField(max_length=10)
# Models to regression test #7572, #20820
class Channel(models.Model):
    name = models.CharField(max_length=255)
class Article(models.Model):
    title = models.CharField(max_length=255)
    channels = models.ManyToManyField(Channel)
    class Meta:
        ordering = ('id',)
# Subclass of a model with a ManyToManyField for test_ticket_20820
class SpecialArticle(Article):
    pass
# Models to regression test #22421
class CommonFeature(Article):
    class Meta:
        abstract = True
class Feature(CommonFeature):
    # Concrete leaf of the abstract CommonFeature -> Article chain.
    pass
# Models to regression test #11428
class Widget(models.Model):
    name = models.CharField(max_length=255)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
class WidgetProxy(Widget):
    class Meta:
        # Proxy model: shares Widget's table, only changes Python behavior.
        proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
    def get_by_natural_key(self, key):
        # Serializer hook: the natural key is the model's name field.
        return self.get(name=key)
class Store(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)
    # Self-referential FK, used to test forward references during loading.
    main = models.ForeignKey('self', models.SET_NULL, null=True)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
    def natural_key(self):
        return (self.name,)
class Person(models.Model):
    objects = TestManager()
    name = models.CharField(max_length=255)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
    # Person doesn't actually have a dependency on store, but we need to define
    # one to test the behavior of the dependency resolution algorithm.
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.store']
class Book(models.Model):
    name = models.CharField(max_length=255)
    author = models.ForeignKey(Person, models.CASCADE)
    stores = models.ManyToManyField(Store)
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return '%s by %s (available at %s)' % (
            self.name,
            self.author.name,
            ', '.join(s.name for s in self.stores.all())
        )
class NKManager(models.Manager):
    def get_by_natural_key(self, data):
        # Serializer hook: the natural key is the model's data field.
        return self.get(data=data)
class NKChild(Parent):
    data = models.CharField(max_length=10, unique=True)
    objects = NKManager()
    def natural_key(self):
        return (self.data,)
    def __str__(self):
        return 'NKChild %s:%s' % (self.name, self.data)
class RefToNKChild(models.Model):
    # References a natural-keyed model both via FK and via M2M.
    text = models.CharField(max_length=10)
    nk_fk = models.ForeignKey(NKChild, models.CASCADE, related_name='ref_fks')
    nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
    def __str__(self):
        return '%s: Reference to %s [%s]' % (
            self.text,
            self.nk_fk,
            ', '.join(str(o) for o in self.nk_m2m.all())
        )
# Some models with pathological circular dependencies
class Circle1(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    # Circle1 <-> Circle2: a two-node dependency cycle.
    natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    # Deliberately depends on itself (a one-node cycle).
    natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    # Circle4 -> Circle5 -> Circle6 -> Circle4: a three-node cycle.
    natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
    # Depends on a model (Book) that has its own dependency chain.
    name = models.CharField(max_length=255)
    def natural_key(self):
        return (self.name,)
    natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
    name = models.CharField(max_length=255)
class M2MToSelf(models.Model):
    # Symmetrical M2M back onto the same model.
    parent = models.ManyToManyField("self", blank=True)
class BaseNKModel(models.Model):
    """
    Base model with a natural_key and a manager with `get_by_natural_key`
    """
    data = models.CharField(max_length=20, unique=True)
    objects = NKManager()
    class Meta:
        abstract = True
    def __str__(self):
        return self.data
    def natural_key(self):
        return (self.data,)
# Plain M2M between two natural-keyed models.
class M2MSimpleA(BaseNKModel):
    b_set = models.ManyToManyField("M2MSimpleB")
class M2MSimpleB(BaseNKModel):
    pass
# Two-way M2M circularity.
class M2MSimpleCircularA(BaseNKModel):
    b_set = models.ManyToManyField("M2MSimpleCircularB")
class M2MSimpleCircularB(BaseNKModel):
    a_set = models.ManyToManyField("M2MSimpleCircularA")
# M2M with an explicit `through` model.
class M2MComplexA(BaseNKModel):
    b_set = models.ManyToManyField("M2MComplexB", through="M2MThroughAB")
class M2MComplexB(BaseNKModel):
    pass
class M2MThroughAB(BaseNKModel):
    a = models.ForeignKey(M2MComplexA, models.CASCADE)
    b = models.ForeignKey(M2MComplexB, models.CASCADE)
# Three-model circular chain of through-M2Ms: A -> B -> C -> A.
class M2MComplexCircular1A(BaseNKModel):
    b_set = models.ManyToManyField("M2MComplexCircular1B",
                                   through="M2MCircular1ThroughAB")
class M2MComplexCircular1B(BaseNKModel):
    c_set = models.ManyToManyField("M2MComplexCircular1C",
                                   through="M2MCircular1ThroughBC")
class M2MComplexCircular1C(BaseNKModel):
    a_set = models.ManyToManyField("M2MComplexCircular1A",
                                   through="M2MCircular1ThroughCA")
class M2MCircular1ThroughAB(BaseNKModel):
    a = models.ForeignKey(M2MComplexCircular1A, models.CASCADE)
    b = models.ForeignKey(M2MComplexCircular1B, models.CASCADE)
class M2MCircular1ThroughBC(BaseNKModel):
    b = models.ForeignKey(M2MComplexCircular1B, models.CASCADE)
    c = models.ForeignKey(M2MComplexCircular1C, models.CASCADE)
class M2MCircular1ThroughCA(BaseNKModel):
    c = models.ForeignKey(M2MComplexCircular1C, models.CASCADE)
    a = models.ForeignKey(M2MComplexCircular1A, models.CASCADE)
class M2MComplexCircular2A(BaseNKModel):
    b_set = models.ManyToManyField("M2MComplexCircular2B",
                                   through="M2MCircular2ThroughAB")
class M2MComplexCircular2B(BaseNKModel):
    def natural_key(self):
        return (self.data,)
    # Fake the dependency for a circularity
    natural_key.dependencies = ["fixtures_regress.M2MComplexCircular2A"]
class M2MCircular2ThroughAB(BaseNKModel):
    a = models.ForeignKey(M2MComplexCircular2A, models.CASCADE)
    b = models.ForeignKey(M2MComplexCircular2B, models.CASCADE)
| bsd-3-clause |
mfazekas/safaridriver | remote/client/src/py/command.py | 3 | 2944 | # Copyright 2010 WebDriver committers
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Command(object):
    """Defines constants for the standard commands in the wire protocol."""
    # Keep in sync with org.openqa.selenium.remote.DriverCommand
    # --- Session lifecycle ---
    NEW_SESSION = "newSession"
    DELETE_SESSION = "deleteSession"
    CLOSE = "close"
    QUIT = "quit"
    # --- Navigation ---
    GET = "get"
    GO_BACK = "goBack"
    GO_FORWARD = "goForward"
    REFRESH = "refresh"
    # --- Cookies ---
    ADD_COOKIE = "addCookie"
    GET_COOKIE = "getCookie"
    GET_ALL_COOKIES = "getCookies"
    DELETE_COOKIE = "deleteCookie"
    DELETE_ALL_COOKIES = "deleteAllCookies"
    # --- Element lookup ---
    FIND_ELEMENT = "findElement"
    FIND_ELEMENTS = "findElements"
    FIND_CHILD_ELEMENT = "findChildElement"
    FIND_CHILD_ELEMENTS = "findChildElements"
    # --- Element interaction ---
    CLEAR_ELEMENT = "clearElement"
    CLICK_ELEMENT = "clickElement"
    HOVER_OVER_ELEMENT = "hoverOverElement"
    SEND_KEYS_TO_ELEMENT = "sendKeysToElement"
    SUBMIT_ELEMENT = "submitElement"
    TOGGLE_ELEMENT = "toggleElement"
    # --- Window / frame switching ---
    GET_CURRENT_WINDOW_HANDLE = "getCurrentWindowHandle"
    GET_WINDOW_HANDLES = "getWindowHandles"
    SWITCH_TO_WINDOW = "switchToWindow"
    SWITCH_TO_FRAME = "switchToFrame"
    SWITCH_TO_FRAME_BY_INDEX = "switchToFrameByIndex"
    SWITCH_TO_FRAME_BY_NAME = "switchToFrameByName"
    SWITCH_TO_DEFAULT_CONTENT = "switchToDefaultContent"
    GET_ACTIVE_ELEMENT = "getActiveElement"
    # --- Page state ---
    GET_CURRENT_URL = "getCurrentUrl"
    GET_PAGE_SOURCE = "getPageSource"
    GET_TITLE = "getTitle"
    EXECUTE_SCRIPT = "executeScript"
    # --- Speed / visibility (legacy commands) ---
    GET_SPEED = "getSpeed"
    SET_SPEED = "setSpeed"
    SET_BROWSER_VISIBLE = "setBrowserVisible"
    IS_BROWSER_VISIBLE = "isBrowserVisible"
    # --- Element state queries ---
    GET_ELEMENT_TEXT = "getElementText"
    GET_ELEMENT_VALUE = "getElementValue"
    GET_ELEMENT_TAG_NAME = "getElementTagName"
    SET_ELEMENT_SELECTED = "setElementSelected"
    DRAG_ELEMENT = "dragElement"
    IS_ELEMENT_SELECTED = "isElementSelected"
    IS_ELEMENT_ENABLED = "isElementEnabled"
    IS_ELEMENT_DISPLAYED = "isElementDisplayed"
    GET_ELEMENT_LOCATION = "getElementLocation"
    GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW = (
        "getElementLocationOnceScrolledIntoView")
    GET_ELEMENT_SIZE = "getElementSize"
    GET_ELEMENT_ATTRIBUTE = "getElementAttribute"
    GET_ELEMENT_VALUE_OF_CSS_PROPERTY = "getElementValueOfCssProperty"
    ELEMENT_EQUALS = "elementEquals"
    SCREENSHOT = "screenshot"
| apache-2.0 |
kalaidin/luigi | luigi/contrib/hdfs/hadoopcli_clients.py | 31 | 9521 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The implementations of the hdfs clients. The hadoop cli client and the
snakebite client.
"""
from luigi.target import FileAlreadyExists
from luigi.contrib.hdfs.config import load_hadoop_cmd
from luigi.contrib.hdfs import abstract_client as hdfs_abstract_client
from luigi.contrib.hdfs import config as hdfs_config
from luigi.contrib.hdfs import error as hdfs_error
import logging
import subprocess
import datetime
import os
import re
import warnings
logger = logging.getLogger('luigi-interface')
def create_hadoopcli_client():
    """
    Given that we want one of the hadoop cli clients (unlike snakebite),
    this one will return the right one.

    :raises ValueError: if the configured Hadoop version is not one of
        "cdh4", "cdh3" or "apache1".
    """
    version = hdfs_config.get_configured_hadoop_version()
    if version == "cdh4":
        return HdfsClient()
    elif version == "cdh3":
        return HdfsClientCdh3()
    elif version == "apache1":
        return HdfsClientApache1()
    else:
        # Bug fix: the two adjacent string literals previously concatenated
        # without a separating space, producing "...versionconfiguration...".
        raise ValueError("Error: Unknown version specified in Hadoop version "
                         "configuration parameter")
class HdfsClient(hdfs_abstract_client.HdfsFileSystem):
    """
    This client uses Apache 2.x syntax for file system commands, which also matched CDH4.
    """
    recursive_listdir_cmd = ['-ls', '-R']

    @staticmethod
    def call_check(command):
        """Run *command*, returning its stdout; raise HDFSCliError on non-zero exit."""
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise hdfs_error.HDFSCliError(command, p.returncode, stdout, stderr)
        return stdout

    def exists(self, path):
        """
        Use ``hadoop fs -stat`` to check file existence.
        """
        cmd = load_hadoop_cmd() + ['fs', '-stat', path]
        logger.debug('Running file existence check: %s', u' '.join(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode == 0:
            return True
        else:
            # -stat also fails for reasons other than absence (e.g. permission
            # errors); only a "No such file" message is a clean negative.
            not_found_pattern = "^.*No such file or directory$"
            not_found_re = re.compile(not_found_pattern)
            for line in stderr.split('\n'):
                if not_found_re.match(line):
                    return False
            raise hdfs_error.HDFSCliError(cmd, p.returncode, stdout, stderr)

    def rename(self, path, dest):
        """Move *path* (a single path or a list of paths) to *dest*, creating parents."""
        parent_dir = os.path.dirname(dest)
        if parent_dir != '' and not self.exists(parent_dir):
            self.mkdir(parent_dir)
        if not isinstance(path, (list, tuple)):
            path = [path]
        else:
            warnings.warn("Renaming multiple files at once is not atomic.", stacklevel=2)
        self.call_check(load_hadoop_cmd() + ['fs', '-mv'] + path + [dest])

    def remove(self, path, recursive=True, skip_trash=False):
        """Delete *path*, optionally recursively and bypassing the trash."""
        if recursive:
            cmd = load_hadoop_cmd() + ['fs', '-rm', '-r']
        else:
            cmd = load_hadoop_cmd() + ['fs', '-rm']
        if skip_trash:
            cmd = cmd + ['-skipTrash']
        cmd = cmd + [path]
        self.call_check(cmd)

    def chmod(self, path, permissions, recursive=False):
        """Change the permission bits of *path* via ``hadoop fs -chmod``."""
        if recursive:
            cmd = load_hadoop_cmd() + ['fs', '-chmod', '-R', permissions, path]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-chmod', permissions, path]
        self.call_check(cmd)

    def chown(self, path, owner, group, recursive=False):
        """Change owner and/or group of *path*; either may be None to leave it unchanged."""
        if owner is None:
            owner = ''
        if group is None:
            group = ''
        ownership = "%s:%s" % (owner, group)
        if recursive:
            cmd = load_hadoop_cmd() + ['fs', '-chown', '-R', ownership, path]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-chown', ownership, path]
        self.call_check(cmd)

    def count(self, path):
        """
        Return a dict with ``content_size``, ``dir_count`` and ``file_count``
        for *path*, as reported by ``hadoop fs -count``.
        """
        cmd = load_hadoop_cmd() + ['fs', '-count', path]
        stdout = self.call_check(cmd)
        # Ignore JVM warning banners and blank lines; the remaining line holds
        # the four counter fields.
        lines = [
            line for line in stdout.split('\n')
            if line
            and not line.startswith("OpenJDK 64-Bit Server VM warning")
            and not line.startswith("It's highly recommended")
        ]
        # Bug fix: the old code split the whole *unfiltered* stdout, which
        # broke whenever a warning banner was present, and could leave the
        # result variable unbound (NameError) when it was not.
        (dir_count, file_count, content_size, ppath) = lines[0].split()
        results = {'content_size': content_size, 'dir_count': dir_count, 'file_count': file_count}
        return results

    def copy(self, path, destination):
        """Copy *path* to *destination* within HDFS."""
        self.call_check(load_hadoop_cmd() + ['fs', '-cp', path, destination])

    def put(self, local_path, destination):
        """Upload *local_path* into HDFS at *destination*."""
        self.call_check(load_hadoop_cmd() + ['fs', '-put', local_path, destination])

    def get(self, path, local_destination):
        """Download HDFS *path* to *local_destination*."""
        self.call_check(load_hadoop_cmd() + ['fs', '-get', path, local_destination])

    def getmerge(self, path, local_destination, new_line=False):
        """Concatenate the files under *path* into one local file; ``-nl`` adds newlines."""
        if new_line:
            cmd = load_hadoop_cmd() + ['fs', '-getmerge', '-nl', path, local_destination]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-getmerge', path, local_destination]
        self.call_check(cmd)

    def mkdir(self, path, parents=True, raise_if_exists=False):
        """
        Create *path*. With ``parents=True`` ancestors are created via ``-p``;
        ``raise_if_exists`` is incompatible with ``-p`` (which never fails on
        an existing directory).
        """
        if parents and raise_if_exists:
            raise NotImplementedError("HdfsClient.mkdir can't raise with -p")
        try:
            cmd = (load_hadoop_cmd() + ['fs', '-mkdir'] +
                   (['-p'] if parents else []) +
                   [path])
            self.call_check(cmd)
        except hdfs_error.HDFSCliError as ex:
            if "File exists" in ex.stderr:
                if raise_if_exists:
                    raise FileAlreadyExists(ex.stderr)
            else:
                raise

    def listdir(self, path, ignore_directories=False, ignore_files=False,
                include_size=False, include_type=False, include_time=False, recursive=False):
        """
        Yield entries under *path*; each item is the file name, optionally
        extended with (size, type, modification_time) tuple members.
        """
        if not path:
            path = "."  # default to current/home catalog
        if recursive:
            cmd = load_hadoop_cmd() + ['fs'] + self.recursive_listdir_cmd + [path]
        else:
            cmd = load_hadoop_cmd() + ['fs', '-ls', path]
        lines = self.call_check(cmd).split('\n')
        for line in lines:
            if not line:
                continue
            elif line.startswith('OpenJDK 64-Bit Server VM warning') or line.startswith('It\'s highly recommended') or line.startswith('Found'):
                continue  # "hadoop fs -ls" outputs "Found %d items" as its first line
            elif ignore_directories and line[0] == 'd':
                continue
            elif ignore_files and line[0] == '-':
                continue
            data = line.split(' ')
            file = data[-1]
            size = int(data[-4])
            line_type = line[0]
            extra_data = ()
            if include_size:
                extra_data += (size,)
            if include_type:
                extra_data += (line_type,)
            if include_time:
                time_str = '%sT%s' % (data[-3], data[-2])
                modification_time = datetime.datetime.strptime(time_str,
                                                               '%Y-%m-%dT%H:%M')
                extra_data += (modification_time,)
            if len(extra_data) > 0:
                yield (file,) + extra_data
            else:
                yield file

    def touchz(self, path):
        """Create an empty file at *path* (like ``touch``)."""
        self.call_check(load_hadoop_cmd() + ['fs', '-touchz', path])
class HdfsClientCdh3(HdfsClient):
    """
    This client uses CDH3 syntax for file system commands.
    """

    def mkdir(self, path):
        """
        No -p switch, so this will fail creating ancestors.
        """
        try:
            self.call_check(load_hadoop_cmd() + ['fs', '-mkdir', path])
        except hdfs_error.HDFSCliError as ex:
            # Translate the "already exists" failure; everything else bubbles up.
            if "File exists" not in ex.stderr:
                raise
            raise FileAlreadyExists(ex.stderr)

    def remove(self, path, recursive=True, skip_trash=False):
        """Delete *path* using the CDH3 ``-rmr``/``-rm`` commands."""
        cmd = load_hadoop_cmd() + ['fs', '-rmr' if recursive else '-rm']
        if skip_trash:
            cmd.append('-skipTrash')
        cmd.append(path)
        self.call_check(cmd)
class HdfsClientApache1(HdfsClientCdh3):
    """
    This client uses Apache 1.x syntax for file system commands,
    which are similar to CDH3 except for the file existence check.
    """
    recursive_listdir_cmd = ['-lsr']

    def exists(self, path):
        """
        Use ``hadoop fs -test -e`` to check existence: exit code 0 means the
        path exists, 1 means it does not, anything else is an error.
        """
        cmd = load_hadoop_cmd() + ['fs', '-test', '-e', path]
        # Consistency fix: pass universal_newlines=True like every other Popen
        # call in this module, so stdout/stderr are text when they are
        # interpolated into the HDFSCliError message below.
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             close_fds=True, universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode == 0:
            return True
        elif p.returncode == 1:
            return False
        else:
            raise hdfs_error.HDFSCliError(cmd, p.returncode, stdout, stderr)
| apache-2.0 |
yourlabs/django-documents | setup.py | 1 | 2216 | import os
import sys
from setuptools import setup, find_packages, Command
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used for ``long_description`` so the top-level README stays the single
    source of truth.
    """
    # Bug fix: the old one-liner leaked the file handle; a context manager
    # closes it deterministically.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
class RunTests(Command):
    # NOTE(review): `django.core.management.execute_manager` was removed in
    # Django 1.6, so this command only works against very old Django releases.
    description = "Run the django test suite from the testproj dir."
    user_options = []
    def initialize_options(self):
        # No options to set up; required by the distutils Command interface.
        pass
    def finalize_options(self):
        # No options to finalize; required by the distutils Command interface.
        pass
    def run(self):
        # Run the "documents" app tests from inside the bundled test project,
        # then restore the original working directory.
        this_dir = os.getcwd()
        testproj_dir = os.path.join(this_dir, "test_project")
        os.chdir(testproj_dir)
        sys.path.append(testproj_dir)
        from django.core.management import execute_manager
        os.environ["DJANGO_SETTINGS_MODULE"] = 'test_project.settings'
        settings_file = os.environ["DJANGO_SETTINGS_MODULE"]
        # Import the settings module itself to hand to execute_manager.
        settings_mod = __import__(settings_file, {}, {}, [''])
        execute_manager(settings_mod, argv=[
            __file__, "test", "documents"])
        os.chdir(this_dir)
# Package metadata; `python setup.py test` is wired to the RunTests command above.
setup(
    name='django-documents',
    version='0.0.3',
    description='Attach documents to django models',
    author='James Pic',
    author_email='jamespic@gmail.com',
    url='https://github.com/yourlabs/django-documents',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    long_description=read('README.rst'),
    license='MIT',
    keywords='django document',
    install_requires=[
        'django',
        'django_autoslug',
        'django_generic_m2m',
        'django_autocomplete_light',
    ],
    cmdclass={'test': RunTests},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| mit |
jhseu/tensorflow | tensorflow/lite/micro/examples/magic_wand/train/data_load_test.py | 19 | 4119 | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-bad-import-order
"""Test for data_load.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from data_load import DataLoader
import tensorflow as tf
class TestLoad(unittest.TestCase):
  """Tests for DataLoader: dataset loading, padding, and tf.data formatting."""
  def setUp(self):  # pylint: disable=g-missing-super-call
    # Build a loader over the checked-in sample data directories.
    self.loader = DataLoader(
        "./data/train", "./data/valid", "./data/test", seq_length=512)
  def test_get_data(self):
    # All three splits must load as parallel data/label lists of equal length.
    self.assertIsInstance(self.loader.train_data, list)
    self.assertIsInstance(self.loader.train_label, list)
    self.assertIsInstance(self.loader.valid_data, list)
    self.assertIsInstance(self.loader.valid_label, list)
    self.assertIsInstance(self.loader.test_data, list)
    self.assertIsInstance(self.loader.test_label, list)
    self.assertEqual(self.loader.train_len, len(self.loader.train_data))
    self.assertEqual(self.loader.train_len, len(self.loader.train_label))
    self.assertEqual(self.loader.valid_len, len(self.loader.valid_data))
    self.assertEqual(self.loader.valid_len, len(self.loader.valid_label))
    self.assertEqual(self.loader.test_len, len(self.loader.test_data))
    self.assertEqual(self.loader.test_len, len(self.loader.test_label))
  def test_pad(self):
    # Short sequences are padded to seq_length; long ones are truncated.
    original_data1 = [[2, 3], [1, 1]]
    expected_data1_0 = [[2, 3], [2, 3], [2, 3], [2, 3], [1, 1]]
    expected_data1_1 = [[2, 3], [1, 1], [1, 1], [1, 1], [1, 1]]
    original_data2 = [[-2, 3], [-77, -681], [5, 6], [9, -7], [22, 3333],
                      [9, 99], [-100, 0]]
    expected_data2 = [[-2, 3], [-77, -681], [5, 6], [9, -7], [22, 3333]]
    padding_data1 = self.loader.pad(original_data1, seq_length=5, dim=2)
    padding_data2 = self.loader.pad(original_data2, seq_length=5, dim=2)
    # NOTE(review): the 10.001 tolerance is very loose — presumably because
    # DataLoader.pad() injects random noise into padded rows; confirm intent.
    for i in range(len(padding_data1[0])):
      for j in range(len(padding_data1[0].tolist()[0])):
        self.assertLess(
            abs(padding_data1[0].tolist()[i][j] - expected_data1_0[i][j]),
            10.001)
    for i in range(len(padding_data1[1])):
      for j in range(len(padding_data1[1].tolist()[0])):
        self.assertLess(
            abs(padding_data1[1].tolist()[i][j] - expected_data1_1[i][j]),
            10.001)
    self.assertEqual(padding_data2[0].tolist(), expected_data2)
    self.assertEqual(padding_data2[1].tolist(), expected_data2)
  def test_format(self):
    # After format(), splits become tf.data.Dataset objects and labels map
    # through label2id consistently with the raw label lists.
    self.loader.format()
    expected_train_label = int(self.loader.label2id[self.loader.train_label[0]])
    expected_valid_label = int(self.loader.label2id[self.loader.valid_label[0]])
    expected_test_label = int(self.loader.label2id[self.loader.test_label[0]])
    for feature, label in self.loader.train_data:  # pylint: disable=unused-variable
      format_train_label = label.numpy()
      break
    for feature, label in self.loader.valid_data:
      format_valid_label = label.numpy()
      break
    for feature, label in self.loader.test_data:
      format_test_label = label.numpy()
      break
    self.assertEqual(expected_train_label, format_train_label)
    self.assertEqual(expected_valid_label, format_valid_label)
    self.assertEqual(expected_test_label, format_test_label)
    self.assertIsInstance(self.loader.train_data, tf.data.Dataset)
    self.assertIsInstance(self.loader.valid_data, tf.data.Dataset)
    self.assertIsInstance(self.loader.test_data, tf.data.Dataset)
# Run the tests when executed directly (e.g. `python data_load_test.py`).
if __name__ == "__main__":
  unittest.main()
| apache-2.0 |
linkdesu/shadowsocks | shadowsocks/udprelay.py | 924 | 11154 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr
BUF_SIZE = 65536
def client_key(source_addr, server_af):
    # Cache key for the per-client socket map.
    # notice this is server af, not dest af
    host, port = source_addr[0], source_addr[1]
    return '%s:%s:%d' % (host, port, server_af)
class UDPRelay(object):
    def __init__(self, config, dns_resolver, is_local, stat_callback=None):
        # Sets up listen/remote endpoints, crypto parameters, per-client
        # caches and the bound non-blocking UDP server socket.
        # config: parsed shadowsocks config dict; dns_resolver: shared async
        # resolver; is_local: True for sslocal, False for ssserver;
        # stat_callback: optional callable(port, data_len) for traffic stats.
        self._config = config
        if is_local:
            # sslocal: listen on the SOCKS5 address, relay to the server.
            self._listen_addr = config['local_address']
            self._listen_port = config['local_port']
            self._remote_addr = config['server']
            self._remote_port = config['server_port']
        else:
            # ssserver: listen on the server address; the destination comes
            # from each request's header.
            self._listen_addr = config['server']
            self._listen_port = config['server_port']
            self._remote_addr = None
            self._remote_port = None
        self._dns_resolver = dns_resolver
        self._password = common.to_bytes(config['password'])
        self._method = config['method']
        self._timeout = config['timeout']
        self._is_local = is_local
        # LRU caches expire idle client sockets after the configured timeout;
        # expired sockets are torn down via _close_client.
        self._cache = lru_cache.LRUCache(timeout=config['timeout'],
                                         close_callback=self._close_client)
        self._client_fd_to_server_addr = \
            lru_cache.LRUCache(timeout=config['timeout'])
        self._dns_cache = lru_cache.LRUCache(timeout=300)
        self._eventloop = None
        self._closed = False
        self._sockets = set()
        if 'forbidden_ip' in config:
            self._forbidden_iplist = config['forbidden_ip']
        else:
            self._forbidden_iplist = None
        addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (self._listen_addr, self._listen_port))
        # Bind on the first resolved address.
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.bind((self._listen_addr, self._listen_port))
        # Non-blocking: all reads/writes are driven by the event loop.
        server_socket.setblocking(False)
        self._server_socket = server_socket
        self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_server: data is empty')
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data = encrypt.encrypt_all(self._password, self._method, 0, data)
# decrypt data
if not data:
logging.debug('UDP handle_server: data is empty after decrypt')
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
data = encrypt.encrypt_all(self._password, self._method, 1, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except IOError as e:
err = eventloop.errno_from_exception(e)
if err in (errno.EINPROGRESS, errno.EAGAIN):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
# addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
# this packet is from somewhere else we know
# simply drop that packet
pass
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
| apache-2.0 |
takeshineshiro/tornado | tornado/test/locks_test.py | 61 | 15994 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import timedelta
from tornado import gen, locks
from tornado.gen import TimeoutError
from tornado.testing import gen_test, AsyncTestCase
from tornado.test.util import unittest, skipBefore35, exec_test
class ConditionTest(AsyncTestCase):
    """Tests for locks.Condition: FIFO notify ordering, wait timeouts,
    reentrant notify, and cleanup of timed-out waiters."""
    def setUp(self):
        super(ConditionTest, self).setUp()
        # Records the order in which waiters resolve and notifies happen.
        self.history = []
    def record_done(self, future, key):
        """Record the resolution of a Future returned by Condition.wait."""
        def callback(_):
            if not future.result():
                # wait() resolved to False, meaning it timed out.
                self.history.append('timeout')
            else:
                self.history.append(key)
        future.add_done_callback(callback)
    def test_repr(self):
        # repr mentions 'waiters' only once someone is actually waiting.
        c = locks.Condition()
        self.assertIn('Condition', repr(c))
        self.assertNotIn('waiters', repr(c))
        c.wait()
        self.assertIn('waiters', repr(c))
    @gen_test
    def test_notify(self):
        # A single notify wakes a single waiter.
        c = locks.Condition()
        self.io_loop.call_later(0.01, c.notify)
        yield c.wait()
    def test_notify_1(self):
        # Each notify(1) wakes exactly one waiter, in FIFO order.
        c = locks.Condition()
        self.record_done(c.wait(), 'wait1')
        self.record_done(c.wait(), 'wait2')
        c.notify(1)
        self.history.append('notify1')
        c.notify(1)
        self.history.append('notify2')
        self.assertEqual(['wait1', 'notify1', 'wait2', 'notify2'],
                         self.history)
    def test_notify_n(self):
        # notify(n) wakes the first n registered waiters.
        c = locks.Condition()
        for i in range(6):
            self.record_done(c.wait(), i)
        c.notify(3)
        # Callbacks execute in the order they were registered.
        self.assertEqual(list(range(3)), self.history)
        c.notify(1)
        self.assertEqual(list(range(4)), self.history)
        c.notify(2)
        self.assertEqual(list(range(6)), self.history)
    def test_notify_all(self):
        # notify_all wakes every waiter synchronously, before returning.
        c = locks.Condition()
        for i in range(4):
            self.record_done(c.wait(), i)
        c.notify_all()
        self.history.append('notify_all')
        # Callbacks execute in the order they were registered.
        self.assertEqual(
            list(range(4)) + ['notify_all'],
            self.history)
    @gen_test
    def test_wait_timeout(self):
        # A wait with a timeout resolves to False when notified too late.
        c = locks.Condition()
        wait = c.wait(timedelta(seconds=0.01))
        self.io_loop.call_later(0.02, c.notify)  # Too late.
        yield gen.sleep(0.03)
        self.assertFalse((yield wait))
    @gen_test
    def test_wait_timeout_preempted(self):
        c = locks.Condition()
        # This fires before the wait times out.
        self.io_loop.call_later(0.01, c.notify)
        wait = c.wait(timedelta(seconds=0.02))
        yield gen.sleep(0.03)
        yield wait  # No TimeoutError.
    @gen_test
    def test_notify_n_with_timeout(self):
        # Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
        # Wait for that timeout to expire, then do notify(2) and make
        # sure everyone runs. Verifies that a timed-out callback does
        # not count against the 'n' argument to notify().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        self.record_done(c.wait(), 3)
        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)
        c.notify(2)
        yield gen.sleep(0.01)
        self.assertEqual(['timeout', 0, 2], self.history)
        # NOTE(review): the next line duplicates the assertion above and
        # adds no coverage -- looks like a copy/paste leftover; confirm
        # against upstream before removing.
        self.assertEqual(['timeout', 0, 2], self.history)
        c.notify()
        self.assertEqual(['timeout', 0, 2, 3], self.history)
    @gen_test
    def test_notify_all_with_timeout(self):
        # A timed-out waiter is simply skipped by notify_all().
        c = locks.Condition()
        self.record_done(c.wait(), 0)
        self.record_done(c.wait(timedelta(seconds=0.01)), 1)
        self.record_done(c.wait(), 2)
        # Wait for callback 1 to time out.
        yield gen.sleep(0.02)
        self.assertEqual(['timeout'], self.history)
        c.notify_all()
        self.assertEqual(['timeout', 0, 2], self.history)
    @gen_test
    def test_nested_notify(self):
        # Ensure no notifications lost, even if notify() is reentered by a
        # waiter calling notify().
        c = locks.Condition()
        # Three waiters.
        futures = [c.wait() for _ in range(3)]
        # First and second futures resolved. Second future reenters notify(),
        # resolving third future.
        futures[1].add_done_callback(lambda _: c.notify())
        c.notify(2)
        self.assertTrue(all(f.done() for f in futures))
    @gen_test
    def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        c = locks.Condition()
        for _ in range(101):
            c.wait(timedelta(seconds=0.01))
        future = c.wait()
        self.assertEqual(102, len(c._waiters))
        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(c._waiters))
        # Final waiter is still active.
        self.assertFalse(future.done())
        c.notify()
        self.assertTrue(future.done())
class EventTest(AsyncTestCase):
    """Tests for locks.Event: repr, set/clear semantics, and timeouts."""
    def test_repr(self):
        # repr flips from 'clear' to 'set' once the event fires.
        ev = locks.Event()
        self.assertIn('clear', str(ev))
        self.assertNotIn('set', str(ev))
        ev.set()
        self.assertNotIn('clear', str(ev))
        self.assertIn('set', str(ev))
    def test_event(self):
        # Waiters registered before set() and while set resolve; a waiter
        # registered after clear() stays pending.
        ev = locks.Event()
        before_set = ev.wait()
        ev.set()
        while_set = ev.wait()
        ev.clear()
        after_clear = ev.wait()
        self.assertTrue(before_set.done())
        self.assertTrue(while_set.done())
        self.assertFalse(after_clear.done())
    @gen_test
    def test_event_timeout(self):
        ev = locks.Event()
        with self.assertRaises(TimeoutError):
            yield ev.wait(timedelta(seconds=0.01))
        # After a timed-out waiter, normal operation works.
        self.io_loop.add_timeout(timedelta(seconds=0.01), ev.set)
        yield ev.wait(timedelta(seconds=1))
    def test_event_set_multiple(self):
        # Setting an already-set event is a harmless no-op.
        ev = locks.Event()
        for _ in range(2):
            ev.set()
        self.assertTrue(ev.is_set())
    def test_event_wait_clear(self):
        # clear() does not cancel earlier waiters; both resolve on set().
        ev = locks.Event()
        pre_clear = ev.wait()
        ev.clear()
        post_clear = ev.wait()
        ev.set()
        self.assertTrue(pre_clear.done())
        self.assertTrue(post_clear.done())
class SemaphoreTest(AsyncTestCase):
    """Tests for locks.Semaphore: counting, blocking acquires, timeouts,
    unbounded releases, and waiter cleanup."""
    def test_negative_value(self):
        # A semaphore cannot start with a negative counter.
        self.assertRaises(ValueError, locks.Semaphore, value=-1)
    def test_repr(self):
        sem = locks.Semaphore()
        self.assertIn('Semaphore', repr(sem))
        self.assertIn('unlocked,value:1', repr(sem))
        sem.acquire()
        self.assertIn('locked', repr(sem))
        self.assertNotIn('waiters', repr(sem))
        sem.acquire()
        self.assertIn('waiters', repr(sem))
    def test_acquire(self):
        # With value 1: first acquire is instant, later ones queue and
        # are resolved one per release(), in FIFO order.
        sem = locks.Semaphore()
        f0 = sem.acquire()
        self.assertTrue(f0.done())
        # Wait for release().
        f1 = sem.acquire()
        self.assertFalse(f1.done())
        f2 = sem.acquire()
        sem.release()
        self.assertTrue(f1.done())
        self.assertFalse(f2.done())
        sem.release()
        self.assertTrue(f2.done())
        sem.release()
        # Now acquire() is instant.
        self.assertTrue(sem.acquire().done())
        self.assertEqual(0, len(sem._waiters))
    @gen_test
    def test_acquire_timeout(self):
        # An acquire that times out raises TimeoutError; the released
        # permit then goes to a later waiter, not the timed-out one.
        sem = locks.Semaphore(2)
        yield sem.acquire()
        yield sem.acquire()
        acquire = sem.acquire(timedelta(seconds=0.01))
        self.io_loop.call_later(0.02, sem.release)  # Too late.
        yield gen.sleep(0.3)
        with self.assertRaises(gen.TimeoutError):
            yield acquire
        sem.acquire()
        f = sem.acquire()
        self.assertFalse(f.done())
        sem.release()
        self.assertTrue(f.done())
    @gen_test
    def test_acquire_timeout_preempted(self):
        sem = locks.Semaphore(1)
        yield sem.acquire()
        # This fires before the wait times out.
        self.io_loop.call_later(0.01, sem.release)
        acquire = sem.acquire(timedelta(seconds=0.02))
        yield gen.sleep(0.03)
        yield acquire  # No TimeoutError.
    def test_release_unacquired(self):
        # Unbounded releases are allowed, and increment the semaphore's value.
        sem = locks.Semaphore()
        sem.release()
        sem.release()
        # Now the counter is 3. We can acquire three times before blocking.
        self.assertTrue(sem.acquire().done())
        self.assertTrue(sem.acquire().done())
        self.assertTrue(sem.acquire().done())
        self.assertFalse(sem.acquire().done())
    @gen_test
    def test_garbage_collection(self):
        # Test that timed-out waiters are occasionally cleaned from the queue.
        sem = locks.Semaphore(value=0)
        futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]
        future = sem.acquire()
        self.assertEqual(102, len(sem._waiters))
        # Let first 101 waiters time out, triggering a collection.
        yield gen.sleep(0.02)
        self.assertEqual(1, len(sem._waiters))
        # Final waiter is still active.
        self.assertFalse(future.done())
        sem.release()
        self.assertTrue(future.done())
        # Prevent "Future exception was never retrieved" messages.
        for future in futures:
            self.assertRaises(TimeoutError, future.result)
class SemaphoreContextManagerTest(AsyncTestCase):
    """Tests for using a Semaphore as a context manager, both via
    `with (yield sem.acquire())` and via `async with sem` (3.5+)."""
    @gen_test
    def test_context_manager(self):
        sem = locks.Semaphore()
        with (yield sem.acquire()) as yielded:
            # The context manager yields None, not the semaphore itself.
            self.assertTrue(yielded is None)
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @skipBefore35
    @gen_test
    def test_context_manager_async_await(self):
        # Repeat the above test using 'async with'.
        sem = locks.Semaphore()
        namespace = exec_test(globals(), locals(), """
        async def f():
            async with sem as yielded:
                self.assertTrue(yielded is None)
        """)
        yield namespace['f']()
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @gen_test
    def test_context_manager_exception(self):
        # An exception inside the block still releases the semaphore.
        sem = locks.Semaphore()
        with self.assertRaises(ZeroDivisionError):
            with (yield sem.acquire()):
                1 / 0
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @gen_test
    def test_context_manager_timeout(self):
        sem = locks.Semaphore()
        with (yield sem.acquire(timedelta(seconds=0.01))):
            pass
        # Semaphore was released and can be acquired again.
        self.assertTrue(sem.acquire().done())
    @gen_test
    def test_context_manager_timeout_error(self):
        # A timed-out acquire never enters the block, so the counter
        # must not change.
        sem = locks.Semaphore(value=0)
        with self.assertRaises(gen.TimeoutError):
            with (yield sem.acquire(timedelta(seconds=0.01))):
                pass
        # Counter is still 0.
        self.assertFalse(sem.acquire().done())
    @gen_test
    def test_context_manager_contended(self):
        # Two coroutines contend for one permit; each critical section
        # runs to completion before the other starts.
        sem = locks.Semaphore()
        history = []
        @gen.coroutine
        def f(index):
            with (yield sem.acquire()):
                history.append('acquired %d' % index)
                yield gen.sleep(0.01)
                history.append('release %d' % index)
        yield [f(i) for i in range(2)]
        expected_history = []
        for i in range(2):
            expected_history.extend(['acquired %d' % i, 'release %d' % i])
        self.assertEqual(expected_history, history)
    @gen_test
    def test_yield_sem(self):
        # Ensure we catch a "with (yield sem)", which should be
        # "with (yield sem.acquire())".
        with self.assertRaises(gen.BadYieldError):
            with (yield locks.Semaphore()):
                pass
    def test_context_manager_misuse(self):
        # Ensure we catch a "with sem", which should be
        # "with (yield sem.acquire())".
        with self.assertRaises(RuntimeError):
            with locks.Semaphore():
                pass
class BoundedSemaphoreTest(AsyncTestCase):
    """BoundedSemaphore must reject release() beyond its initial value."""
    def test_release_unacquired(self):
        sem = locks.BoundedSemaphore()
        # Releasing at the initial value (1) overflows the bound.
        with self.assertRaises(ValueError):
            sem.release()
        sem.acquire()  # counter drops to 0
        blocked = sem.acquire()  # no permits left; this waiter blocks
        self.assertFalse(blocked.done())
        sem.release()  # hands the permit to the blocked waiter
        self.assertTrue(blocked.done())
        sem.release()  # counter back at the initial value of 1
        with self.assertRaises(ValueError):
            sem.release()
class LockTests(AsyncTestCase):
    """Tests for locks.Lock: mutual exclusion, FIFO fairness, timeouts,
    and misuse detection."""
    def test_repr(self):
        lock = locks.Lock()
        # No errors.
        repr(lock)
        lock.acquire()
        repr(lock)
    def test_acquire_release(self):
        # Second acquire blocks until release().
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        future = lock.acquire()
        self.assertFalse(future.done())
        lock.release()
        self.assertTrue(future.done())
    @gen_test
    def test_acquire_fifo(self):
        # Blocked acquirers obtain the lock in the order they asked.
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        N = 5
        history = []
        @gen.coroutine
        def f(idx):
            with (yield lock.acquire()):
                history.append(idx)
        futures = [f(i) for i in range(N)]
        self.assertFalse(any(future.done() for future in futures))
        lock.release()
        yield futures
        self.assertEqual(list(range(N)), history)
    @skipBefore35
    @gen_test
    def test_acquire_fifo_async_with(self):
        # Repeat the above test using `async with lock:`
        # instead of `with (yield lock.acquire()):`.
        lock = locks.Lock()
        self.assertTrue(lock.acquire().done())
        N = 5
        history = []
        namespace = exec_test(globals(), locals(), """
        async def f(idx):
            async with lock:
                history.append(idx)
        """)
        futures = [namespace['f'](i) for i in range(N)]
        lock.release()
        yield futures
        self.assertEqual(list(range(N)), history)
    @gen_test
    def test_acquire_timeout(self):
        # A timed-out acquire raises and leaves the lock held.
        lock = locks.Lock()
        lock.acquire()
        with self.assertRaises(gen.TimeoutError):
            yield lock.acquire(timeout=timedelta(seconds=0.01))
        # Still locked.
        self.assertFalse(lock.acquire().done())
    def test_multi_release(self):
        # Releasing an unlocked lock is an error.
        lock = locks.Lock()
        self.assertRaises(RuntimeError, lock.release)
        lock.acquire()
        lock.release()
        self.assertRaises(RuntimeError, lock.release)
    @gen_test
    def test_yield_lock(self):
        # Ensure we catch a "with (yield lock)", which should be
        # "with (yield lock.acquire())".
        with self.assertRaises(gen.BadYieldError):
            with (yield locks.Lock()):
                pass
    def test_context_manager_misuse(self):
        # Ensure we catch a "with lock", which should be
        # "with (yield lock.acquire())".
        with self.assertRaises(RuntimeError):
            with locks.Lock():
                pass
# Allow running this test module directly (outside the full test runner).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.