repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
Neural-Network/TicTacToe
pybrain/rl/environments/renderer.py
31
1454
# obsolete - should be deleted if there are no objections. __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de' from pybrain.utilities import abstractMethod import threading class Renderer(threading.Thread): """ The general interface for a class displays what is happening in an environment. The renderer is executed as concurrent thread. Start the renderer with the function start() inherited from Thread, and check with isAlive(), if the thread is running. """ def __init__(self): """ initializes some variables and parent init functions """ threading.Thread.__init__(self) def updateData(self): """ overwrite this class to update whatever data the renderer needs to display the current state of the world. """ abstractMethod() def _render(self): """ Here, the render methods are called. This function has to be implemented by subclasses. """ abstractMethod() def start(self): """ wrapper for Thread.start(). only calls start if thread has not been started before. """ if not self.isAlive(): threading.Thread.start(self) def run(self): """ Don't call this function on its own. Use start() instead. """ self._render() def stop(self): """ stop signal requested. stop current thread. @note: only if possible. OpenGL glutMainLoop is not stoppable. """ pass
bsd-3-clause
kevclarx/ansible
lib/ansible/modules/remote_management/ipmi/ipmi_boot.py
69
6041
#!/usr/bin/python # -*- coding: utf-8 -*- # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ipmi_boot short_description: Management of order of boot devices description: - Use this module to manage order of boot devices version_added: "2.2" options: name: description: - Hostname or ip address of the BMC. required: true port: description: - Remote RMCP port. required: false default: 623 user: description: - Username to use to connect to the BMC. required: true password: description: - Password to connect to the BMC. required: true default: null bootdev: description: - Set boot device to use on next reboot required: true choices: - network -- Request network boot - hd -- Boot from hard drive - safe -- Boot from hard drive, requesting 'safe mode' - optical -- boot from CD/DVD/BD drive - setup -- Boot into setup utility - default -- remove any IPMI directed boot device request state: description: - Whether to ensure that boot devices is desired. default: present choices: - present -- Request system turn on - absent -- Request system turn on persistent: description: - If set, ask that system firmware uses this device beyond next boot. Be aware many systems do not honor this. 
required: false type: bool default: false uefiboot: description: - If set, request UEFI boot explicitly. Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option. In practice, this flag not being set does not preclude UEFI boot on any system I've encountered. required: false type: bool default: false requirements: - "python >= 2.6" - pyghmi author: "Bulat Gaifullin (gaifullinbf@gmail.com)" ''' RETURN = ''' bootdev: description: The boot device name which will be used beyond next boot. returned: success type: string sample: default persistent: description: If True, system firmware will use this device beyond next boot. returned: success type: bool sample: false uefimode: description: If True, system firmware will use UEFI boot explicitly beyond next boot. returned: success type: bool sample: false ''' EXAMPLES = ''' # Ensure bootdevice is HD. - ipmi_boot: name: test.testdomain.com user: admin password: password bootdev: hd # Ensure bootdevice is not Network - ipmi_boot: name: test.testdomain.com user: admin password: password bootdev: network state: absent ''' try: from pyghmi.ipmi import command except ImportError: command = None from ansible.module_utils.basic import * def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), port=dict(default=623, type='int'), user=dict(required=True, no_log=True), password=dict(required=True, no_log=True), state=dict(default='present', choices=['present', 'absent']), bootdev=dict(required=True, choices=['network', 'hd', 'safe', 'optical', 'setup', 'default']), persistent=dict(default=False, type='bool'), uefiboot=dict(default=False, type='bool') ), supports_check_mode=True, ) if command is None: module.fail_json(msg='the python pyghmi module is required') name = module.params['name'] port = module.params['port'] user = module.params['user'] password = module.params['password'] state = module.params['state'] bootdev = module.params['bootdev'] 
persistent = module.params['persistent'] uefiboot = module.params['uefiboot'] request = dict() if state == 'absent' and bootdev == 'default': module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.") # --- run command --- try: ipmi_cmd = command.Command( bmc=name, userid=user, password=password, port=port ) module.debug('ipmi instantiated - name: "%s"' % name) current = ipmi_cmd.get_bootdev() # uefimode may not supported by BMC, so use desired value as default current.setdefault('uefimode', uefiboot) if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot): request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent) elif state == 'absent' and current['bootdev'] == bootdev: request = dict(bootdev='default') else: module.exit_json(changed=False, **current) if module.check_mode: response = dict(bootdev=request['bootdev']) else: response = ipmi_cmd.set_bootdev(**request) if 'error' in response: module.fail_json(msg=response['error']) if 'persist' in request: response['persistent'] = request['persist'] if 'uefiboot' in request: response['uefimode'] = request['uefiboot'] module.exit_json(changed=True, **response) except Exception as e: module.fail_json(msg=str(e)) if __name__ == '__main__': main()
gpl-3.0
firerszd/kbengine
kbe/src/lib/python/Lib/lib2to3/fixes/fix_renames.py
203
2221
"""Fix incompatible renames Fixes: * sys.maxint -> sys.maxsize """ # Author: Christian Heimes # based on Collin Winter's fix_import # Local imports from .. import fixer_base from ..fixer_util import Name, attr_chain MAPPING = {"sys": {"maxint" : "maxsize"}, } LOOKUP = {} def alternates(members): return "(" + "|".join(map(repr, members)) + ")" def build_pattern(): #bare = set() for module, replace in list(MAPPING.items()): for old_attr, new_attr in list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield """ # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # """ % (module, module) yield """ import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > """ % (module, old_attr, old_attr) yield """ power< module_name=%r trailer< '.' attr_name=%r > any* > """ % (module, old_attr) #yield """bare_name=%s""" % alternates(bare) class FixRenames(fixer_base.BaseFix): BM_compatible = True PATTERN = "|".join(build_pattern()) order = "pre" # Pre-order tree traversal # Don't match the node if it's within another match def match(self, node): match = super(FixRenames, self).match results = match(node) if results: if any(match(obj) for obj in attr_chain(node, "parent")): return False return results return False #def start_tree(self, tree, filename): # super(FixRenames, self).start_tree(tree, filename) # self.replace = {} def transform(self, node, results): mod_name = results.get("module_name") attr_name = results.get("attr_name") #bare_name = results.get("bare_name") #import_mod = results.get("module") if mod_name and attr_name: new_attr = LOOKUP[(mod_name.value, attr_name.value)] attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
lgpl-3.0
ossdemura/django-miniblog
src/Lib/site-packages/django/templatetags/l10n.py
337
1735
from django.template import Library, Node, TemplateSyntaxError from django.utils import formats from django.utils.encoding import force_text register = Library() @register.filter(is_safe=False) def localize(value): """ Forces a value to be rendered as a localized value, regardless of the value of ``settings.USE_L10N``. """ return force_text(formats.localize(value, use_l10n=True)) @register.filter(is_safe=False) def unlocalize(value): """ Forces a value to be rendered as a non-localized value, regardless of the value of ``settings.USE_L10N``. """ return force_text(value) class LocalizeNode(Node): def __init__(self, nodelist, use_l10n): self.nodelist = nodelist self.use_l10n = use_l10n def __repr__(self): return "<LocalizeNode>" def render(self, context): old_setting = context.use_l10n context.use_l10n = self.use_l10n output = self.nodelist.render(context) context.use_l10n = old_setting return output @register.tag('localize') def localize_tag(parser, token): """ Forces or prevents localization of values, regardless of the value of `settings.USE_L10N`. Sample usage:: {% localize off %} var pi = {{ 3.1415 }}; {% endlocalize %} """ use_l10n = None bits = list(token.split_contents()) if len(bits) == 1: use_l10n = True elif len(bits) > 2 or bits[1] not in ('on', 'off'): raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0]) else: use_l10n = bits[1] == 'on' nodelist = parser.parse(('endlocalize',)) parser.delete_first_token() return LocalizeNode(nodelist, use_l10n)
mit
jmartinezchaine/OpenERP
openerp/pychart/line_style_doc.py
15
1327
# -*- coding: utf-8 -*- # automatically generated by generate_docs.py. doc="""Attributes supported by this class are: color(type:color.T) default="The color of the line.". width(type:length in points (\\xref{unit})) default="Width of the line, in points.". dash(type:tuple) default="The value of None will draw a solid line. Otherwise, this attribute specifies the style of dashed lines. The 2N'th value specifies the length of the line (in points), and 2N+1'th value specifies the length of the blank. For example, the dash style of (3,2,4,1) draws a dashed line that looks like @samp{---__----_---__----_...}. ". cap_style(type:int) default="Defines the style of the tip of the line segment. 0: butt cap (square cutoff, with no projection beyond), 1: round cap (arc), 2: projecting square cap (square cutoff, but the line extends half the line width). See also Postscript/PDF reference manual.". join_style(type:int) default="Join style. 0: Miter join (sharp, pointed corners), 1: round join (rounded corners), 2: bevel join (flattened corners). See also Postscript/PDF reference manual.". """
agpl-3.0
xiandiancloud/edx-platform
lms/djangoapps/staticbook/tests.py
30
10797
""" Test the lms/staticbook views. """ import textwrap import mock import requests from django.test.utils import override_settings from django.core.urlresolvers import reverse, NoReverseMatch from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE from student.tests.factories import UserFactory, CourseEnrollmentFactory from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase IMAGE_BOOK = ("An Image Textbook", "http://example.com/the_book/") PDF_BOOK = { "tab_title": "Textbook", "title": "A PDF Textbook", "chapters": [ {"title": "Chapter 1 for PDF", "url": "https://somehost.com/the_book/chap1.pdf"}, {"title": "Chapter 2 for PDF", "url": "https://somehost.com/the_book/chap2.pdf"}, ], } PORTABLE_PDF_BOOK = { "tab_title": "Textbook", "title": "A PDF Textbook", "chapters": [ {"title": "Chapter 1 for PDF", "url": "/static/chap1.pdf"}, {"title": "Chapter 2 for PDF", "url": "/static/chap2.pdf"}, ], } HTML_BOOK = { "tab_title": "Textbook", "title": "An HTML Textbook", "chapters": [ {"title": "Chapter 1 for HTML", "url": "https://somehost.com/the_book/chap1.html"}, {"title": "Chapter 2 for HTML", "url": "https://somehost.com/the_book/chap2.html"}, ], } @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class StaticBookTest(ModuleStoreTestCase): """ Helpers for the static book tests. """ def __init__(self, *args, **kwargs): super(StaticBookTest, self).__init__(*args, **kwargs) self.course = None def make_course(self, **kwargs): """ Make a course with an enrolled logged-in student. """ self.course = CourseFactory.create(**kwargs) user = UserFactory.create() CourseEnrollmentFactory.create(user=user, course_id=self.course.id) self.client.login(username=user.username, password='test') def make_url(self, url_name, **kwargs): """ Make a URL for a `url_name` using keyword args for url slots. Automatically provides the course id. 
""" kwargs['course_id'] = self.course.id.to_deprecated_string() url = reverse(url_name, kwargs=kwargs) return url class StaticImageBookTest(StaticBookTest): """ Test the image-based static book view. """ def test_book(self): # We can access a book. with mock.patch.object(requests, 'get') as mock_get: mock_get.return_value.text = textwrap.dedent('''\ <?xml version="1.0"?> <table_of_contents> <entry page="9" page_label="ix" name="Contents!?"/> <entry page="1" page_label="i" name="Preamble"> <entry page="4" page_label="iv" name="About the Elephants"/> </entry> </table_of_contents> ''') self.make_course(textbooks=[IMAGE_BOOK]) url = self.make_url('book', book_index=0) response = self.client.get(url) self.assertContains(response, "Contents!?") self.assertContains(response, "About the Elephants") def test_bad_book_id(self): # A bad book id will be a 404. self.make_course(textbooks=[IMAGE_BOOK]) with self.assertRaises(NoReverseMatch): self.make_url('book', book_index='fooey') def test_out_of_range_book_id(self): self.make_course() url = self.make_url('book', book_index=0) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_bad_page_id(self): # A bad page id will cause a 404. self.make_course(textbooks=[IMAGE_BOOK]) with self.assertRaises(NoReverseMatch): self.make_url('book', book_index=0, page='xyzzy') class StaticPdfBookTest(StaticBookTest): """ Test the PDF static book view. """ def test_book(self): # We can access a book. self.make_course(pdf_textbooks=[PDF_BOOK]) url = self.make_url('pdf_book', book_index=0) response = self.client.get(url) self.assertContains(response, "Chapter 1 for PDF") self.assertNotContains(response, "options.chapterNum =") self.assertNotContains(response, "page=") def test_book_chapter(self): # We can access a book at a particular chapter. 
self.make_course(pdf_textbooks=[PDF_BOOK]) url = self.make_url('pdf_book', book_index=0, chapter=2) response = self.client.get(url) self.assertContains(response, "Chapter 2 for PDF") self.assertContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url'])) self.assertNotContains(response, "page=") def test_book_page(self): # We can access a book at a particular page. self.make_course(pdf_textbooks=[PDF_BOOK]) url = self.make_url('pdf_book', book_index=0, page=17) response = self.client.get(url) self.assertContains(response, "Chapter 1 for PDF") self.assertNotContains(response, "options.chapterNum =") self.assertContains(response, "page=17") def test_book_chapter_page(self): # We can access a book at a particular chapter and page. self.make_course(pdf_textbooks=[PDF_BOOK]) url = self.make_url('pdf_book', book_index=0, chapter=2, page=17) response = self.client.get(url) self.assertContains(response, "Chapter 2 for PDF") self.assertContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url'])) self.assertContains(response, "page=17") def test_bad_book_id(self): # If the book id isn't an int, we'll get a 404. self.make_course(pdf_textbooks=[PDF_BOOK]) with self.assertRaises(NoReverseMatch): self.make_url('pdf_book', book_index='fooey', chapter=1) def test_out_of_range_book_id(self): # If we have one book, asking for the second book will fail with a 404. self.make_course(pdf_textbooks=[PDF_BOOK]) url = self.make_url('pdf_book', book_index=1, chapter=1) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_no_book(self): # If we have no books, asking for the first book will fail with a 404. self.make_course() url = self.make_url('pdf_book', book_index=0, chapter=1) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_chapter_xss(self): # The chapter in the URL used to go right on the page. self.make_course(pdf_textbooks=[PDF_BOOK]) # It's no longer possible to use a non-integer chapter. 
with self.assertRaises(NoReverseMatch): self.make_url('pdf_book', book_index=0, chapter='xyzzy') def test_page_xss(self): # The page in the URL used to go right on the page. self.make_course(pdf_textbooks=[PDF_BOOK]) # It's no longer possible to use a non-integer page. with self.assertRaises(NoReverseMatch): self.make_url('pdf_book', book_index=0, page='xyzzy') def test_chapter_page_xss(self): # The page in the URL used to go right on the page. self.make_course(pdf_textbooks=[PDF_BOOK]) # It's no longer possible to use a non-integer page and a non-integer chapter. with self.assertRaises(NoReverseMatch): self.make_url('pdf_book', book_index=0, chapter='fooey', page='xyzzy') def test_static_url_map_contentstore(self): """ This ensure static URL mapping is happening properly for a course that uses the contentstore """ self.make_course(pdf_textbooks=[PORTABLE_PDF_BOOK]) url = self.make_url('pdf_book', book_index=0, chapter=1) response = self.client.get(url) self.assertNotContains(response, 'file={}'.format(PORTABLE_PDF_BOOK['chapters'][0]['url'])) self.assertContains(response, 'file=/c4x/{0.org}/{0.course}/asset/{1}'.format( self.course.location, PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', ''))) def test_static_url_map_static_asset_path(self): """ Like above, but used when the course has set a static_asset_path """ self.make_course(pdf_textbooks=[PORTABLE_PDF_BOOK], static_asset_path='awesomesauce') url = self.make_url('pdf_book', book_index=0, chapter=1) response = self.client.get(url) self.assertNotContains(response, 'file={}'.format(PORTABLE_PDF_BOOK['chapters'][0]['url'])) self.assertNotContains(response, 'file=/c4x/{0.org}/{0.course}/asset/{1}'.format( self.course.location, PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', ''))) self.assertContains(response, 'file=/static/awesomesauce/{}'.format( PORTABLE_PDF_BOOK['chapters'][0]['url'].replace('/static/', ''))) class StaticHtmlBookTest(StaticBookTest): """ Test the HTML static book view. 
""" def test_book(self): # We can access a book. self.make_course(html_textbooks=[HTML_BOOK]) url = self.make_url('html_book', book_index=0) response = self.client.get(url) self.assertContains(response, "Chapter 1 for HTML") self.assertNotContains(response, "options.chapterNum =") def test_book_chapter(self): # We can access a book at a particular chapter. self.make_course(html_textbooks=[HTML_BOOK]) url = self.make_url('html_book', book_index=0, chapter=2) response = self.client.get(url) self.assertContains(response, "Chapter 2 for HTML") self.assertContains(response, "options.chapterNum = 2;") def test_bad_book_id(self): # If we have one book, asking for the second book will fail with a 404. self.make_course(html_textbooks=[HTML_BOOK]) url = self.make_url('html_book', book_index=1, chapter=1) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_no_book(self): # If we have no books, asking for the first book will fail with a 404. self.make_course() url = self.make_url('html_book', book_index=0, chapter=1) response = self.client.get(url) self.assertEqual(response.status_code, 404) def test_chapter_xss(self): # The chapter in the URL used to go right on the page. self.make_course(pdf_textbooks=[HTML_BOOK]) # It's no longer possible to use a non-integer chapter. with self.assertRaises(NoReverseMatch): self.make_url('html_book', book_index=0, chapter='xyzzy')
agpl-3.0
PatidarWeb/poedit
deps/boost/tools/build/test/core_actions_quietly.py
51
1038
#!/usr/bin/python # Copyright 2007 Rene Rivera. # Copyright 2011 Steven Watanabe # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) import BoostBuild t = BoostBuild.Tester(pass_toolset=0) t.write("file.jam", """\ actions quietly .a. { echo [$(<:B)] 0 echo [$(<:B)] 1 echo [$(<:B)] 2 } rule .a. { DEPENDS $(<) : $(>) ; } NOTFILE subtest ; .a. subtest_a : subtest ; .a. subtest_b : subtest ; DEPENDS all : subtest_a subtest_b ; """) t.run_build_system(["-ffile.jam", "-d2"], stdout="""\ ...found 4 targets... ...updating 2 targets... .a. subtest_a echo [subtest_a] 0 echo [subtest_a] 1 echo [subtest_a] 2 [subtest_a] 0 [subtest_a] 1 [subtest_a] 2 .a. subtest_b echo [subtest_b] 0 echo [subtest_b] 1 echo [subtest_b] 2 [subtest_b] 0 [subtest_b] 1 [subtest_b] 2 ...updated 2 targets... """) t.run_build_system(["-ffile.jam", "-d1"], stdout="""\ ...found 4 targets... ...updating 2 targets... ...updated 2 targets... """) t.cleanup()
mit
arvinddoraiswamy/mywebappscripts
BurpExtensions/third_party_referer_record.py
4
1651
from burp import IBurpExtender from burp import IHttpListener from burp import IProxyListener import re import sys import os urls_in_scope=['testblah.com','qa.ooboob.com'] #Adding directory to the path where Python searches for modules module_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/WebAppsec/BurpExtensions/modules/') sys.path.insert(0, module_folder) import webcommon class BurpExtender(IBurpExtender, IHttpListener, IProxyListener): def registerExtenderCallbacks(self,callbacks): # Get a reference to the Burp helpers object self._helpers = callbacks.getHelpers() # set our extension name callbacks.setExtensionName("Third Party Referer") # register ourselves as an HTTP listener callbacks.registerHttpListener(self) # register ourselves as a Proxy listener callbacks.registerProxyListener(self) def processProxyMessage(self,messageIsRequest,message): request_http_service=message.getMessageInfo().getHttpService() request_byte_array=message.getMessageInfo().getRequest() request_object=self._helpers.analyzeRequest(request_http_service, request_byte_array) #Extract hostname from header hostname=webcommon.get_host_header_from_request(self,request_object) #Check if the URL is NOT in scope. We want to look at referers for the requests that are made to OTHER domains. if (hostname) and (hostname[1] not in urls_in_scope): #Extract referer from header referer=webcommon.get_referer_header_from_request(self,request_object) if referer: t1=referer[1].split('/') if t1[2] in urls_in_scope: print referer[1]
mit
chvrga/outdoor-explorer
java/play-1.4.4/python/Lib/multiprocessing/forking.py
3
14804
# # Module for starting a process object using os.fork() or CreateProcess() # # multiprocessing/forking.py # # Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt # import os import sys import signal from multiprocessing import util, process __all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler'] # # Check that the current thread is spawning a child process # def assert_spawning(self): if not Popen.thread_is_spawning(): raise RuntimeError( '%s objects should only be shared between processes' ' through inheritance' % type(self).__name__ ) # # Try making some callable types picklable # from pickle import Pickler class ForkingPickler(Pickler): dispatch = Pickler.dispatch.copy() @classmethod def register(cls, type, reduce): def dispatcher(self, obj): rv = reduce(obj) self.save_reduce(obj=obj, *rv) cls.dispatch[type] = dispatcher def _reduce_method(m): if m.im_self is None: return getattr, (m.im_class, m.im_func.func_name) else: return getattr, (m.im_self, m.im_func.func_name) ForkingPickler.register(type(ForkingPickler.save), _reduce_method) def _reduce_method_descriptor(m): return getattr, (m.__objclass__, m.__name__) ForkingPickler.register(type(list.append), _reduce_method_descriptor) ForkingPickler.register(type(int.__add__), _reduce_method_descriptor) #def _reduce_builtin_function_or_method(m): # return getattr, (m.__self__, m.__name__) #ForkingPickler.register(type(list().append), _reduce_builtin_function_or_method) #ForkingPickler.register(type(int().__add__), _reduce_builtin_function_or_method) try: from functools import partial except ImportError: pass else: def _reduce_partial(p): return _rebuild_partial, (p.func, p.args, p.keywords or {}) def _rebuild_partial(func, args, keywords): return partial(func, *args, **keywords) ForkingPickler.register(partial, _reduce_partial) # # Unix # if sys.platform != 'win32': import time exit = os._exit duplicate = os.dup close = os.close # # We define a Popen class similar to the one from 
subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): def __init__(self, process_obj): sys.stdout.flush() sys.stderr.flush() self.returncode = None self.pid = os.fork() if self.pid == 0: if 'random' in sys.modules: import random random.seed() code = process_obj._bootstrap() sys.stdout.flush() sys.stderr.flush() os._exit(code) def poll(self, flag=os.WNOHANG): if self.returncode is None: pid, sts = os.waitpid(self.pid, flag) if pid == self.pid: if os.WIFSIGNALED(sts): self.returncode = -os.WTERMSIG(sts) else: assert os.WIFEXITED(sts) self.returncode = os.WEXITSTATUS(sts) return self.returncode def wait(self, timeout=None): if timeout is None: return self.poll(0) deadline = time.time() + timeout delay = 0.0005 while 1: res = self.poll() if res is not None: break remaining = deadline - time.time() if remaining <= 0: break delay = min(delay * 2, remaining, 0.05) time.sleep(delay) return res def terminate(self): if self.returncode is None: try: os.kill(self.pid, signal.SIGTERM) except OSError, e: if self.wait(timeout=0.1) is None: raise @staticmethod def thread_is_spawning(): return False # # Windows # else: import thread import msvcrt import _subprocess import time from ._multiprocessing import win32, Connection, PipeConnection from .util import Finalize #try: # from cPickle import dump, load, HIGHEST_PROTOCOL #except ImportError: from pickle import load, HIGHEST_PROTOCOL def dump(obj, file, protocol=None): ForkingPickler(file, protocol).dump(obj) # # # TERMINATE = 0x10000 WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) exit = win32.ExitProcess close = win32.CloseHandle # # _python_exe is the assumed path to the python executable. # People embedding Python want to modify it. 
# if sys.executable.lower().endswith('pythonservice.exe'): _python_exe = os.path.join(sys.exec_prefix, 'python.exe') else: _python_exe = sys.executable def set_executable(exe): global _python_exe _python_exe = exe # # # def duplicate(handle, target_process=None, inheritable=False): if target_process is None: target_process = _subprocess.GetCurrentProcess() return _subprocess.DuplicateHandle( _subprocess.GetCurrentProcess(), handle, target_process, 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS ).Detach() # # We define a Popen class similar to the one from subprocess, but # whose constructor takes a process object as its argument. # class Popen(object): ''' Start a subprocess to run the code of a process object ''' _tls = thread._local() def __init__(self, process_obj): # create pipe for communication with child rfd, wfd = os.pipe() # get handle for read end of the pipe and make it inheritable rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True) os.close(rfd) # start process cmd = get_command_line() + [rhandle] cmd = ' '.join('"%s"' % x for x in cmd) hp, ht, pid, tid = _subprocess.CreateProcess( _python_exe, cmd, None, None, 1, 0, None, None, None ) ht.Close() close(rhandle) # set attributes of self self.pid = pid self.returncode = None self._handle = hp # send information to child prep_data = get_preparation_data(process_obj._name) to_child = os.fdopen(wfd, 'wb') Popen._tls.process_handle = int(hp) try: dump(prep_data, to_child, HIGHEST_PROTOCOL) dump(process_obj, to_child, HIGHEST_PROTOCOL) finally: del Popen._tls.process_handle to_child.close() @staticmethod def thread_is_spawning(): return getattr(Popen._tls, 'process_handle', None) is not None @staticmethod def duplicate_for_child(handle): return duplicate(handle, Popen._tls.process_handle) def wait(self, timeout=None): if self.returncode is None: if timeout is None: msecs = _subprocess.INFINITE else: msecs = max(0, int(timeout * 1000 + 0.5)) res = 
_subprocess.WaitForSingleObject(int(self._handle), msecs) if res == _subprocess.WAIT_OBJECT_0: code = _subprocess.GetExitCodeProcess(self._handle) if code == TERMINATE: code = -signal.SIGTERM self.returncode = code return self.returncode def poll(self): return self.wait(timeout=0) def terminate(self): if self.returncode is None: try: _subprocess.TerminateProcess(int(self._handle), TERMINATE) except WindowsError: if self.wait(timeout=0.1) is None: raise # # # def is_forking(argv): ''' Return whether commandline indicates we are forking ''' if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': assert len(argv) == 3 return True else: return False def freeze_support(): ''' Run code for process object if this in not the main process ''' if is_forking(sys.argv): main() sys.exit() def get_command_line(): ''' Returns prefix of command line used for spawning a child process ''' if process.current_process()._identity==() and is_forking(sys.argv): raise RuntimeError(''' Attempt to start a new process before the current process has finished its bootstrapping phase. This probably means that you are on Windows and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... 
The "freeze_support()" line can be omitted if the program is not going to be frozen to produce a Windows executable.''') if getattr(sys, 'frozen', False): return [sys.executable, '--multiprocessing-fork'] else: prog = 'from multiprocessing.forking import main; main()' return [_python_exe, '-c', prog, '--multiprocessing-fork'] def main(): ''' Run code specifed by data received over pipe ''' assert is_forking(sys.argv) handle = int(sys.argv[-1]) fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) from_parent = os.fdopen(fd, 'rb') process.current_process()._inheriting = True preparation_data = load(from_parent) prepare(preparation_data) self = load(from_parent) process.current_process()._inheriting = False from_parent.close() exitcode = self._bootstrap() exit(exitcode) def get_preparation_data(name): ''' Return info about parent needed by child to unpickle process object ''' from .util import _logger, _log_to_stderr d = dict( name=name, sys_path=sys.path, sys_argv=sys.argv, log_to_stderr=_log_to_stderr, orig_dir=process.ORIGINAL_DIR, authkey=process.current_process().authkey, ) if _logger is not None: d['log_level'] = _logger.getEffectiveLevel() if not WINEXE: main_path = getattr(sys.modules['__main__'], '__file__', None) if not main_path and sys.argv[0] not in ('', '-c'): main_path = sys.argv[0] if main_path is not None: if not os.path.isabs(main_path) and \ process.ORIGINAL_DIR is not None: main_path = os.path.join(process.ORIGINAL_DIR, main_path) d['main_path'] = os.path.normpath(main_path) return d # # Make (Pipe)Connection picklable # def reduce_connection(conn): if not Popen.thread_is_spawning(): raise RuntimeError( 'By default %s objects can only be shared between processes\n' 'using inheritance' % type(conn).__name__ ) return type(conn), (Popen.duplicate_for_child(conn.fileno()), conn.readable, conn.writable) ForkingPickler.register(Connection, reduce_connection) ForkingPickler.register(PipeConnection, reduce_connection) # # Prepare current process # 
old_main_modules = [] def prepare(data): ''' Try to get current process ready to unpickle process object ''' old_main_modules.append(sys.modules['__main__']) if 'name' in data: process.current_process().name = data['name'] if 'authkey' in data: process.current_process()._authkey = data['authkey'] if 'log_to_stderr' in data and data['log_to_stderr']: util.log_to_stderr() if 'log_level' in data: util.get_logger().setLevel(data['log_level']) if 'sys_path' in data: sys.path = data['sys_path'] if 'sys_argv' in data: sys.argv = data['sys_argv'] if 'dir' in data: os.chdir(data['dir']) if 'orig_dir' in data: process.ORIGINAL_DIR = data['orig_dir'] if 'main_path' in data: main_path = data['main_path'] main_name = os.path.splitext(os.path.basename(main_path))[0] if main_name == '__init__': main_name = os.path.basename(os.path.dirname(main_path)) if main_name != 'ipython': import imp if main_path is None: dirs = None elif os.path.basename(main_path).startswith('__init__.py'): dirs = [os.path.dirname(os.path.dirname(main_path))] else: dirs = [os.path.dirname(main_path)] assert main_name not in sys.modules, main_name file, path_name, etc = imp.find_module(main_name, dirs) try: # We would like to do "imp.load_module('__main__', ...)" # here. However, that would cause 'if __name__ == # "__main__"' clauses to be executed. main_module = imp.load_module( '__parents_main__', file, path_name, etc ) finally: if file: file.close() sys.modules['__main__'] = main_module main_module.__name__ = '__main__' # Try to make the potentially picklable objects in # sys.modules['__main__'] realize they are in the main # module -- somewhat ugly. for obj in main_module.__dict__.values(): try: if obj.__module__ == '__parents_main__': obj.__module__ = '__main__' except Exception: pass
mit
Diwaniya-Labs/django-rest-forum
discussion/serializers.py
1
1857
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from rest_framework import serializers from models import Category, Post, Announcement # initiate logger logging.getLogger(__name__) class CategorySerializer(serializers.HyperlinkedModelSerializer): """ CategorySerializer """ class Meta: model = Category class PostRepliesSerializer(serializers.HyperlinkedModelSerializer): """ PostRepliesSerializer will return replies of a specific post """ class Meta: model = Post class PostSerializer(serializers.HyperlinkedModelSerializer): """ PostSerializer """ created_by = serializers.IntegerField(source='created_by', read_only=True) replies = serializers.SerializerMethodField('get_replies') class Meta: model = Post def get_replies(self, obj): serializer = PostRepliesSerializer(Post.objects.filter(reply_to=obj), many=True, context={'request': self.context.get('request')}) return serializer.data def validate(self, attrs): """ """ if attrs['reply_to']: if attrs['reply_to'].reply_to: raise serializers.ValidationError('You cannot reply to a reply') return attrs class AnnouncementSerializer(serializers.HyperlinkedModelSerializer): """ AnnouncementSerializer """ marked_as_read = serializers.SerializerMethodField('get_marked_as_read_flag') class Meta: model = Announcement exclude = ('mark_as_read', 'created_by') def get_marked_as_read_flag(self, obj): user = self.context.get('request').user if user.is_anonymous(): return False if obj.mark_as_read.filter(pk=user.pk).count(): return True return False
lgpl-3.0
xavfernandez/packaging
packaging/__about__.py
1
1078
# Copyright 2014 Donald Stufft # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function __all__ = [ "__title__", "__summary__", "__uri__", "__version__", "__author__", "__email__", "__license__", "__copyright__", ] __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" __version__ = "15.3.dev0" __author__ = "Donald Stufft" __email__ = "donald@stufft.io" __license__ = "Apache License, Version 2.0" __copyright__ = "Copyright 2014 %s" % __author__
apache-2.0
mpatacchiola/naogui
zpgc_2016b/include/pynaoqi-python2.7-2.1.3.3-linux64/qi/test/test_async.py
4
1534
#! /usr/bin/python2 import qi def add(a, b): return a + b def fail(): assert(False) def err(): raise RuntimeError("sdsd") def test_async_fun(): f = qi.async(add, 21, 21) assert(f.value() == 42) def test_async_error(): f = qi.async(err) assert(f.hasError() == True) assert(f.error().startswith("RuntimeError: sdsd")) class Adder: def __init__(self): self.v = 0 def add(self, a): self.v += a return self.v def val(self): return self.v def test_async_meth(): ad = Adder() f = qi.async(ad.add, 21) assert(f.value() == 21) f = qi.async(ad.add, 21) assert(f.value() == 42) f = qi.async(ad.val) assert(f.value() == 42) def test_async_delay(): f = qi.async(add, 21, 21, delay=1000) assert(f.value() == 42) result = 0 import time def test_periodic_task(): t = qi.PeriodicTask() def add(): global result result += 1 t.setCallback(add) t.setUsPeriod(1000) t.start(True) time.sleep(1) t.stop() assert result > 5 #how to find 5: plouf plouf plouf cur = result time.sleep(1) assert cur == result def test_async_cancel(): f = qi.async(fail, delay=1000000) f.cancel() f.wait() assert(f.isFinished()) assert(not f.hasError()) assert(f.isCanceled()) def main(): test_async_fun() test_async_error() test_async_meth() test_async_delay() test_periodic_task() test_async_cancel() if __name__ == "__main__": main()
mit
inveniosoftware/es-jsonschema
setup.py
2
4456
# -*- coding: utf-8 -*- # # This file is part of DoMapping. # Copyright (C) 2015, 2016, 2017 CERN. # # DoMapping is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # DoMapping is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with DoMapping; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """DoMapping generates Elasticsearch mappings from JSON Schemas.""" import os import sys from setuptools import find_packages, setup from setuptools.command.test import test as TestCommand readme = open('README.rst').read() history = open('CHANGES.rst').read() tests_require = [ 'check-manifest>=0.25', 'coverage>=4.0', 'isort>=4.2.2', 'pydocstyle>=1.0.0', 'pytest-cache>=1.0', 'pytest-cov>=1.8.0', 'pytest-pep8>=1.0.6', 'pytest>=2.8.0', 'responses>=0.5.1' ] extras_require = { 'docs': [ "Sphinx>=1.4.2", ], 'tests': tests_require, } extras_require['all'] = [] for reqs in extras_require.values(): extras_require['all'].extend(reqs) setup_requires = [ ] install_requires = [ 'jsonschema>=2.5.0', 'six>=1.9', 'Jinja2>=2.7', 'click>=5.1', ] packages = find_packages() class PyTest(TestCommand): """PyTest Test.""" user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): """Init pytest.""" TestCommand.initialize_options(self) self.pytest_args = [] try: from 
ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser config = ConfigParser() config.read('pytest.ini') self.pytest_args = config.get('pytest', 'addopts').split(' ') def finalize_options(self): """Finalize pytest.""" TestCommand.finalize_options(self) if hasattr(self, '_test_args'): self.test_suite = '' else: self.test_args = [] self.test_suite = True def run_tests(self): """Run tests.""" # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) # Get the version string. Cannot be done with import! g = {} with open(os.path.join('domapping', 'version.py'), 'rt') as fp: exec(fp.read(), g) version = g['__version__'] setup( name='domapping', version=version, description=__doc__, long_description=readme + '\n\n' + history, keywords='jsonschema, elasticsearch', license='GPLv2', author='CERN', author_email='info@inveniosoftware.org', url='https://github.com/inveniosoftware/domapping', packages=packages, zip_safe=False, include_package_data=True, platforms='any', entry_points={ 'console_scripts': [ 'domapping = domapping.cli:cli', ], }, extras_require=extras_require, install_requires=install_requires, setup_requires=setup_requires, tests_require=tests_require, classifiers=[ 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Development Status :: 3 - Alpha', ], cmdclass={'test': PyTest}, )
gpl-2.0
seewindcn/tortoisehg
src/tortoisehg/hgqt/cmdcore.py
1
30355
# cmdcore.py - run Mercurial commands in a separate thread or process # # Copyright 2010 Yuki KODAMA <endflow.net@gmail.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2, incorporated herein by reference. import os, signal, struct, sys, time from PyQt4.QtCore import QBuffer, QIODevice, QObject, QProcess, QTimer from PyQt4.QtCore import pyqtSignal, pyqtSlot from tortoisehg.util import hglib, paths, pipeui from tortoisehg.util.i18n import _ class ProgressMessage(tuple): __slots__ = () def __new__(cls, topic, pos, item='', unit='', total=None): return tuple.__new__(cls, (topic, pos, item, unit, total)) @property def topic(self): return self[0] # unicode @property def pos(self): return self[1] # int or None @property def item(self): return self[2] # unicode @property def unit(self): return self[3] # unicode @property def total(self): return self[4] # int or None def __repr__(self): names = ('topic', 'pos', 'item', 'unit', 'total') fields = ('%s=%r' % (n, v) for n, v in zip(names, self)) return '%s(%s)' % (self.__class__.__name__, ', '.join(fields)) class UiHandler(object): """Interface to handle user interaction of Mercurial commands""" NoInput = 0 TextInput = 1 PasswordInput = 2 ChoiceInput = 3 def __init__(self): self._dataout = None def setPrompt(self, text, mode, default=None): pass def getLineInput(self): # '' to use default; None to abort return '' def setDataOutputDevice(self, device): # QIODevice to write data output; None to disable capturing self._dataout = device def writeOutput(self, data, label): if not self._dataout or label.startswith('ui.') or ' ui.' in label: return -1 return self._dataout.write(data) def _createDefaultUiHandler(uiparent): if uiparent is None: return UiHandler() # this makes layering violation but is handy to create GUI handler by # default. 
nobody would want to write # uihandler = cmdui.InteractiveUiHandler(self) # cmdagent.runCommand(..., uihandler) # in place of # cmdagent.runCommand(..., self) from tortoisehg.hgqt import cmdui return cmdui.InteractiveUiHandler(uiparent) class _ProtocolError(Exception): """Error while processing server message; must be caught by CmdWorker""" class CmdWorker(QObject): """Back-end service to run Mercurial commands""" # If worker has permanent service, serviceState() should be overridden # to represent the availability of the service. NoService denotes that # it can run command or quit immediately. NoService = 0 Starting = 1 Ready = 2 Stopping = 3 Restarting = 4 NotRunning = 5 serviceStateChanged = pyqtSignal(int) commandFinished = pyqtSignal(int) outputReceived = pyqtSignal(str, str) progressReceived = pyqtSignal(ProgressMessage) def serviceState(self): return CmdWorker.NoService def startService(self): # NotRunning->Starting; Stopping->Restarting->Starting; *->* pass def stopService(self): # {Starting,Ready,Restarting}->Stopping; *->* pass def startCommand(self, cmdline, uihandler): raise NotImplementedError def abortCommand(self): raise NotImplementedError def isCommandRunning(self): raise NotImplementedError _localprocexts = [ 'tortoisehg.util.hgcommands', 'tortoisehg.util.partialcommit', 'tortoisehg.util.pipeui', ] _localserverexts = [ 'tortoisehg.util.hgdispatch', ] if os.name == 'nt': # to translate WM_CLOSE posted by QProcess.terminate() _localprocexts.append('tortoisehg.util.win32ill') def _interruptproc(proc): proc.terminate() else: def _interruptproc(proc): os.kill(int(proc.pid()), signal.SIGINT) def _fixprocenv(proc): env = os.environ.copy() # disable flags and extensions that might break our output parsing # (e.g. 
"defaults" arguments, "PAGER" of "email --test") env['HGPLAINEXCEPT'] = 'alias,i18n,revsetalias' if not getattr(sys, 'frozen', False): # make sure hg process can look up our modules env['PYTHONPATH'] = (paths.get_prog_root() + os.pathsep + env.get('PYTHONPATH', '')) # not using setProcessEnvironment() for compatibility with PyQt 4.6 proc.setEnvironment([hglib.tounicode('%s=%s' % p) for p in env.iteritems()]) def _proccmdline(ui, exts): configs = [(section, name, value) for section, name, value in ui.walkconfig() if ui.configsource(section, name) == '--config'] configs.extend(('extensions', e, '') for e in exts) cmdline = list(paths.get_hg_command()) for section, name, value in configs: cmdline.extend(('--config', '%s.%s=%s' % (section, name, value))) return map(hglib.tounicode, cmdline) class CmdProc(CmdWorker): 'Run mercurial command in separate process' def __init__(self, ui, parent=None, cwd=None): super(CmdProc, self).__init__(parent) self._ui = ui self._uihandler = None self._proc = proc = QProcess(self) _fixprocenv(proc) if cwd: proc.setWorkingDirectory(cwd) proc.finished.connect(self._finish) proc.readyReadStandardOutput.connect(self._stdout) proc.readyReadStandardError.connect(self._stderr) proc.error.connect(self._handleerror) def startCommand(self, cmdline, uihandler): self._uihandler = uihandler fullcmdline = _proccmdline(self._ui, _localprocexts) fullcmdline.extend(cmdline) self._proc.start(fullcmdline[0], fullcmdline[1:], QIODevice.ReadOnly) def abortCommand(self): if not self.isCommandRunning(): return _interruptproc(self._proc) def isCommandRunning(self): return self._proc.state() != QProcess.NotRunning @pyqtSlot(int) def _finish(self, ret): self._uihandler = None self.commandFinished.emit(ret) @pyqtSlot(QProcess.ProcessError) def _handleerror(self, error): if error == QProcess.FailedToStart: self.outputReceived.emit(_('failed to start command\n'), 'ui.error') self._finish(-1) elif error != QProcess.Crashed: self.outputReceived.emit(_('error while 
running command\n'), 'ui.error') @pyqtSlot() def _stdout(self): data = self._proc.readAllStandardOutput().data() self._processRead(data, '') @pyqtSlot() def _stderr(self): data = self._proc.readAllStandardError().data() self._processRead(data, 'ui.error') def _processRead(self, fulldata, defaultlabel): for data in pipeui.splitmsgs(fulldata): msg, label = pipeui.unpackmsg(data) if (not defaultlabel # only stdout and self._uihandler.writeOutput(msg, label) >= 0): continue msg = hglib.tounicode(msg) label = hglib.tounicode(label) if 'ui.progress' in label.split(): progress = ProgressMessage(*pipeui.unpackprogress(msg)) self.progressReceived.emit(progress) else: self.outputReceived.emit(msg, label or defaultlabel) class CmdServer(CmdWorker): """Run Mercurial commands in command server process""" def __init__(self, ui, parent=None, cwd=None): super(CmdServer, self).__init__(parent) self._ui = ui self._uihandler = UiHandler() self._readchtable = self._idlechtable self._readq = [] # (ch, data or datasize), ... 
# deadline for arrival of hello message and immature data sec = ui.configint('tortoisehg', 'cmdserver.readtimeout', 5) self._readtimer = QTimer(self, interval=sec * 1000, singleShot=True) self._readtimer.timeout.connect(self._onReadTimeout) self._proc = self._createProc(cwd) self._servicestate = CmdWorker.NotRunning def _createProc(self, cwd): proc = QProcess(self) _fixprocenv(proc) if cwd: proc.setWorkingDirectory(cwd) proc.error.connect(self._onServiceError) proc.finished.connect(self._onServiceFinished) proc.setReadChannel(QProcess.StandardOutput) proc.readyRead.connect(self._onReadyRead) proc.readyReadStandardError.connect(self._onReadyReadError) return proc def serviceState(self): return self._servicestate def _changeServiceState(self, newstate): if self._servicestate == newstate: return self._servicestate = newstate self.serviceStateChanged.emit(newstate) def startService(self): if self._servicestate == CmdWorker.NotRunning: self._startService() elif self._servicestate == CmdWorker.Stopping: self._changeServiceState(CmdWorker.Restarting) def _startService(self): if self._proc.bytesToWrite() > 0: # QTBUG-44517: recreate QProcess to discard remainder of last # request; otherwise it would be written to new process oldproc = self._proc self._proc = self._createProc(oldproc.workingDirectory()) oldproc.setParent(None) cmdline = _proccmdline(self._ui, _localprocexts + _localserverexts) cmdline.extend(['serve', '--cmdserver', 'pipe', '--config', 'ui.interactive=True']) self._readchtable = self._hellochtable self._readtimer.start() self._changeServiceState(CmdWorker.Starting) self._proc.start(cmdline[0], cmdline[1:]) def stopService(self): if self._servicestate in (CmdWorker.Starting, CmdWorker.Ready): self._stopService() elif self._servicestate == CmdWorker.Restarting: self._changeServiceState(CmdWorker.Stopping) def _stopService(self): self._changeServiceState(CmdWorker.Stopping) _interruptproc(self._proc) # make sure "serve" loop ends by EOF (necessary on Windows) 
self._proc.closeWriteChannel() def _emitError(self, msg): self.outputReceived.emit('cmdserver: %s\n' % msg, 'ui.error') @pyqtSlot(QProcess.ProcessError) def _onServiceError(self, error): self._emitError(self._proc.errorString()) if error == QProcess.FailedToStart: self._onServiceFinished() @pyqtSlot() def _onServiceFinished(self): self._uihandler = UiHandler() self._readchtable = self._idlechtable del self._readq[:] self._readtimer.stop() if self._servicestate == CmdWorker.Restarting: self._startService() return if self._servicestate != CmdWorker.Stopping: self._emitError(_('process exited unexpectedly with code %d') % self._proc.exitCode()) self._changeServiceState(CmdWorker.NotRunning) def isCommandRunning(self): return self._readchtable is self._runcommandchtable def startCommand(self, cmdline, uihandler): assert self._servicestate == CmdWorker.Ready assert not self.isCommandRunning() try: data = hglib.fromunicode('\0'.join(cmdline)) except UnicodeEncodeError, inst: self._emitError(_('failed to encode command: %s') % inst) self._finishCommand(-1) return self._uihandler = uihandler self._readchtable = self._runcommandchtable self._proc.write('runcommand\n') self._writeBlock(data) def abortCommand(self): if not self.isCommandRunning(): return _interruptproc(self._proc) def _finishCommand(self, ret): self._uihandler = UiHandler() self._readchtable = self._idlechtable self.commandFinished.emit(ret) def _writeBlock(self, data): self._proc.write(struct.pack('>I', len(data))) self._proc.write(data) @pyqtSlot() def _onReadyRead(self): proc = self._proc headersize = 5 try: while True: header = str(proc.peek(headersize)) if not header: self._readtimer.stop() break if len(header) < headersize: self._readtimer.start() break ch, datasize = struct.unpack('>cI', header) if ch in 'IL': # input channel has no data proc.read(headersize) self._readq.append((ch, datasize)) continue if proc.bytesAvailable() < headersize + datasize: self._readtimer.start() break proc.read(headersize) 
data = str(proc.read(datasize)) self._readq.append((ch, data)) # don't do much things in readyRead slot for simplicity QTimer.singleShot(0, self._dispatchRead) except Exception: self.stopService() raise @pyqtSlot() def _onReadTimeout(self): startbytes = str(self._proc.peek(20)) if startbytes: # data corruption because bad extension might write to stdout? self._emitError(_('timed out while reading: %r...') % startbytes) else: self._emitError(_('timed out waiting for message')) self.stopService() @pyqtSlot() def _dispatchRead(self): try: while self._readq: ch, dataorsize = self._readq.pop(0) try: chfunc = self._readchtable[ch] except KeyError: if not ch.isupper(): continue raise _ProtocolError(_('unexpected response on required ' 'channel %r') % ch) chfunc(self, ch, dataorsize) except _ProtocolError, inst: self._emitError(inst.args[0]) self.stopService() except Exception: self.stopService() raise @pyqtSlot() def _onReadyReadError(self): fulldata = str(self._proc.readAllStandardError()) for data in pipeui.splitmsgs(fulldata): msg, label = pipeui.unpackmsg(data) msg = hglib.tounicode(msg) label = hglib.tounicode(label) self.outputReceived.emit(msg, label or 'ui.error') def _processHello(self, _ch, data): try: fields = dict(l.split(':', 1) for l in data.splitlines()) capabilities = fields['capabilities'].split() except (KeyError, ValueError): raise _ProtocolError(_('invalid "hello" message: %r') % data) if 'runcommand' not in capabilities: raise _ProtocolError(_('no "runcommand" capability')) self._readchtable = self._idlechtable self._changeServiceState(CmdWorker.Ready) def _processOutput(self, ch, data): msg, label = pipeui.unpackmsg(data) if ch == 'o' and self._uihandler.writeOutput(msg, label) >= 0: return msg = hglib.tounicode(msg) label = hglib.tounicode(label) labelset = label.split() if 'ui.progress' in labelset: progress = ProgressMessage(*pipeui.unpackprogress(msg)) self.progressReceived.emit(progress) elif 'ui.prompt' in labelset: if 'ui.getpass' in labelset: 
mode = UiHandler.PasswordInput elif 'ui.promptchoice' in labelset: mode = UiHandler.ChoiceInput else: mode = UiHandler.TextInput prompt, default = pipeui.unpackprompt(msg) self._uihandler.setPrompt(prompt, mode, default) else: self.outputReceived.emit(msg, label) def _processCommandResult(self, _ch, data): try: ret, = struct.unpack('>i', data) except struct.error: raise _ProtocolError(_('corrupted command result: %r') % data) self._finishCommand(ret) def _processLineRequest(self, _ch, size): text = self._uihandler.getLineInput() if text is None: self._writeBlock('') return try: data = hglib.fromunicode(text) + '\n' except UnicodeEncodeError, inst: self._emitError(_('failed to encode input: %s') % inst) self.abortCommand() return for start in xrange(0, len(data), size): self._writeBlock(data[start:start + size]) _idlechtable = { 'o': _processOutput, 'e': _processOutput, } _hellochtable = { 'o': _processHello, 'e': _processOutput, } _runcommandchtable = { 'o': _processOutput, 'e': _processOutput, 'r': _processCommandResult, # implement 'I' (data input) channel if necessary 'L': _processLineRequest, } _workertypes = { 'proc': CmdProc, 'server': CmdServer, } class CmdSession(QObject): """Run Mercurial commands in a background thread or process""" commandFinished = pyqtSignal(int) # in order to receive only notification messages of session state, use # "controlMessage"; otherwise use "outputReceived" controlMessage = pyqtSignal(str) outputReceived = pyqtSignal(str, str) progressReceived = pyqtSignal(ProgressMessage) readyRead = pyqtSignal() def __init__(self, cmdlines, uihandler, parent=None): super(CmdSession, self).__init__(parent) self._uihandler = uihandler self._worker = None self._queue = list(cmdlines) self._qnextp = 0 self._abortbyuser = False self._erroroutputs = [] self._warningoutputs = [] self._dataoutrbuf = QBuffer(self) self._exitcode = 0 if not cmdlines: # assumes null session is failure for convenience self._exitcode = -1 def run(self, worker): 
'''Execute Mercurial command''' if self._worker or self._qnextp >= len(self._queue): return self._connectWorker(worker) if worker.serviceState() in (CmdWorker.NoService, CmdWorker.Ready): self._runNext() def abort(self): '''Cancel running Mercurial command''' if self.isRunning(): self._worker.abortCommand() self._qnextp = len(self._queue) self._abortbyuser = True elif not self.isFinished(): self._abortbyuser = True # -1 instead of 255 for compatibility with CmdThread self._finish(-1) def isAborted(self): """True if commands have finished by user abort""" return self.isFinished() and self._abortbyuser def isFinished(self): """True if all pending commands have finished or been aborted""" return self._qnextp >= len(self._queue) and not self.isRunning() def isRunning(self): """True if a command is running; False if finished or not started yet""" return bool(self._worker) and self._qnextp > 0 def errorString(self): """Error message received in the last command""" if self._abortbyuser: return _('Terminated by user') else: return ''.join(self._erroroutputs).rstrip() def warningString(self): """Warning message received in the last command""" return ''.join(self._warningoutputs).rstrip() def exitCode(self): """Integer return code of the last command""" return self._exitcode def setCaptureOutput(self, enabled): """If enabled, data outputs (without "ui.*" label) are queued and outputReceived signal is not emitted in that case. This is useful for receiving data to be parsed or copied to the clipboard. 
""" # pseudo FIFO between client "rbuf" and worker "wbuf"; not efficient # for large data since all outputs will be stored in memory if enabled: self._dataoutrbuf.open(QIODevice.ReadOnly | QIODevice.Truncate) dataoutwbuf = QBuffer(self._dataoutrbuf.buffer()) dataoutwbuf.bytesWritten.connect(self.readyRead) dataoutwbuf.open(QIODevice.WriteOnly) else: self._dataoutrbuf.close() dataoutwbuf = None self.setOutputDevice(dataoutwbuf) def setOutputDevice(self, device): """If set, data outputs will be sent to the specified device""" if self.isRunning(): raise RuntimeError('command already running') self._uihandler.setDataOutputDevice(device) def read(self, maxlen): """Read output if capturing enabled; ui messages are not included""" return self._dataoutrbuf.read(maxlen) def readAll(self): return self._dataoutrbuf.readAll() def readLine(self, maxlen=0): return self._dataoutrbuf.readLine(maxlen) def canReadLine(self): return self._dataoutrbuf.canReadLine() def peek(self, maxlen): return self._dataoutrbuf.peek(maxlen) def _connectWorker(self, worker): self._worker = worker worker.serviceStateChanged.connect(self._onWorkerStateChanged) worker.commandFinished.connect(self._onCommandFinished) worker.outputReceived.connect(self.outputReceived) worker.outputReceived.connect(self._captureOutput) worker.progressReceived.connect(self.progressReceived) def _disconnectWorker(self): worker = self._worker if not worker: return worker.serviceStateChanged.disconnect(self._onWorkerStateChanged) worker.commandFinished.disconnect(self._onCommandFinished) worker.outputReceived.disconnect(self.outputReceived) worker.outputReceived.disconnect(self._captureOutput) worker.progressReceived.disconnect(self.progressReceived) self._worker = None def _emitControlMessage(self, msg): self.controlMessage.emit(msg) self.outputReceived.emit(msg + '\n', 'control') def _runNext(self): cmdline = self._queue[self._qnextp] self._qnextp += 1 self._emitControlMessage('% hg ' + hglib.prettifycmdline(cmdline)) 
self._worker.startCommand(cmdline, self._uihandler) def _finish(self, ret): self._qnextp = len(self._queue) self._disconnectWorker() self._exitcode = ret self.commandFinished.emit(ret) @pyqtSlot(int) def _onWorkerStateChanged(self, state): if state == CmdWorker.Ready: assert self._qnextp == 0 self._runNext() elif state == CmdWorker.NotRunning: # unexpected end of command execution self._finish(-1) @pyqtSlot(int) def _onCommandFinished(self, ret): if ret == -1: if self._abortbyuser: msg = _('[command terminated by user %s]') else: msg = _('[command interrupted %s]') elif ret: msg = _('[command returned code %d %%s]') % ret else: msg = _('[command completed successfully %s]') self._emitControlMessage(msg % time.asctime()) if ret != 0 or self._qnextp >= len(self._queue): self._finish(ret) else: self._runNext() @pyqtSlot(str, str) def _captureOutput(self, msg, label): if not label: return # fast path labelset = unicode(label).split() # typically ui.error is sent only once at end if 'ui.error' in labelset: self._erroroutputs.append(unicode(msg)) elif 'ui.warning' in labelset: self._warningoutputs.append(unicode(msg)) def nullCmdSession(): """Finished CmdSession object which can be used as the initial value exitCode() is -1 so that the command dialog can finish with error status if nothing executed. >>> sess = nullCmdSession() >>> sess.isFinished(), sess.isRunning(), sess.isAborted(), sess.exitCode() (True, False, False, -1) >>> sess.abort() # should not change flags >>> sess.isFinished(), sess.isRunning(), sess.isAborted(), sess.exitCode() (True, False, False, -1) Null session can be set up just like one made by runCommand(). It can be used as an object representing failure or canceled operation. 
>>> sess.setOutputDevice(QBuffer()) """ return CmdSession([], UiHandler()) class CmdAgent(QObject): """Manage requests of Mercurial commands""" serviceStopped = pyqtSignal() busyChanged = pyqtSignal(bool) # Signal forwarding: # worker ---- agent commandFinished: session (= last one of worker) # \ / outputReceived: worker + session # session progressReceived: worker (= session) # # Inactive session is not started by the agent, so agent.commandFinished # won't be emitted when waiting session is aborted. commandFinished = pyqtSignal(CmdSession) outputReceived = pyqtSignal(str, str) progressReceived = pyqtSignal(ProgressMessage) # isBusy() is False when the last commandFinished is emitted, but you # shouldn't rely on the emission order of busyChanged and commandFinished. def __init__(self, ui, parent=None, cwd=None, worker=None): super(CmdAgent, self).__init__(parent) self._ui = ui self._worker = self._createWorker(cwd, worker or 'server') self._sessqueue = [] # [active, waiting...] self._runlater = QTimer(self, interval=0, singleShot=True) self._runlater.timeout.connect(self._runNextSession) def isServiceRunning(self): stoppedstates = (CmdWorker.NoService, CmdWorker.NotRunning) return self._worker.serviceState() not in stoppedstates def stopService(self): """Shut down back-end services so that this can be deleted safely or reconfigured; serviceStopped will be emitted asynchronously""" self._worker.stopService() @pyqtSlot() def _tryEmitServiceStopped(self): if not self.isServiceRunning(): self.serviceStopped.emit() def isBusy(self): return bool(self._sessqueue) def _enqueueSession(self, sess): self._sessqueue.append(sess) if len(self._sessqueue) == 1: self.busyChanged.emit(self.isBusy()) # make sure no command signals emitted in the current context self._runlater.start() def _dequeueSession(self): del self._sessqueue[0] if self._sessqueue: # make sure client can receive commandFinished before next session self._runlater.start() else: self._runlater.stop() 
self.busyChanged.emit(self.isBusy()) def _cleanupWaitingSession(self): for i in reversed(xrange(1, len(self._sessqueue))): sess = self._sessqueue[i] if sess.isFinished(): del self._sessqueue[i] sess.setParent(None) def runCommand(self, cmdline, uihandler=None): """Executes a single Mercurial command asynchronously and returns new CmdSession object""" return self.runCommandSequence([cmdline], uihandler) def runCommandSequence(self, cmdlines, uihandler=None): """Executes a series of Mercurial commands asynchronously and returns new CmdSession object which will provide notification signals. The optional uihandler is the call-back of user-interaction requests. If uihandler does not implement UiHandler interface, it will be used as the parent widget of the default InteractiveUiHandler. If uihandler is None, no interactive prompt will be displayed. If the specified uihandler is a UiHandler object, it should be created per request in order to avoid sharing the same uihandler across several CmdSession objects. CmdSession object will be disowned on command finished. The specified uihandler is unrelated to the lifetime of CmdSession object. If one of the preceding command exits with non-zero status, the following commands won't be executed. 
""" if not isinstance(uihandler, UiHandler): uihandler = _createDefaultUiHandler(uihandler) sess = CmdSession(cmdlines, uihandler, self) sess.commandFinished.connect(self._onCommandFinished) sess.controlMessage.connect(self._forwardControlMessage) self._enqueueSession(sess) return sess def abortCommands(self): """Abort running and queued commands; all command sessions will emit commandFinished""" for sess in self._sessqueue[:]: sess.abort() def _createWorker(self, cwd, name): self._ui.debug("creating cmdworker '%s'\n" % name) worker = _workertypes[name](self._ui, self, cwd) worker.serviceStateChanged.connect(self._tryEmitServiceStopped) worker.outputReceived.connect(self.outputReceived) worker.progressReceived.connect(self.progressReceived) return worker @pyqtSlot() def _runNextSession(self): sess = self._sessqueue[0] worker = self._worker assert not worker.isCommandRunning() sess.run(worker) # start after connected to sess so that it can receive immediate error worker.startService() @pyqtSlot() def _onCommandFinished(self): sess = self._sessqueue[0] if not sess.isFinished(): # waiting session is aborted, just delete it self._cleanupWaitingSession() return self._dequeueSession() self.commandFinished.emit(sess) sess.setParent(None) @pyqtSlot(str) def _forwardControlMessage(self, msg): self.outputReceived.emit(msg + '\n', 'control')
gpl-2.0
hastalafiesta/HKernel
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for # traces drawing and overview. # # Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com> # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. try: import wx except ImportError: raise ImportError, "You need to install the wxpython lib for this script" class RootFrame(wx.Frame): Y_OFFSET = 100 RECT_HEIGHT = 100 RECT_SPACE = 50 EVENT_MARKING_WIDTH = 5 def __init__(self, sched_tracer, title, parent = None, id = -1): wx.Frame.__init__(self, parent, id, title) (self.screen_width, self.screen_height) = wx.GetDisplaySize() self.screen_width -= 10 self.screen_height -= 10 self.zoom = 0.5 self.scroll_scale = 20 self.sched_tracer = sched_tracer self.sched_tracer.set_root_win(self) (self.ts_start, self.ts_end) = sched_tracer.interval() self.update_width_virtual() self.nr_rects = sched_tracer.nr_rectangles() + 1 self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)) # whole window panel self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height)) # scrollable container self.scroll = wx.ScrolledWindow(self.panel) self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale) self.scroll.EnableScrolling(True, True) self.scroll.SetFocus() # scrollable drawing area self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2)) self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint) self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press) self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down) self.scroll.Bind(wx.EVT_PAINT, self.on_paint) self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press) self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down) self.scroll.Fit() self.Fit() self.scroll_panel.SetDimensions(-1, -1, 
self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING) self.txt = None self.Show(True) def us_to_px(self, val): return val / (10 ** 3) * self.zoom def px_to_us(self, val): return (val / self.zoom) * (10 ** 3) def scroll_start(self): (x, y) = self.scroll.GetViewStart() return (x * self.scroll_scale, y * self.scroll_scale) def scroll_start_us(self): (x, y) = self.scroll_start() return self.px_to_us(x) def paint_rectangle_zone(self, nr, color, top_color, start, end): offset_px = self.us_to_px(start - self.ts_start) width_px = self.us_to_px(end - self.ts_start) offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)) width_py = RootFrame.RECT_HEIGHT dc = self.dc if top_color is not None: (r, g, b) = top_color top_color = wx.Colour(r, g, b) brush = wx.Brush(top_color, wx.SOLID) dc.SetBrush(brush) dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH) width_py -= RootFrame.EVENT_MARKING_WIDTH offset_py += RootFrame.EVENT_MARKING_WIDTH (r ,g, b) = color color = wx.Colour(r, g, b) brush = wx.Brush(color, wx.SOLID) dc.SetBrush(brush) dc.DrawRectangle(offset_px, offset_py, width_px, width_py) def update_rectangles(self, dc, start, end): start += self.ts_start end += self.ts_start self.sched_tracer.fill_zone(start, end) def on_paint(self, event): dc = wx.PaintDC(self.scroll_panel) self.dc = dc width = min(self.width_virtual, self.screen_width) (x, y) = self.scroll_start() start = self.px_to_us(x) end = self.px_to_us(x + width) self.update_rectangles(dc, start, end) def rect_from_ypixel(self, y): y -= RootFrame.Y_OFFSET rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE) height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE) if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT: return -1 return rect def update_summary(self, txt): if self.txt: self.txt.Destroy() self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50)) def on_mouse_down(self, event): (x, y) = 
event.GetPositionTuple() rect = self.rect_from_ypixel(y) if rect == -1: return t = self.px_to_us(x) + self.ts_start self.sched_tracer.mouse_down(rect, t) def update_width_virtual(self): self.width_virtual = self.us_to_px(self.ts_end - self.ts_start) def __zoom(self, x): self.update_width_virtual() (xpos, ypos) = self.scroll.GetViewStart() xpos = self.us_to_px(x) / self.scroll_scale self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos) self.Refresh() def zoom_in(self): x = self.scroll_start_us() self.zoom *= 2 self.__zoom(x) def zoom_out(self): x = self.scroll_start_us() self.zoom /= 2 self.__zoom(x) def on_key_press(self, event): key = event.GetRawKeyCode() if key == ord("+"): self.zoom_in() return if key == ord("-"): self.zoom_out() return key = event.GetKeyCode() (x, y) = self.scroll.GetViewStart() if key == wx.WXK_RIGHT: self.scroll.Scroll(x + 1, y) elif key == wx.WXK_LEFT: self.scroll.Scroll(x - 1, y) elif key == wx.WXK_DOWN: self.scroll.Scroll(x, y + 1) elif key == wx.WXK_UP: self.scroll.Scroll(x, y - 1)
gpl-2.0
nhuntwalker/astroML
examples/datasets/plot_sdss_galaxy_colors.py
3
1300
""" SDSS Galaxy Colors ------------------ The function :func:`fetch_sdss_galaxy_colors` used below actually queries the SDSS CASjobs server for the colors of the 50,000 galaxies. Below we extract the :math:`u - g` and :math:`g - r` colors for 5000 stars, and scatter-plot the results """ # Author: Jake VanderPlas <vanderplas@astro.washington.edu> # License: BSD # The figure is an example from astroML: see http://astroML.github.com import numpy as np from matplotlib import pyplot as plt from sklearn.neighbors import KNeighborsRegressor from astroML.datasets import fetch_sdss_galaxy_colors #------------------------------------------------------------ # Download data data = fetch_sdss_galaxy_colors() data = data[::10] # truncate for plotting # Extract colors and spectral class ug = data['u'] - data['g'] gr = data['g'] - data['r'] spec_class = data['specClass'] stars = (spec_class == 2) qsos = (spec_class == 3) #------------------------------------------------------------ # Prepare plot fig = plt.figure() ax = fig.add_subplot(111) ax.set_xlim(-0.5, 2.5) ax.set_ylim(-0.5, 1.5) ax.plot(ug[stars], gr[stars], '.', ms=4, c='b', label='stars') ax.plot(ug[qsos], gr[qsos], '.', ms=4, c='r', label='qsos') ax.legend(loc=2) ax.set_xlabel('$u-g$') ax.set_ylabel('$g-r$') plt.show()
bsd-2-clause
xiaom/zenodo
zenodo/modules/deposit/views.py
1
13106
# -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2016, 2017 CERN. # # Zenodo is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Zenodo is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Zenodo; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Redirects for legacy URLs.""" from __future__ import absolute_import, print_function from datetime import datetime from functools import wraps from elasticsearch.exceptions import NotFoundError from flask import Blueprint, abort, current_app, flash, redirect, \ render_template, request, url_for, jsonify from flask_babelex import gettext as _ from flask_security import current_user, login_required from invenio_accounts.models import User from invenio_communities.models import Community from invenio_db import db from invenio_indexer.api import RecordIndexer from invenio_pidrelations.contrib.versioning import PIDVersioning from invenio_pidstore.errors import PIDDeletedError, PIDDoesNotExistError from invenio_pidstore.models import PersistentIdentifier from invenio_pidstore.resolver import Resolver from invenio_records_files.models import RecordsBuckets from invenio_records_ui.signals import record_viewed from zenodo.modules.deposit.utils import delete_record from zenodo.modules.records.permissions import 
record_permission_factory from .api import ZenodoDeposit from .fetchers import zenodo_deposit_fetcher from .forms import RecordDeleteForm from .tasks import datacite_inactivate, datacite_register from .extractor import zenodo_metadata_extractor blueprint = Blueprint( 'zenodo_deposit', __name__, url_prefix='', template_folder='templates', static_folder='static', ) @blueprint.errorhandler(PIDDeletedError) def tombstone_errorhandler(error): """Render tombstone page.""" return render_template( current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'], pid=error.pid, record=error.record or {}, ), 410 def pass_record(action, deposit_cls=ZenodoDeposit): """Pass record and deposit record to function.""" def decorator(f): @wraps(f) def inner(pid_value): # Resolve pid_value to record pid and record pid, record = pid_value.data # Check permissions. permission = record_permission_factory( record=record, action=action) if not permission.can(): abort(403) # Fetch deposit id from record and resolve deposit record and pid. 
depid = zenodo_deposit_fetcher(None, record) if not depid: abort(404) depid, deposit = Resolver( pid_type=depid.pid_type, object_type='rec', getter=deposit_cls.get_record, ).resolve(depid.pid_value) return f(pid=pid, record=record, depid=depid, deposit=deposit) return inner return decorator @login_required def legacy_index(): """Legacy deposit.""" c_id = request.args.get('c', type=str) if c_id: return redirect(url_for('invenio_deposit_ui.new', c=c_id)) return render_template(current_app.config['DEPOSIT_UI_INDEX_TEMPLATE']) @login_required def new(): """Create a new deposit.""" c = Community.get(request.args.get('c', type=str)) return render_template(current_app.config['DEPOSIT_UI_NEW_TEMPLATE'], record={'_deposit': {'id': None}}, community=c) @blueprint.route( '/record/<pid(recid,record_class=' '"zenodo.modules.records.api:ZenodoRecord"):pid_value>', methods=['POST'] ) @login_required @pass_record('update') def edit(pid=None, record=None, depid=None, deposit=None): """Edit a record.""" # If the record doesn't have a DOI, its deposit shouldn't be editable. if 'doi' not in record: abort(404) return redirect(url_for( 'invenio_deposit_ui.{0}'.format(depid.pid_type), pid_value=depid.pid_value )) @blueprint.route( '/record/<pid(recid,record_class=' '"zenodo.modules.records.api:ZenodoRecord"):pid_value>' '/newversion', methods=['POST'] ) @login_required @pass_record('newversion') def newversion(pid=None, record=None, depid=None, deposit=None): """Create a new version of a record.""" # If the record doesn't have a DOI, its deposit shouldn't be editable. 
if 'doi' not in record: abort(404) # FIXME: Maybe this has to go inside the API (`ZenodoDeposit.newversion`) # If this is not the latest version, get the latest and extend it latest_pid = PIDVersioning(child=pid).last_child if pid != latest_pid: # We still want to do a POST, so we specify a 307 reidrect code return redirect(url_for('zenodo_deposit.newversion', pid_value=latest_pid.pid_value), code=307) deposit.newversion() db.session.commit() new_version_deposit = PIDVersioning(child=pid).draft_child_deposit return redirect(url_for( 'invenio_deposit_ui.{0}'.format(new_version_deposit.pid_type), pid_value=new_version_deposit.pid_value )) @blueprint.route( '/record/<pid(recid,record_class=' '"zenodo.modules.records.api:ZenodoRecord"):pid_value>' '/registerconceptdoi', methods=['POST'] ) @login_required @pass_record('registerconceptdoi') def registerconceptdoi(pid=None, record=None, depid=None, deposit=None): """Register the Concept DOI for the record.""" # If the record doesn't have a DOI, its deposit shouldn't be editable. if 'conceptdoi' in record: abort(404) # TODO: Abort with better code if record is versioned deposit.registerconceptdoi() db.session.commit() return redirect(url_for('invenio_records_ui.recid', pid_value=pid.pid_value)) @blueprint.route( '/record' '/<pid(recid,record_class=' '"zenodo.modules.records.api:ZenodoRecord"):pid_value>' '/admin/delete', methods=['GET', 'POST'] ) @login_required @pass_record('delete') def delete(pid=None, record=None, depid=None, deposit=None): """Delete a record.""" # View disabled until properly implemented and tested. 
try: doi = PersistentIdentifier.get('doi', record['doi']) except PIDDoesNotExistError: doi = None owners = User.query.filter(User.id.in_(record.get('owners', []))).all() pids = [pid, depid, doi] if 'conceptdoi' in record: conceptdoi = PersistentIdentifier.get('doi', record['conceptdoi']) pids.append(conceptdoi) else: conceptdoi = None if 'conceptrecid' in record: conceptrecid = PersistentIdentifier.get('recid', record['conceptrecid']) pids.append(conceptrecid) else: conceptrecid = None form = RecordDeleteForm() form.standard_reason.choices = current_app.config['ZENODO_REMOVAL_REASONS'] if form.validate_on_submit(): reason = form.reason.data or dict( current_app.config['ZENODO_REMOVAL_REASONS'] )[form.standard_reason.data] delete_record(record.id, reason, int(current_user.get_id())) flash( _('Record %(recid)s and associated objects successfully deleted.', recid=pid.pid_value), category='success' ) return redirect(url_for('zenodo_frontpage.index')) return render_template( 'zenodo_deposit/delete.html', form=form, owners=owners, pid=pid, pids=pids, record=record, deposit=deposit, ) @blueprint.app_context_processor def current_datetime(): """Template contex processor which adds current datetime to the context.""" now = datetime.utcnow() return { 'current_datetime': now, 'current_date': now.date(), 'current_time': now.time(), } @blueprint.route( '/record/extractmetadata/<version_id>', methods=['POST'] ) def extractmetadata(version_id=None): """Extract metadata for a given file TODO: move to an independent module `invenio-extractmetadata-rest` module. 
""" return jsonify(zenodo_metadata_extractor(version_id)) @blueprint.app_template_filter('tolinksjs') def to_links_js(pid, deposit=None): """Get API links.""" if not isinstance(deposit, ZenodoDeposit): return [] self_url = current_app.config['DEPOSIT_RECORDS_API'].format( pid_value=pid.pid_value) links = { 'self': self_url, 'html': url_for( 'invenio_deposit_ui.{}'.format(pid.pid_type), pid_value=pid.pid_value), 'bucket': current_app.config['DEPOSIT_FILES_API'] + '/{0}'.format( str(deposit.files.bucket.id)), 'discard': self_url + '/actions/discard', 'edit': self_url + '/actions/edit', 'publish': self_url + '/actions/publish', 'newversion': self_url + '/actions/newversion', 'registerconceptdoi': self_url + '/actions/registerconceptdoi', 'files': self_url + '/files', } # Add versioning links conceptrecid = deposit.get('conceptrecid') if conceptrecid: conceptrecid = PersistentIdentifier.get('recid', conceptrecid) pv = PIDVersioning(parent=conceptrecid) latest_record = pv.last_child if latest_record: links['latest'] = current_app.config['RECORDS_API'].format( pid_value=latest_record.pid_value) links['latest_html'] = url_for( 'invenio_records_ui.recid', pid_value=latest_record.pid_value) draft_child_depid = pv.draft_child_deposit if draft_child_depid: links['latest_draft'] = ( current_app.config['DEPOSIT_RECORDS_API'] .format(pid_value=draft_child_depid.pid_value)) links['latest_draft_html'] = url_for( 'invenio_deposit_ui.{}'.format(draft_child_depid.pid_type), pid_value=draft_child_depid.pid_value) return links @blueprint.app_template_filter('tofilesjs') def to_files_js(deposit): """List files in a deposit.""" if not isinstance(deposit, ZenodoDeposit): return [] res = [] for f in deposit.files: res.append({ 'key': f.key, 'version_id': f.version_id, 'checksum': f.file.checksum, 'size': f.file.size, 'completed': True, 'progress': 100, 'links': { 'self': ( current_app.config['DEPOSIT_FILES_API'] + u'/{bucket}/{key}?versionId={version_id}'.format( bucket=f.bucket_id, 
key=f.key, version_id=f.version_id, )) } }) for f in deposit.multipart_files.all(): res.append({ 'key': f.key, 'size': f.size, 'multipart': True, 'completed': f.completed, 'processing': True, 'progress': 100, 'links': { 'self': ( current_app.config['DEPOSIT_FILES_API'] + u'/{bucket}/{key}?uploadId={upload_id}'.format( bucket=f.bucket_id, key=f.key, upload_id=f.upload_id, )), } }) return res def default_view_method(pid, record, template=None): """Default view method for updating record. Sends ``record_viewed`` signal and renders template. :param pid: PID object. :param record: Record object. :param template: Template to render. """ # Fetch deposit id from record and resolve deposit record and pid. depid = zenodo_deposit_fetcher(None, record) if not depid: abort(404) depid, deposit = Resolver( pid_type=depid.pid_type, object_type='rec', getter=ZenodoDeposit.get_record, ).resolve(depid.pid_value) # Put deposit in edit mode if not already. if deposit['_deposit']['status'] != 'draft': deposit = deposit.edit() db.session.commit() record_viewed.send( current_app._get_current_object(), pid=pid, record=record, ) return render_template( template, pid=pid, record=record, )
gpl-2.0
TalShafir/ansible
lib/ansible/modules/storage/netapp/na_ontap_cluster_ha.py
9
4439
#!/usr/bin/python # (c) 2018, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = ''' author: NetApp Ansible Team (ng-ansibleteam@netapp.com) description: - "Enable or disable HA on a cluster" extends_documentation_fragment: - netapp.na_ontap module: na_ontap_cluster_ha options: state: choices: ['present', 'absent'] description: - "Whether HA on cluster should be enabled or disabled." default: present short_description: NetApp ONTAP Manage HA status for cluster version_added: "2.6" ''' EXAMPLES = """ - name: "Enable HA status for cluster" na_ontap_cluster_ha: state: present hostname: "{{ netapp_hostname }}" username: "{{ netapp_username }}" password: "{{ netapp_password }}" """ RETURN = """ """ import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import ansible.module_utils.netapp as netapp_utils from ansible.module_utils.netapp_module import NetAppModule HAS_NETAPP_LIB = netapp_utils.has_netapp_lib() class NetAppOntapClusterHA(object): """ object initialize and class methods """ def __init__(self): self.argument_spec = netapp_utils.na_ontap_host_argument_spec() self.argument_spec.update(dict( state=dict(required=False, choices=['present', 'absent'], default='present'), )) self.module = AnsibleModule( argument_spec=self.argument_spec, supports_check_mode=True ) self.na_helper = NetAppModule() self.parameters = self.na_helper.set_parameters(self.module.params) if HAS_NETAPP_LIB is False: self.module.fail_json(msg="the python NetApp-Lib module is required") else: self.server = netapp_utils.setup_na_ontap_zapi(module=self.module) def modify_cluster_ha(self, configure): """ Enable or disable HA on cluster :return: None """ cluster_ha_modify = 
netapp_utils.zapi.NaElement.create_node_with_children( 'cluster-ha-modify', **{'ha-configured': configure}) try: self.server.invoke_successfully(cluster_ha_modify, enable_tunneling=True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error modifying cluster HA to %s: %s' % (configure, to_native(error)), exception=traceback.format_exc()) def get_cluster_ha_enabled(self): """ Get current cluster HA details :return: dict if enabled, None if disabled """ cluster_ha_get = netapp_utils.zapi.NaElement('cluster-ha-get') try: result = self.server.invoke_successfully(cluster_ha_get, enable_tunneling=True) except netapp_utils.zapi.NaApiError as error: self.module.fail_json(msg='Error fetching cluster HA details', exception=traceback.format_exc()) cluster_ha_info = result.get_child_by_name('attributes').get_child_by_name('cluster-ha-info') if cluster_ha_info.get_child_content('ha-configured') == 'true': return {'ha-configured': True} return None def apply(self): """ Apply action to cluster HA """ results = netapp_utils.get_cserver(self.server) cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results) netapp_utils.ems_log_event("na_ontap_cluster_ha", cserver) current = self.get_cluster_ha_enabled() cd_action = self.na_helper.get_cd_action(current, self.parameters) if cd_action == 'create': self.modify_cluster_ha("true") elif cd_action == 'delete': self.modify_cluster_ha("false") self.module.exit_json(changed=self.na_helper.changed) def main(): """ Create object and call apply """ ha_obj = NetAppOntapClusterHA() ha_obj.apply() if __name__ == '__main__': main()
gpl-3.0
sometallgit/AutoUploader
Python27/Lib/site-packages/setuptools/glob.py
242
5207
""" Filename globbing utility. Mostly a copy of `glob` from Python 3.5. Changes include: * `yield from` and PEP3102 `*` removed. * `bytes` changed to `six.binary_type`. * Hidden files are not ignored. """ import os import re import fnmatch from setuptools.extern.six import binary_type __all__ = ["glob", "iglob", "escape"] def glob(pathname, recursive=False): """Return a list of paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames starting with a dot are special cases that are not matched by '*' and '?' patterns. If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ return list(iglob(pathname, recursive=recursive)) def iglob(pathname, recursive=False): """Return an iterator which yields the paths matching a pathname pattern. The pattern may contain simple shell-style wildcards a la fnmatch. However, unlike fnmatch, filenames starting with a dot are special cases that are not matched by '*' and '?' patterns. If recursive is true, the pattern '**' will match any files and zero or more directories and subdirectories. """ it = _iglob(pathname, recursive) if recursive and _isrecursive(pathname): s = next(it) # skip empty string assert not s return it def _iglob(pathname, recursive): dirname, basename = os.path.split(pathname) if not has_magic(pathname): if basename: if os.path.lexists(pathname): yield pathname else: # Patterns ending with a slash should match only directories if os.path.isdir(dirname): yield pathname return if not dirname: if recursive and _isrecursive(basename): for x in glob2(dirname, basename): yield x else: for x in glob1(dirname, basename): yield x return # `os.path.split()` returns the argument itself as a dirname if it is a # drive or UNC path. Prevent an infinite recursion if a drive or UNC path # contains magic characters (i.e. r'\\?\C:'). 
if dirname != pathname and has_magic(dirname): dirs = _iglob(dirname, recursive) else: dirs = [dirname] if has_magic(basename): if recursive and _isrecursive(basename): glob_in_dir = glob2 else: glob_in_dir = glob1 else: glob_in_dir = glob0 for dirname in dirs: for name in glob_in_dir(dirname, basename): yield os.path.join(dirname, name) # These 2 helper functions non-recursively glob inside a literal directory. # They return a list of basenames. `glob1` accepts a pattern while `glob0` # takes a literal basename (so it only has to check for its existence). def glob1(dirname, pattern): if not dirname: if isinstance(pattern, binary_type): dirname = os.curdir.encode('ASCII') else: dirname = os.curdir try: names = os.listdir(dirname) except OSError: return [] return fnmatch.filter(names, pattern) def glob0(dirname, basename): if not basename: # `os.path.split()` returns an empty basename for paths ending with a # directory separator. 'q*x/' should match only directories. if os.path.isdir(dirname): return [basename] else: if os.path.lexists(os.path.join(dirname, basename)): return [basename] return [] # This helper function recursively yields relative pathnames inside a literal # directory. def glob2(dirname, pattern): assert _isrecursive(pattern) yield pattern[:0] for x in _rlistdir(dirname): yield x # Recursively yields relative pathnames inside a literal directory. 
def _rlistdir(dirname): if not dirname: if isinstance(dirname, binary_type): dirname = binary_type(os.curdir, 'ASCII') else: dirname = os.curdir try: names = os.listdir(dirname) except os.error: return for x in names: yield x path = os.path.join(dirname, x) if dirname else x for y in _rlistdir(path): yield os.path.join(x, y) magic_check = re.compile('([*?[])') magic_check_bytes = re.compile(b'([*?[])') def has_magic(s): if isinstance(s, binary_type): match = magic_check_bytes.search(s) else: match = magic_check.search(s) return match is not None def _isrecursive(pattern): if isinstance(pattern, binary_type): return pattern == b'**' else: return pattern == '**' def escape(pathname): """Escape all special characters. """ # Escaping is done by wrapping any of "*?[" between square brackets. # Metacharacters do not work in the drive part and shouldn't be escaped. drive, pathname = os.path.splitdrive(pathname) if isinstance(pathname, binary_type): pathname = magic_check_bytes.sub(br'[\1]', pathname) else: pathname = magic_check.sub(r'[\1]', pathname) return drive + pathname
mit
BhallaLab/moose-thalamocortical
gui/glcellloader.py
2
3616
# glcellloader.py --- # # Filename: loadcell.py # Description: # Author: Subhasis Ray # Maintainer: # Created: Thu Feb 4 20:34:34 2010 (+0530) # Version: # Last-Updated: Sat Jun 26 15:50:44 2010 (+0530) # By: Subhasis Ray # Update #: 107 # URL: # Keywords: # Compatibility: # # # Commentary: # # # # # Change log: # # # # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, Fifth # Floor, Boston, MA 02110-1301, USA. 
# # # Code: import sys import os import moose SIMDT = 1e-5 GLDT = 1e-2 RUNTIME = 1000e-3 morphs_dir = '../../../DEMOS/gl-demo/morphologies/' models = {'CA1':'ca1passive.p', 'Mitral': 'mit.p', 'Purkinje1': 'psmall.p', 'Purkinje2': 'Purk2M9s.p', 'Purkinje3': 'Purkinje4M9.p' } CONTEXT = moose.PyMooseBase.getContext() class GLCellLoader(object): def __init__(self, cell_type, host='localhost', port='9999'): '''Cell loader for glcell using glclient''' filepath = morphs_dir + models[cell_type] # Load the channel definitions from bulbchan.g CONTEXT.loadG('../../../DEMOS/mitral-ee/bulbchan.g') cwe = CONTEXT.getCwe() CONTEXT.setCwe('/library') CONTEXT.runG('make_LCa3_mit_usb') CONTEXT.runG('make_Na_rat_smsnn') CONTEXT.runG('make_Na2_rat_smsnn') CONTEXT.runG('make_KA_bsg_yka') CONTEXT.runG('make_KM_bsg_yka') CONTEXT.runG('make_K_mit_usb') CONTEXT.runG('make_K2_mit_usb') # CONTEXT.runG('make_K_slow_usb') CONTEXT.runG('make_Na_mit_usb') CONTEXT.runG('make_Na2_mit_usb') # CONTEXT.runG('make_Ca_mit_conc') # CONTEXT.runG('make_Kca_mit_usb') print 'created channels' CONTEXT.setCwe(cwe) CONTEXT.readCell(filepath, cell_type) self.cell = moose.Cell(cell_type) self.glServer = moose.GLcell('gl_' + cell_type) self.glServer.vizpath = self.cell.path self.glServer.port = port self.glServer.host = host self.glServer.attribute = 'Vm' self.glServer.threshold = 1 self.glServer.sync = 'on' self.glServer.vscale = 1.0 self.glServer.bgcolor = '050050050' self.glServer.highvalue = 0.05 self.glServer.loader = -0.1 if cell_type == 'Mitral': self.glServer.vscale = 10.0 # ** Assuming every cell has a top-level compartment called # ** soma self.pulsegen = moose.PulseGen('pg_' + cell_type) self.pulsegen.firstDelay = 5e-3 self.pulsegen.firstWidth = 50e-3 self.pulsegen.firstLevel = 1e-9 self.pulsegen.connect('outputSrc', moose.Compartment(self.cell.path + '/soma'), 'injectMsg') if __name__ == '__main__': if len(sys.argv) > 1: loader = GLCellLoader(sys.argv[1]) else: loader = GLCellLoader('Purkinje3') print 
'loaded morphology file' CONTEXT.setClock(0, SIMDT) CONTEXT.setClock(1, SIMDT) CONTEXT.setClock(2, SIMDT) CONTEXT.setClock(4, GLDT) CONTEXT.useClock(4, '/#[TYPE=GLcell]') print 'Before reset' CONTEXT.reset() print 'After reset' CONTEXT.step(RUNTIME) # # glcellloader.py ends here
lgpl-2.1
cjcjameson/gpdb
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/crash/test_failover_recovery_mode.py
7
8700
""" Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import socket import tinctest import unittest2 as unittest from tinctest.models.scenario import ScenarioTestCase from mpp.gpdb.tests.storage.walrepl.gpactivatestandby import GpactivateStandby from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby from mpp.gpdb.tests.storage.walrepl.crash import WalReplKillProcessTestCase from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility from mpp.gpdb.tests.storage.walrepl import lib as walrepl pgutil = GpUtility() class WalReplKillProcessScenarioTestCase(ScenarioTestCase): origin_mdd = os.environ.get('MASTER_DATA_DIRECTORY') def __init__(self, methodName): self.standby_dir = os.environ.get('MASTER_DATA_DIRECTORY') self.pgdatabase = self.pgdatabase = os.environ.get('PGDATABASE') super(WalReplKillProcessScenarioTestCase,self).__init__(methodName) def setUp(self): pgutil.check_and_start_gpdb() # We should forcibly recreate standby, as it might has been promoted. 
# here we need to install locally, otherwise can not run remote sql pgutil.remove_standby() pgutil.install_standby(new_stdby_host=socket.gethostname()) gpact_stdby = GpactivateStandby() gpinit_stdb = GpinitStandby() WalReplKillProcessTestCase.stdby_port = gpact_stdby.get_standby_port() WalReplKillProcessTestCase.stdby_host = gpinit_stdb.get_standbyhost() self.standby_dir = gpact_stdby.get_standby_dd() def tearDown(self): walrepl.cleanupFilespaces(dbname=os.environ.get('PGDATABASE')) @classmethod def setUpClass(cls): pgutil.check_and_start_gpdb() gp_walrepl = WalReplKillProcessTestCase('initial_setup') gp_walrepl.initial_setup() @classmethod def tearDownClass(cls): pgutil.remove_standby() def test_failover_run__workload(self): ''' activate the standby, run workload, check master and standby integrity, currently support local standby, can not run workload remotely ''' activatestdby = GpactivateStandby() activatestdby.activate() with walrepl.NewEnv(MASTER_DATA_DIRECTORY=self.standby_dir, PGPORT=WalReplKillProcessTestCase.stdby_port, PGDATABASE=self.pgdatabase) as env: test_case_list1 = [] test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase") test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase") self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg") self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase") self.test_case_scenario.append(test_case_list3) pgutil.failback_to_original_master(self.origin_mdd, WalReplKillProcessTestCase.stdby_host, self.standby_dir,WalReplKillProcessTestCase.stdby_port) def test_initstandby_run_workload(self): #run workload while initstandby, check master mirror integrity pgutil.remove_standby() test_case_list0 = [] 
test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.gpinitstandby_helper") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase") self.test_case_scenario.append(test_case_list0) test_case_list1 = [] test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg") self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase") self.test_case_scenario.append(test_case_list2) def test_initstandby_after_run_workload(self): #run workload before initstandby, check master mirror integrity pgutil.remove_standby() test_case_list0 = [] test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase") self.test_case_scenario.append(test_case_list0) test_case_list1 = [] test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.gpinitstandby_helper") self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg") self.test_case_scenario.append(test_case_list2) test_case_list3 = [] test_case_list3.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase") self.test_case_scenario.append(test_case_list3) def test_run_workload_with_standby(self): #run workload while initstandby already installed, check master mirror integrity test_case_list0 = [] test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase") self.test_case_scenario.append(test_case_list0) test_case_list1 = [] 
test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg") self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase") self.test_case_scenario.append(test_case_list2) def test_run_workload_remove_standby(self): #run workload while removing initstandby, check master mirror integrity test_case_list0 = [] test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.removestandby_helper") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase") self.test_case_scenario.append(test_case_list0) test_case_list1 = [] test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg") self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase") self.test_case_scenario.append(test_case_list2) def test_run_workload_before_activate_standby(self): #run workload while removing initstandby, check master mirror integrity activatestdby = GpactivateStandby() test_case_list0 = [] test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.dml.test_dml.DMLTestCase") test_case_list0.append("mpp.gpdb.tests.storage.walrepl.crash.ddl.test_ddl.DDLTestCase") self.test_case_scenario.append(test_case_list0) activatestdby.activate() test_case_list1 = [] test_case_list1.append("mpp.gpdb.tests.storage.walrepl.crash.WalReplKillProcessTestCase.check_mirror_seg") self.test_case_scenario.append(test_case_list1) test_case_list2 = [] test_case_list2.append("mpp.gpdb.tests.storage.walrepl.crash.verify.verify.DataVerifyTestCase") self.test_case_scenario.append(test_case_list2) pgutil.failback_to_original_master(self.origin_mdd,WalReplKillProcessTestCase.stdby_host, 
self.standby_dir,WalReplKillProcessTestCase.stdby_port)
apache-2.0
trolldbois/python-haystack-reverse
test/haystack/reverse/test_re_string.py
1
8993
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for haystack.reverse.structure."""

from __future__ import print_function

import logging
import unittest

from haystack.reverse import re_string

__author__ = "Loic Jaquemet"
__copyright__ = "Copyright (C) 2012 Loic Jaquemet"
__license__ = "GPL"
__maintainer__ = "Loic Jaquemet"
__email__ = "loic.jaquemet+python@gmail.com"
__status__ = "Production"


class TestReString(unittest.TestCase):
    """Unit tests for the string-detection helpers in haystack.reverse.re_string."""

    @classmethod
    def setUpClass(cls):
        # context.get_context('test/src/test-ctypes3.dump')
        cls.context = None
        # test1: UTF-16LE Windows path, NUL-terminated (double \x00 at the end).
        cls.test1 = b'''C\x00:\x00\\\x00U\x00s\x00e\x00r\x00s\x00\\\x00j\x00a\x00l\x00\\\x00A\x00p\x00p\x00D\x00a\x00t\x00a\x00\\\x00R\x00o\x00a\x00m\x00i\x00n\x00g\x00\\\x00M\x00i\x00c\x00r\x00o\x00s\x00o\x00f\x00t\x00\\\x00I\x00n\x00t\x00e\x00r\x00n\x00e\x00t\x00 \x00E\x00x\x00p\x00l\x00o\x00r\x00e\x00r\x00\\\x00Q\x00u\x00i\x00c\x00k\x00 \x00L\x00a\x00u\x00n\x00c\x00h\x00\\\x00d\x00e\x00s\x00k\x00t\x00o\x00p\x00.\x00i\x00n\x00i\x00\x00\x00'''
        # test2: UTF-16LE name ("Lo\xefc Jaquemet"), NUL-terminated.
        cls.test2 = b'''\x4C\x00\x6F\x00\xEF\x00\x63\x00\x20\x00\x4A\x00\x61\x00\x71\x00\x75\x00\x65\x00\x6D\x00\x65\x00\x74\x00\x00\x00'''
        # test3: UTF-16LE registry key path (no trailing NUL pair).
        cls.test3 = b'''\\\x00R\x00E\x00G\x00I\x00S\x00T\x00R\x00Y\x00\\\x00U\x00S\x00E\x00R\x00\\\x00S\x00-\x001\x00-\x005\x00-\x002\x001\x00-\x002\x008\x008\x004\x000\x006\x003\x000\x007\x003\x00-\x003\x003\x002\x009\x001\x001\x007\x003\x002\x000\x00-\x003\x008\x001\x008\x000\x003\x009\x001\x009\x009\x00-\x001\x000\x000\x000\x00_\x00C\x00L\x00A\x00S\x00S\x00E\x00S\x00\\\x00W\x00o\x00w\x006\x004\x003\x002\x00N\x00o\x00d\x00e\x00\\\x00C\x00L\x00S\x00I\x00D\x00\\\x00{\x007\x006\x007\x006\x005\x00B\x001\x001\x00-\x003\x00F\x009\x005\x00-\x004\x00A\x00F\x002\x00-\x00A\x00C\x009\x00D\x00-\x00E\x00A\x005\x005\x00D\x008\x009\x009\x004\x00F\x001\x00A\x00}\x00'''
        # test4: plain ASCII run ending with non-printable bytes + NUL.
        cls.test4 = b'''edrtfguyiopserdtyuhijo45567890oguiy4e65rtiu\x07\x08\x09\x00'''
        # test5: like test4 but with an embedded NUL after 4 bytes and \xf1 tail.
        cls.test5 = b'''edrt\x00fguyiopserdtyuhijo45567890oguiy4e65rtiu\xf1\x07\x08\x09\x00\x00'''
        # test6: like test4 but starting with a non-ASCII byte (\xf3).
        cls.test6 = b'''\xf3drtfguyiopserdtyuhijo45567890oguiy4e65rtiu\xf1\x07\x08\x09\x00'''
        cls.test7 = b'\x1e\x1c\x8c\xd8\xcc\x01\x00'  # pure crap
        # test8: two UTF-16LE paths separated by binary padding, then "PuTY".
        cls.test8 = b'C\x00:\x00\\\x00W\x00i\x00n\x00d\x00o\x00w\x00s\x00\\\x00S\x00y\x00s\x00t\x00e\x00m\x003\x002\x00\\\x00D\x00r\x00i\x00v\x00e\x00r\x00S\x00t\x00o\x00r\x00e\x00\x00\x00\xf1/\xa6\x08\x00\x00\x00\x88,\x00\x00\x00C\x00:\x00\\\x00P\x00r\x00o\x00g\x00r\x00a\x00m\x00 \x00F\x00i\x00l\x00e\x00s\x00 \x00(\x00x\x008\x006\x00)\x00\x00\x00P\x00u\x00T\x00Y\x00'
        # test9: UTF-16LE path preceded by a non-aligned 4-byte prefix.
        cls.test9 = b'\x01\x01@\x00C\x00:\x00\\\x00W\x00i\x00n\x00d\x00o\x00w\x00s\x00'
        # test10: single-byte (latin-1-like) encoding of the test2 name.
        cls.test10 = b'''\x4C\x6F\xEF\x63\x20\x4A\x61\x71\x75\x65\x6D\x65\x74'''
        # test11: two NUL-separated UTF-16LE service names.
        cls.test11 = b'D\x00c\x00o\x00m\x00L\x00a\x00u\x00n\x00c\x00h\x00\x00\x00T\x00e\x00r\x00m\x00S\x00e\x00r\x00v\x00i\x00c\x00e\x00\x00\x00\x00\x00'

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_startsWithNulTerminatedString(self):
        # self.skipTest('')
        size, codec, txt = re_string.startsWithNulTerminatedString(self.test1)
        self.assertEqual(size, len(self.test1))
        pass

    @unittest.expectedFailure
    def test_try_decode_string(self):
        # self.skipTest('')
        size, codec, txt = re_string.try_decode_string(self.test1)
        self.assertEqual(size, len(self.test1))
        size, codec, txt = re_string.try_decode_string(self.test2)
        self.assertEqual(size, len(self.test2))
        size, codec, txt = re_string.try_decode_string(self.test3)
        self.assertEqual(size, len(self.test3))
        # test4/test5 end in non-printable bytes, so the decoded size is short.
        size, codec, txt = re_string.try_decode_string(self.test4)
        self.assertEqual(size, len(self.test4) - 4)
        size, codec, txt = re_string.try_decode_string(self.test5)
        self.assertEqual(size, len(self.test5) - 5)
        # test7 is not decodable as a string at all.
        ret = re_string.try_decode_string(self.test7)
        self.assertFalse(ret)
        size, codec, txt = re_string.try_decode_string(self.test8)
        self.assertEqual(size, len(self.test8))
        pass

    def test_testEncoding(self):
        # self.skipTest('')
        uni = self.test1
        size, encoded = re_string.testEncoding(uni, 'utf-16le')
        self.assertEqual(size, len(uni))
        x3 = self.test2
        size, encoded = re_string.testEncoding(x3, 'utf-16le')
        self.assertEqual(size, len(x3))
        # test4 is ASCII, so utf-16le must fail (-1) but utf-8 succeeds.
        size, encoded = re_string.testEncoding(self.test4, 'utf-16le')
        self.assertEqual(size, -1)
        size, encoded = re_string.testEncoding(self.test4, 'utf-8')
        self.assertEqual(size, len(self.test4))
        pass

    def test_testAllEncodings(self):
        # self.skipTest('')
        # The best candidate (solutions[0]) should cover the full buffer.
        uni = self.test1
        solutions = re_string.testAllEncodings(uni)
        size, codec, encoded = solutions[0]
        self.assertEqual(size, len(uni), '%s' % codec)
        x3 = self.test2
        solutions = re_string.testAllEncodings(x3)
        size, codec, encoded = solutions[0]
        self.assertEqual(size, len(x3))
        solutions = re_string.testAllEncodings(self.test3)
        size, codec, encoded = solutions[0]
        self.assertEqual(size, len(self.test3))
        solutions = re_string.testAllEncodings(self.test4)
        size, codec, encoded = solutions[0]
        self.assertEqual(size, len(self.test4))
        pass

    def test_nocopy_class(self):
        # self.skipTest('')
        # Nocopy should behave like the equivalent slice of the backing string.
        s = '1234567890'
        x = re_string.Nocopy(s, 2, 9)
        x1 = s[2:9]
        self.assertEqual(len(x), len(x1))
        for i in range(len(x)):
            self.assertEqual(x[i], x1[i])
        #
        val = x[2:4]
        self.assertEqual(val, '56')
        self.assertEqual(val, x[2:4])
        self.assertEqual(s[4:-1], x[2:])
        self.assertEqual(s[2:-1], x[:16])
        self.assertEqual(s[2:-1], x[:])
        self.assertEqual(s[2:-1], x[0:])
        self.assertEqual(s[2:-1], x)
        # try more
        val = x[2:6:2]
        self.assertEqual(val, '57')
        self.assertEqual(re_string.Nocopy(s, 9, 10), s[9:10])
        self.assertEqual(re_string.Nocopy(s, 9, 10), '0')
        self.assertEqual(re_string.Nocopy(s, -2, -1), '9')
        # self.assertRaises(re_string.Nocopy(s,9,11))

    def test_rfind_utf16(self):
        # print len(self.test1)
        self.assertEqual(0, re_string.rfind_utf16(self.test1, 0, len(self.test1), True, 4))
        self.assertEqual(0, re_string.rfind_utf16(self.test2, 0, len(self.test2), True, 4))
        self.assertEqual(0, re_string.rfind_utf16(self.test3, 0, len(self.test3), True, 4))
        self.assertEqual(-1, re_string.rfind_utf16(self.test4, 0, len(self.test4), True, 4))
        self.assertEqual(-1, re_string.rfind_utf16(self.test5, 0, len(self.test5), True, 4))
        self.assertEqual(-1, re_string.rfind_utf16(self.test6, 0, len(self.test6), True, 4))
        self.assertEqual(-1, re_string.rfind_utf16(self.test7, 0, len(self.test7), True, 4))
        # truncated last field
        # print repr(self.test8[120:])
        self.assertEqual(122, re_string.rfind_utf16(self.test8, 0, len(self.test8), False, 4))
        # find start with limited size
        self.assertEqual(0, re_string.rfind_utf16(self.test8, 0, 64, True, 4))
        # middle field ( 12+64 )
        self.assertEqual(12, re_string.rfind_utf16(self.test8, 64, 58, True, 4))
        # non aligned middle field ?
        # TODO self.assertEqual( 4, re_string.rfind_utf16(self.test9, 0,
        # len(self.test9) ))
        ##
        # self.assertEqual(0, re_string.rfind_utf16(self.test11, 0, 48, False, 4))
        print(re_string.rfind_utf16(self.test11, 0, 44, False, 4))

    def test_find_ascii(self):
        self.assertEqual((-1, -1), re_string.find_ascii(self.test1, 0, len(self.test1)))
        self.assertEqual((0, 43), re_string.find_ascii(self.test4, 0, len(self.test4)))
        self.assertEqual((0, 4), re_string.find_ascii(self.test5, 0, len(self.test5)))
        self.assertEqual((0, 39), re_string.find_ascii(self.test5, 5, len(self.test5) - 5))
        self.assertEqual((-1, -1), re_string.find_ascii(self.test6, 0, len(self.test6)))
        self.assertEqual((0, 42), re_string.find_ascii(self.test6, 1, len(self.test6) - 1))
        self.assertEqual((-1, -1), re_string.find_ascii(self.test10, 0, len(self.test10)))  # too small
        self.assertEqual((0, 10), re_string.find_ascii(self.test10, 3, len(self.test10) - 3))


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # logging.getLogger("re_string").setLevel(level=logging.DEBUG)
    unittest.main(verbosity=0)
    #suite = unittest.TestLoader().loadTestsFromTestCase(TestFunctions)
    # unittest.TextTestRunner(verbosity=2).run(suite)
gpl-3.0
bitforks/robofab
Lib/robofab/tools/remote.py
10
4500
"""Remote control for MacOS FontLab. initFontLabRemote() registers a callback for appleevents and runFontLabRemote() sends the code from a different application, such as a Mac Python IDE or Python interpreter. """ from robofab.world import world if world.inFontLab and world.mac is not None: from Carbon import AE as _AE else: import sys from aetools import TalkTo class FontLab(TalkTo): pass __all__ = ['initFontLabRemote', 'runFontLabRemote'] def _executePython(theAppleEvent, theReply): import aetools import cStringIO import traceback import sys parms, attrs = aetools.unpackevent(theAppleEvent) source = parms.get("----") if source is None: return stdout = cStringIO.StringIO() #print "<executing remote command>" save = sys.stdout, sys.stderr sys.stdout = sys.stderr = stdout namespace = {} try: try: exec source in namespace except: traceback.print_exc() finally: sys.stdout, sys.stderr = save output = stdout.getvalue() aetools.packevent(theReply, {"----": output}) _imported = False def initFontLabRemote(): """Call this in FontLab at startup of the application to switch on the remote.""" print "FontLabRemote is on." _AE.AEInstallEventHandler("Rfab", "exec", _executePython) if world.inFontLab and world.mac is not None: initFontLabRemote() def runFontLabRemote(code): """Call this in the MacOS Python IDE to make FontLab execute the code.""" fl = FontLab("FLab", start=1) ae, parms, attrs = fl.send("Rfab", "exec", {"----": code}) output = parms.get("----") return output # GlyphTransmit # Convert a glyph to a string using digestPen, transmit string, unpack string with pointpen. 
# def Glyph2String(glyph): from robofab.pens.digestPen import DigestPointPen import pickle p = DigestPointPen(glyph) glyph.drawPoints(p) info = {} info['name'] = glyph.name info['width'] = glyph.width info['points'] = p.getDigest() return str(pickle.dumps(info)) def String2Glyph(gString, penClass, font): import pickle if gString is None: return None info = pickle.loads(gString) name = info['name'] if not name in font.keys(): glyph = font.newGlyph(name) else: glyph = font[name] pen = penClass(glyph) for p in info['points']: if p == "beginPath": pen.beginPath() elif p == "endPath": pen.endPath() else: pt, type = p pen.addPoint(pt, type) glyph.width = info['width'] glyph.update() return glyph _makeFLGlyph = """ from robofab.world import CurrentFont from robofab.tools.remote import receiveGlyph code = '''%s''' receiveGlyph(code, CurrentFont()) """ def transmitGlyph(glyph): from robofab.world import world if world.inFontLab and world.mac is not None: # we're in fontlab, on a mac print Glyph2String(glyph) pass else: remoteProgram = _makeFLGlyph%Glyph2String(glyph) print "remoteProgram", remoteProgram return runFontLabRemote(remoteProgram) def receiveGlyph(glyphString, font=None): from robofab.world import world if world.inFontLab and world.mac is not None: # we're in fontlab, on a mac from robofab.pens.flPen import FLPointPen print String2Glyph(glyphString, FLPointPen, font) pass else: from robofab.pens.rfUFOPen import RFUFOPointPen print String2Glyph(glyphString, RFUFOPointPen, font) # # command to tell FontLab to open a UFO and save it as a vfb def os9PathConvert(path): """Attempt to convert a unix style path to a Mac OS9 style path. No support for relative paths! 
""" if path.find("/Volumes") == 0: # it's on the volumes list, some sort of external volume path = path[len("/Volumes")+1:] elif path[0] == "/": # a dir on the root volume path = path[1:] new = path.replace("/", ":") return new _remoteUFOImportProgram = """ from robofab.objects.objectsFL import NewFont import os.path destinationPathVFB = "%(destinationPathVFB)s" font = NewFont() font.readUFO("%(sourcePathUFO)s", doProgress=True) font.update() font.save(destinationPathVFB) print font, "done" font.close() """ def makeVFB(sourcePathUFO, destinationPathVFB=None): """FontLab convenience function to import a UFO and save it as a VFB""" import os fl = FontLab("FLab", start=1) if destinationPathVFB is None: destinationPathVFB = os.path.splitext(sourcePathUFO)[0]+".vfb" src9 = os9PathConvert(sourcePathUFO) dst9 = os9PathConvert(destinationPathVFB) code = _remoteUFOImportProgram%{'sourcePathUFO': src9, 'destinationPathVFB':dst9} ae, parms, attrs = fl.send("Rfab", "exec", {"----": code}) output = parms.get("----") return output
bsd-3-clause
bcl/pykickstart
pykickstart/commands/unsupported_hardware.py
2
2002
#
# Brian C. Lane <bcl@redhat.com>
#
# Copyright 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2.  This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.version import RHEL6
from pykickstart.base import KickstartCommand
from pykickstart.options import KSOptionParser


class RHEL6_UnsupportedHardware(KickstartCommand):
    """Handler for the RHEL6 ``unsupported_hardware`` kickstart command.

    The command takes no options; its mere presence in the kickstart file
    sets the flag that lets installation proceed on tainted hardware.
    """
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        self.op = self._getParser()
        # Defaults to False unless the command appears (or the caller
        # explicitly passes unsupported_hardware=True).
        self.unsupported_hardware = kwargs.get("unsupported_hardware", False)

    def __str__(self):
        """Return the kickstart representation of this command."""
        pieces = [KickstartCommand.__str__(self)]
        if self.unsupported_hardware:
            pieces.append("unsupported_hardware\n")
        return "".join(pieces)

    def _getParser(self):
        """Build the (option-less) argument parser for this command."""
        return KSOptionParser(prog="unsupported_hardware",
                              description="allow installation to proceed on systems with tainted hardware",
                              version=RHEL6)

    def parse(self, args):
        """Parse the command line; seeing the command at all turns the flag on."""
        self.op.parse_args(args=args, lineno=self.lineno)
        self.unsupported_hardware = True
        return self
gpl-2.0
saullocastro/pyNastran
pyNastran/utils/atmosphere2.py
1
25930
""" Contains the following atmospheric functions: - density = atm_density(alt, mach) - mach = atm_mach(alt, velocity) - velocity = atm_velocity(alt, mach) - pressure = atm_pressure(alt) - temperature = atm_temperature(alt) - sos = atm_speed_of_sound(alt) - mu = atm_dynamic_viscosity_mu(alt) - nu = atm_kinematic_viscosity_nu(alt) - eas = atm_equivalent_airspeed(alt, mach) """ from __future__ import print_function import sys from math import log, exp import numpy as np def _update_alt(alt, alt_units='ft', debug=False): """ Converts altitude alt_units to feet Parameters ---------- alt : float altitude in feet or meters alt_units : str; default='ft' sets the units for altitude TODO: remove default Returns ------- alt2 : float altitude in feet """ if alt_units == 'ft': factor = 1. elif alt_units == 'm': factor = 1. / 0.3048 elif alt_units == 'kft': factor = 1000. else: raise RuntimeError('alt_units=%r is not valid; use [ft, m, kft]' % alt_units) alt2 = alt * factor if debug: if alt_units == 'ft': SI = False else: SI = True if SI: print("z = %s [m] = %s [ft]" % (alt, alt2)) else: print("z = %s [m] = %s [ft]" % (alt * _feet_to_alt_units(alt_units), alt2)) return alt2 def _update_velocity(velocity, velocity_units='ft/s', debug=False): """ Converts altitude alt_units to feet Parameters ---------- velocity : float altitude in feet or meters velocity_units : str; default='ft/s' sets the units for altitude TODO: remove default Returns ------- velocity2 : float velocity in feet/s """ if velocity_units == 'ft/s': factor = 1. elif velocity_units == 'm/s': factor = 1. 
/ 0.3048 elif velocity_units == 'knots': factor = 1.68781 else: msg = 'velocity_units=%r is not valid; use [ft/s, m/s, knots]' % velocity_units raise RuntimeError(msg) velocity2 = velocity * factor #if debug: #if SI: #print("z = %s [m] = %s [ft]" % (alt, alt2)) #else: #print("z = %s [m] = %s [ft]" % (alt * _feet_to_meters(True), alt2)) return velocity2 def get_alt_for_density(density): """ Gets the altitude associated with a given air density. Parameters ---------- density : float the air density in slug/ft^3 Returns ------- alt : float the altitude in feet """ dalt = 500. alt_old = 0. alt_final = 5000. n = 0 tol = 5. # ft # Newton's method while abs(alt_final - alt_old) > tol and n < 20: alt_old = alt_final alt1 = alt_old alt2 = alt_old + dalt rho1 = atm_density(alt1) rho2 = atm_density(alt2) m = dalt / (rho2 - rho1) alt_final = m * (density - rho1) + alt1 n += 1 if n > 18: print('n = %s' % n) return alt_final def get_alt_for_eas_mach(equivalent_airspeed, mach, SI=False): """ Gets the altitude associated with a equivalent airspeed. Parameters ---------- equivalent_airspeed : float the equivalent airspeed in ft/s (SI=m/s) mach : float the mach to hold constant SI : bool should SI units be used; default=False Returns ------- alt : float the altitude in ft (SI=m) """ if SI: equivalent_airspeed /= 0.3048 # m/s to ft/s dalt = 500. alt_old = 0. alt_final = 5000. n = 0 tol = 5. # ft R = 1716. z0 = 0. 
T0 = atm_temperature(z0) p0 = atm_pressure(z0) k = np.sqrt(T0 / p0) #eas = a * mach * sqrt((p * T0) / (T * p0)) = a * mach * sqrt(p / T) * k # Newton's method while abs(alt_final - alt_old) > tol and n < 20: alt_old = alt_final alt1 = alt_old alt2 = alt_old + dalt T1 = atm_temperature(alt1) T2 = atm_temperature(alt2) press1 = atm_pressure(alt1) press2 = atm_pressure(alt2) sos1 = np.sqrt(1.4 * R * T1) sos2 = np.sqrt(1.4 * R * T2) eas1 = sos1 * mach * np.sqrt(press1 / T1) * k eas2 = sos2 * mach * np.sqrt(press2 / T2) * k m = dalt / (eas2 - eas1) alt_final = m * (equivalent_airspeed - eas1) + alt1 n += 1 if n > 18: print('n = %s' % n) if SI: alt_final *= 0.3048 # feet to meters return alt_final def get_alt_for_q_mach(q, mach, SI=False): """ Gets the altitude associated with a equivalent airspeed. Parameters ---------- q : float the dynamic pressure lb/ft^2 (SI=Pa) mach : float the mach to hold constant SI : bool should SI units be used; default=False Returns ------- alt : float the altitude in ft (SI=m) """ pressure = 2 * q / (1.4 * mach ** 2) # gamma = 1.4 alt = get_alt_for_pressure(pressure, SI=SI) return alt def get_alt_for_pressure(pressure, SI=False): """ Gets the altitude associated with a equivalent airspeed. Parameters ---------- pressure : float the pressure lb/ft^2 (SI=Pa) SI : bool should SI units be used; default=False Returns ------- alt : float the altitude in ft (SI=m) """ if SI: pressure /= 47.880259 # Pa to psf dalt = 500. alt_old = 0. alt_final = 5000. n = 0 tol = 5. # ft # Newton's method while abs(alt_final - alt_old) > tol and n < 20: alt_old = alt_final alt1 = alt_old alt2 = alt_old + dalt press1 = atm_pressure(alt1) press2 = atm_pressure(alt2) m = dalt / (press2 - press1) alt_final = m * (pressure - press1) + alt1 n += 1 if n > 18: print('n = %s' % n) if SI: alt_final *= 0.3048 # feet to meters return alt_final def _feet_to_alt_units(alt_units): if alt_units == 'm': factor = 0.3048 elif alt_units == 'ft': factor = 1. 
else: raise RuntimeError('alt_units=%r is not valid; use [ft, m]' % alt_units) return factor def _convert_alt(alt, alt_units_in, alt_units_out): factor = 1.0 # units to feet if alt_units_in == 'm': factor /= 0.3048 elif alt_units_in == 'ft': pass elif alt_units_in == 'kft': factor *= 1000. else: raise RuntimeError('alt_units_in=%r is not valid; use [ft, m, kft]' % alt_units_in) #print('alt=%.1f alt_units_in=%s alt_mid=%.1f ft' % ( #alt, alt_units_in, alt*factor)) # ft to m if alt_units_out == 'm': factor *= 0.3048 elif alt_units_out == 'ft': pass elif alt_units_out == 'kft': factor /= 1000. else: raise RuntimeError('alt_units_out=%r is not valid; use [ft, m, kft]' % alt_units_out) #print('alt=%.1f alt_units_in=%s alt_units_out=%s alt2=%.1f' % ( #alt, alt_units_in, alt_units_out, alt*factor)) return alt * factor def _convert_velocity(velocity, velocity_units_in, velocity_units_out): factor = 1.0 if velocity_units_in == 'm/s': factor /= 0.3048 elif velocity_units_in == 'ft/s': pass elif velocity_units_in == 'knots': factor *= 1.68781 else: msg = 'velocity_units_in=%r is not valid; use [ft/s, m/s, knots]' % velocity_units_in raise RuntimeError(msg) #print('velocity=%.1f velocity_units_in=%s velocity_mid=%.1f' % ( #velocity, velocity_units_in, velocity * factor)) if velocity_units_out == 'm/s': factor *= 0.3048 elif velocity_units_out == 'ft/s': pass elif velocity_units_out == 'knots': factor /= 1.68781 else: msg = 'velocity_units_in=%r is not valid; use [ft/s, m/s, knots]' % velocity_units_in raise RuntimeError(msg) #print('velocity=%.1f velocity_units_in=%s velocity_units_out=%s velocity2=%.1f' % ( #velocity, velocity_units_in, velocity_units_out, velocity * factor)) return velocity * factor def _feet_s_to_velocity_units(velocity_units): if velocity_units == 'm/s': factor = 0.3048 elif velocity_units == 'ft/s': factor = 1. elif velocity_units == 'knots': factor = 1. 
/ 1.68781 else: raise RuntimeError('alt_units=%r is not valid; use [ft, m]' % velocity_units) return factor def _rankine_to_kelvin(SI): if SI: factor = 5 / 9. else: factor = 1. return factor def _psf_to_pressure_units(pressure_units): if pressure_units == 'psf': factor = 1. elif pressure_units == 'Pa': factor = 47.880259 else: raise RuntimeError('pressure_units=%r is not valid; use [psf, Pa]' % pressure_units) return factor def atm_temperature(alt, alt_units='ft', temperature_units='R', debug=False): r""" Freestream Temperature \f$ T_{\infty} \f$ Parameters ---------- alt : bool Altitude in feet or meters (SI) SI : bool; default=False returns temperature in SI units if True Returns ------- T : float temperature in degrees Rankine or Kelvin (SI) .. note :: from BAC-7006-3352-001-V1.pdf # Bell Handbook of Aerodynamic Heating\n page ~236 - Table C.1\n These equations were used because they are valid to 300k ft.\n Extrapolation is performed above that. """ z = _update_alt(alt, alt_units) if z < 36151.725: T = 518.0-0.003559996 * z elif z < 82344.678: T = 389.988 elif z < 155347.756: T = 389.988+.0016273286 * (z - 82344.678) elif z < 175346.171: T = 508.788 elif z < 249000.304: T = 508.788-.0020968273 * (z - 175346.171) elif z < 299515.564: T = 354.348 else: print("alt=%i kft > 299.5 kft" % (z / 1000.)) T = 354.348 #raise AtmosphereError("altitude is too high") if temperature_units == 'R': factor = 1. elif temperature_units == 'K': factor = 5. / 9. 
else: raise RuntimeError('temperature_units=%r is not valid; use [ft, m]' % temperature_units) T2 = T * factor #if debug: #if SI: #print("z = %s [m] = %s [ft]" % (alt, z)) #print("T = %s [K] = %s [R]" % (T2, T)) #else: #print("z = %s [m] = %s [ft]" % (alt * _feet_to_meters(True), z)) #print("T = %s [K] = %s [R]" % (T * _rankine_to_kelvin(True), T2)) return T2 def atm_pressure(alt, alt_units='ft', pressure_units='psf', debug=False): r""" Freestream Pressure \f$ p_{\infty} \f$ Parameters ---------- alt : float Altitude in feet or meters (SI) SI : bool; default=False returns pressure in SI units if True Returns ------- pressure : float Returns pressure in psf or Pa (SI) .. note :: from BAC-7006-3352-001-V1.pdf # Bell Handbook of Aerodynamic Heating\n page ~236 - Table C.1\n These equations were used b/c they are valid to 300k ft.\n Extrapolation is performed above that.\n """ z = _update_alt(alt, alt_units=alt_units) if z < 36151.725: lnP = 7.657389 + 5.2561258 * log(1 - 6.8634634E-6 * z) elif z < 82344.678: lnP = 6.158411 - 4.77916918E-5 * (z-36151.725) elif z < 155347.756: lnP = 3.950775 - 11.3882724 * log(1.0 + 4.17276598E-6 * (z - 82344.678)) elif z < 175346.171: lnP = 0.922461 - 3.62635373E-5*(z - 155347.756) elif z < 249000.304: lnP = 0.197235 + 8.7602095 * log(1.0 - 4.12122002E-6 * (z - 175346.171)) elif z < 299515.564: lnP = -2.971785 - 5.1533546650E-5 * (z - 249000.304) else: print("alt=%i kft > 299.5 kft" % (z / 1000.)) lnP = -2.971785 - 5.1533546650E-5 * (z - 249000.304) p = exp(lnP) if pressure_units == 'psf': factor = 1. 
elif pressure_units == 'Pa': factor = 47.880259 # psf to Pa else: raise RuntimeError('pressure_units=%r is not valid; use [psf, Pa]' % pressure_units) #if debug: #ft_to_m = _feet_to_meters(True) #if SI: #print("z = %s [m] = %s [ft]" % (alt, z)) #print("Patm = %g [Pa] = %g [psf]" % (p * factor, p)) #else: #print("z = %s [m] = %s [ft]" % (alt * ft_to_m, z)) #print("Patm = %g [Pa] = %g [psf]" % (p * _psf_to_pascals(True), p)) return p*factor def atm_dynamic_pressure(alt, mach, alt_units='ft', pressure_units='psf', debug=False): r""" Freestream Dynamic Pressure \f$ q_{\infty} \f$ Parameters ---------- alt : float Altitude in feet or meters (SI) mach : float Mach Number \f$ M \f$ SI : bool returns dynamicPressure in SI units if True (default=False) Returns ------- dynamic_Pressure : float Returns dynamic pressure in lb/ft^2 or Pa (SI). The common method that requires many calculations... \f[ \large q = \frac{1}{2} \rho V^2 \f] \f[ \large p = \rho R T \f] \f[ \large M = \frac{V}{a} \f] \f[ \large a = \sqrt{\gamma R T} \f] so... \f[ \large q = \frac{\gamma}{2} p M^2 \f] """ z = _update_alt(alt, alt_units) p = atm_pressure(z) q = 0.7 * p * mach ** 2 factor = _psf_to_pressure_units(pressure_units) q2 = q * factor #if debug: #ft_to_m = _feet_to_meters(True) #if SI: #print("z = %s [m] = %s [ft]" % (alt, z)) #print("p = %s [psf] = %s [Pa]" % (p, p * factor)) #print("q = %s [psf] = %s [Pa]" % (q, q2)) #else: #print("z = %s [m] = %s [ft]" % (alt * ft_to_m, z)) #print("p = %s [psf] = %s [Pa]" % (p, p * _psf_to_pascals(True))) #print("q = %s [psf] = %s [Pa]" % (q, q * _psf_to_pascals(True))) return q2 def atm_speed_of_sound(alt, alt_units='ft', velocity_units='ft/s', gamma=1.4, debug=False): r""" Freestream Speed of Sound \f$ a_{\infty} \f$ Parameters ---------- alt : bool Altitude in feet or meters (SI) SI : bool; default=False convert to SI units Returns ------- speed_of_sound, a : float Returns speed of sound in ft/s or m/s (SI). 
\f[ \large a = \sqrt{\gamma R T} \f] """ # converts everything to English units first z = _update_alt(alt, alt_units) T = atm_temperature(z) R = 1716. # 1716.59, dir air, R=287.04 J/kg*K a = (gamma * R * T) ** 0.5 factor = _feet_s_to_velocity_units(velocity_units) # ft/s to m/s a2 = a * factor #if debug: #ft_to_m = _feet_to_meters(True) #if SI: #print("z = %s [m] = %s [ft]" % (alt, z)) #print("T = %s [K] = %s [R]" % (T / 1.8, T)) #print("a = %s [m/s] = %s [ft/s]" % (a2, a)) #else: #print("z = %s [m] = %s [ft]" % (alt * ft_to_m, z)) #print("T = %s [K] = %s [R]" % (T / 1.8, T)) #print("a = %s [m/s] = %s [ft/s]" % (a * _feet_to_meters(True), a2)) return a2 def atm_velocity(alt, mach, alt_units='ft', velocity_units='ft/s', debug=False): r""" Freestream Velocity \f$ V_{\infty} \f$ alt : float altitude in feet or meters SI : bool; default=False convert velocity to SI units Mach : float Mach Number \f$ M \f$ Returns velocity : float Returns velocity in ft/s or m/s (SI). \f[ \large V = M a \f] """ a = atm_speed_of_sound(alt, alt_units=alt_units, velocity_units=velocity_units) V = mach * a # units=ft/s or m/s #if debug: #ft_to_m = _feet_to_alt_units('m') #if SI: #print("z = %s [m] = %s [ft]" % (alt, alt)) #print("a = %s [m/s] = %s [ft/s]" % (a, a / ft_to_m)) #print("V = %s [m/s] = %s [ft/s]" % (V, V / ft_to_m)) #else: #print("z = %s [m] = %s [ft]" % (alt * ft_to_m, alt)) #print("a = %s [m/s] = %s [ft/s]" % (a * ft_to_m, a)) #print("V = %s [m/s] = %s [ft/s]" % (V * ft_to_m, V)) return V def atm_equivalent_airspeed(alt, mach, alt_units='ft', eas_units='ft/s', debug=False): """ EAS = TAS * sqrt(rho/rho0) p = rho * R * T rho = p/(RT) rho/rho0 = p/T * T0/p0 TAS = a * M EAS = a * M * sqrt(p/T * T0/p0) """ z = _update_alt(alt, alt_units) a = atm_speed_of_sound(z) #V = mach * a # units=ft/s or m/s z0 = 0. 
T0 = atm_temperature(z0) p0 = atm_pressure(z0) T = atm_temperature(z) p = atm_pressure(z) eas = a * mach * np.sqrt((p * T0) / (T * p0)) if eas_units == 'ft/s': pass elif eas_units == 'knots': eas /= 1.68781 # ft/s to knots elif eas_units == 'm/s': ft_to_m = _feet_to_alt_units('m') eas *= ft_to_m else: raise NotImplementedError(eas_units) #if debug: #if SI: #print("z = %s [m] = %s [ft]" % (alt, z)) #print("a = %s [m/s] = %s [ft/s]" % (a * ft_to_m, a)) #print("eas = %s [m/s] = %s [ft/s]" % (eas, eas / ft_to_m)) #else: #ft_to_m = _feet_to_meters(True) #print("z = %s [m] = %s [ft]" % (alt * ft_to_m, alt)) #print("a = %s [m/s] = %s [ft/s]" % (a * ft_to_m, a)) #print("eas = %s [m/s] = %s [ft/s]" % (eas * ft_to_m, eas)) return eas def atm_mach(alt, V, alt_units='ft', velocity_units='ft/s', debug=False): r""" Freestream Mach Number Parameters ---------- alt : float altitude in feet or meters V : float Velocity in ft/s or m/s (SI) SI : bool; default=False convert velocity to SI units Returns ------- mach : float Mach Number \f$ M \f$ \f[ \large M = \frac{V}{a} \f] """ z = _update_alt(alt, alt_units) a = atm_speed_of_sound(z, alt_units='ft', velocity_units=velocity_units) mach = V / a if debug: print("z = %.1f [m] = %.1f [ft] = %.1f [%s]" % ( _convert_alt(alt, alt_units, 'm'), z, # ft alt, alt_units)) print("a = %.3f [m/s] = %.3f [ft/s] = %.3f [%s]" % ( _convert_velocity(a, velocity_units, 'm/s'), _convert_velocity(a, velocity_units, 'ft/s'), a, velocity_units)) print("V = %.3f [m/s] = %.3f [ft/s] = %.3f [%s]" % ( _convert_velocity(V, velocity_units, 'm/s'), _convert_velocity(V, velocity_units, 'ft/s'), V, velocity_units)) print("M = %.3f" % (mach)) return mach def atm_density(alt, R=1716., alt_units='ft', density_units='slug/ft^3', debug=False): r""" Freestream Density \f$ \rho_{\infty} \f$ Parameters ---------- Parameters ---------- alt : float altitude in feet or meters SI : bool; default=False convert velocity to SI units R : float; default=1716. 
gas constant for air in english units (???) Returns ------- rho : float density \f$ \rho \f$ in slug/ft^3 or kg/m^3 (SI). Based on the formula P=pRT \f[ \large \rho=\frac{p}{R T} \f] """ z = _update_alt(alt, alt_units) P = atm_pressure(z) T = atm_temperature(z) # going from slug/ft^3 to kg/m^3 if density_units == 'slug/ft^3': factor = 1. elif density_units == 'kg/m^3': factor = 515.378818 #elif density_units == 'slug/in^3': #factor = None else: raise NotImplementedError(density_units) #if debug: #rho = P / (R * T) #ft_to_m = _feet_to_alt_units('m') #if SI: #pressure_units = 'Pa' #print("z = %s [m] = %s [ft]" % (alt, z)) #print("Patm = %g [Pa] = %g [psf]" % (P * _psf_to_pressure_units(pressure_units), P)) #print("T = %s [K] = %s [R]" % (T / 1.8, T)) #print("rho = %e [kg/m^3] = %e [slug/ft^3]" % (rho * 515.378818, rho)) #else: #pressure_units = 'Pa' #print("z = %s [m] = %s [ft]" % (alt * ft_to_m, z)) #print("Patm = %g [Pa] = %g [psf]" % (P * _psf_to_pressure_units(pressure_units), P)) #print("T = %s [K] = %s [R]" % (T / 1.8, T)) #print("rho = %e [kg/m^3] = %e [slug/ft^3]" % (rho * 515.378818, rho)) return P / (R * T) * factor def atm_kinematic_viscosity_nu(alt, alt_units='ft', visc_units='ft^2/s', debug=False): r""" Freestream Kinematic Viscosity \f$ \nu_{\infty} \f$ Parameters ---------- alt : bool Altitude in feet or meters (SI) SI : bool; default=False convert to SI units Returns ------- nu : float kinematic viscosity \f$ \nu_{\infty} \f$ in ft^2/s or m^2/s (SI) \f[ \large \nu = \frac{\mu}{\rho} \f] .. see :: SutherlandVisc .. todo:: better debug """ z = _update_alt(alt, alt_units) rho = atm_density(z) mu = atm_dynamic_viscosity_mu(z) nu = mu / rho if debug: # doesnt work unless US units print("atm_nu - rho=%g [slug/ft^3] mu=%e [lb*s/ft^2] nu=%e [ft^2/s]" % (rho, mu, nu)) if visc_units == 'ft^2/s': factor = 1. 
elif visc_units == 'm^2/s': factor = _feet_to_alt_units(alt_units) ** 2 return nu * factor def atm_dynamic_viscosity_mu(alt, SI=False): r""" Freestream Dynamic Viscosity \f$ \mu_{\infty} \f$ Parameters ---------- alt : bool Altitude in feet or meters (SI) SI : bool; default=False convert to SI units Returns ------- mu : float dynamic viscosity \f$ \mu_{\infty} \f$ in (lbf*s)/ft^2 or (N*s)/m^2 (SI) .. see :: SutherlandVisc @ todo units... """ z = _update_alt(alt, SI) T = atm_temperature(z) mu = sutherland_viscoscity(T) if SI: return mu * 47.88026 return mu def atm_unit_reynolds_number2(alt, mach, alt_units='ft', ReL_units='1/ft', debug=False): r""" Returns the Reynolds Number per unit length Parameters ---------- alt : bool Altitude in feet or meters (SI) mach : float Mach Number \f$ M \f$ SI : bool; default=False convert to SI units Returns ------- ReynoldsNumber/L : float 1/ft or 1/m (SI) \f[ \large Re_L = \frac{ \rho V}{\mu} = \frac{p M a}{\mu R T} \f] .. note :: this version of Reynolds number directly caculates the base quantities, so multiple calls to atm_press and atm_temp are not made """ z = _update_alt(alt, alt_units) #print("z = ",z) gamma = 1.4 R = 1716. p = atm_pressure(z) T = atm_temperature(z) #p = rhoRT a = (gamma * R * T) ** 0.5 mu = sutherland_viscoscity(T) ReL = p * a * mach / (mu * R * T) if debug: print("---atm_UnitReynoldsNumber2---") print("z = %s [m] = %s [ft]" % (alt * _feet_to_alt_units('m'), z)) print("a = %s [m/s] = %s [ft/s]" % (a * _feet_to_alt_units('m'), a)) rho = p / (R * T) print("rho = %s [kg/m^3] = %s [slug/ft^3]" % (rho * 515.378818, rho)) print("M = %s" % mach) print("V = %s [m/s] = %s [ft/s]" % (a * mach * _feet_to_alt_units('m'), a * mach)) print("T = %s [K] = %s [R]" % (T * 5 / 9., T)) print("mu = %s [(N*s)/m^2] = %s [(lbf*s)/ft^2]" % (mu * 47.88026, mu)) print("Re = %s [1/m] = %s [1/ft]" % (ReL / 0.3048, ReL)) # convert ReL in 1/ft to 1/m if ReL_units == '1/ft': factor = 1. elif ReL_units == '1/m': factor = 1. 
/ .3048 else: raise NotImplementedError(ReL_units) return ReL * factor def atm_unit_reynolds_number(alt, mach, SI=False, debug=False): r""" Returns the Reynolds Number per unit length Parameters ---------- alt : bool Altitude in feet or meters (SI) mach : float Mach Number \f$ M \f$ SI : bool; default=False convert to SI units Returns ------- ReynoldsNumber/L : float 1/ft or 1/m (SI) \f[ \large Re = \frac{ \rho V L}{\mu} \f] \f[ \large Re_L = \frac{ \rho V }{\mu} \f] """ z = _update_alt(alt, SI) rho = atm_density(z) V = atm_velocity(z, mach) mu = atm_dynamic_viscosity_mu(z) ReL = (rho * V) / mu if debug: print("---atm_UnitReynoldsNumber---") print("z = %s [m] = %s [ft]" % (alt * _feet_to_alt_units('m'), z)) print("rho = %s [kg/m^3] = %s [slug/ft^3]" % (rho * 515.378818, rho)) print("V = %s [m/s] = %s [ft/s]" % (V * _feet_to_alt_units('m'), V)) print("mu = %s [(N*s)/m^2] = %s [(lbf*s)/ft^2]" % (mu * 47.88026, mu)) print("Re = %s [1/m] = %s [1/ft]" % (ReL / 0.3048, ReL)) if SI: return ReL / .3048 # convert ReL in 1/ft to 1/m return ReL def sutherland_viscoscity(T): r""" Helper function that calculates the dynamic viscosity \f$ \mu \f$ of air at a given temperature Parameters ---------- T : float Temperature T is in Rankine Returns ------- mu : float dynamic viscosity \f$ \mu \f$ of air in (lbf*s)/ft^2 .. note :: prints a warning if T>5400 deg R .. todo:: Consider raising an error instead of writing to stderr and letting the function return an answer. Sutherland's Equation\n From Aerodynamics for Engineers 4th Edition\n John J. Bertin 2002\n page 6 eq 1.5b\n """ if T < 225.: # Rankine viscosity = 8.0382436E-10 * T else: if T > 5400.: msg = "WARNING: viscosity - Temperature is too large (T>5400) T=%s\n" % T sys.stderr.write(msg) viscosity = 2.27E-8 * (T ** 1.5) / (T + 198.6) return viscosity
lgpl-3.0
iabdalkader/micropython
tools/autobuild/remove_old_firmware.py
4
2047
#!/usr/bin/env python3 import re, subprocess, sys DEBUG = False DRY_RUN = False NUM_KEEP_PER_BOARD = 4 def main(): ssh_machine = sys.argv[1] ssh_firmware_dir = sys.argv[2] # SSH to get list of existing files. p = subprocess.run( ["ssh", ssh_machine, "find", ssh_firmware_dir, "-name", "\\*-unstable-v\\*"], capture_output=True, ) if p.returncode != 0: print(p.stderr) return all_files = p.stdout.split(b"\n") # Parse all files to organise into boards/date/version. boards = {} for file in all_files: m = re.match( rb"([a-z/.]+)/([A-Za-z0-9_-]+)-(20[0-9]{6})-unstable-(v[0-9.-]+-g[0-9a-f]+).", file, ) if not m: continue dir, board, date, version = m.groups() if board not in boards: boards[board] = {} if (date, version) not in boards[board]: boards[board][(date, version)] = [] boards[board][(date, version)].append(file) # Collect files to remove based on date and version. remove = [] for board in boards.values(): filelist = [(date, version, files) for (date, version), files in board.items()] filelist.sort(reverse=True) keep = [] for date, version, files in filelist: if keep and version == keep[-1]: remove.extend(files) elif len(keep) >= NUM_KEEP_PER_BOARD: remove.extend(files) else: keep.append(version) if DEBUG: all_files.sort(reverse=True) for file in all_files: print(file, file in remove) print(len(remove), "/", len(all_files)) # Do removal of files. for file in remove: file = str(file, "ascii") print("remove:", file) if not DRY_RUN: p = subprocess.run(["ssh", ssh_machine, "/bin/rm", file], capture_output=True) if p.returncode != 0: print(p.stderr) if __name__ == "__main__": main()
mit
popazerty/obh-gui
lib/python/Components/Sources/FrontendInfo.py
130
2058
from enigma import iPlayableService
from Source import Source
from Components.PerServiceDisplay import PerServiceBase
from enigma import eDVBResourceManager

class FrontendInfo(Source, PerServiceBase):
    """Source exposing which DVB tuner is in use (slot number, tuner type,
    tuner-use mask).

    The frontend data can come from one of three places, checked in order by
    getFrontendData(): an explicit ``frontend_source`` callback, a
    ``service_source`` callback, or the navigation core's current service.
    """

    def __init__(self, service_source = None, frontend_source = None, navcore = None):
        # NOTE(review): self.navcore is initialised to None here; presumably
        # PerServiceBase.__init__ stores the navcore argument when one is
        # given -- confirm against PerServiceDisplay.
        self.navcore = None
        Source.__init__(self)
        if navcore:
            # Refresh tuner data when a service tunes in; clear it on end.
            PerServiceBase.__init__(self, navcore,
                {
                    iPlayableService.evTunedIn: self.updateFrontendData,
                    iPlayableService.evEnd: self.serviceEnd
                })
        res_mgr = eDVBResourceManager.getInstance()
        if res_mgr:
            # Track changes to the set of tuners in use.
            res_mgr.frontendUseMaskChanged.get().append(self.updateTunerMask)
        self.service_source = service_source
        self.frontend_source = frontend_source
        self.tuner_mask = 0
        self.updateFrontendData()

    def serviceEnd(self):
        """Clear slot/type info when the current service ends."""
        # import pdb
        # pdb.set_trace()
        self.slot_number = self.frontend_type = None
        self.changed((self.CHANGED_CLEAR, ))

    def updateFrontendData(self):
        """Re-read tuner number/type from the active frontend and notify."""
        data = self.getFrontendData()
        if not data:
            self.slot_number = self.frontend_type = None
        else:
            self.slot_number = data.get("tuner_number")
            self.frontend_type = data.get("tuner_type")
        self.changed((self.CHANGED_ALL, ))

    def updateTunerMask(self, mask):
        """Callback from eDVBResourceManager with the new tuner-use bitmask."""
        self.tuner_mask = mask
        self.changed((self.CHANGED_ALL, ))

    def getFrontendData(self):
        """Return a dict of frontend data (or None) from the first available
        provider: frontend_source, then service_source, then navcore."""
        if self.frontend_source:
            frontend = self.frontend_source()
            dict = { }
            if frontend:
                frontend.getFrontendData(dict)
            return dict
        elif self.service_source:
            service = self.navcore and self.service_source()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendData()
        elif self.navcore:
            service = self.navcore.getCurrentService()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendData()
        else:
            return None

    def destroy(self):
        """Unregister callbacks registered in __init__ and tear down."""
        if not self.frontend_source and not self.service_source:
            # Only in this case was PerServiceBase.__init__ run above.
            PerServiceBase.destroy(self)
        res_mgr = eDVBResourceManager.getInstance()
        if res_mgr:
            res_mgr.frontendUseMaskChanged.get().remove(self.updateTunerMask)
        Source.destroy(self)
gpl-2.0
Baisang/LearnCoin
contrib/pyminer/pyminer.py
1257
6438
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#

# Reference CPU miner (Python 2): polls a bitcoind via the `getwork`
# JSON-RPC call, grinds nonces with double-SHA256, and submits solutions.

import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys

from multiprocessing import Process

# Seconds to sleep after an RPC failure before retrying.
ERR_SLEEP = 15
# Initial nonce scan range; rescaled each iteration to match 'scantime'.
MAX_NONCE = 1000000L

# Global key=value settings parsed from the config file in __main__.
settings = {}
pp = pprint.PrettyPrinter(indent=4)

class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind over HTTP basic auth."""
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30-second timeout; connection is reused across calls.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC request; return its 'result' (or 'error') value,
        or None on transport/decode problems."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']
    def getblockcount(self):
        """Current best block height."""
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block header."""
        return self.rpc('getwork', data)

def uint32(x):
    # Truncate a Python long to an unsigned 32-bit value.
    return x & 0xffffffffL

def bytereverse(x):
    # Byte-swap a 32-bit word (endianness flip).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
    # Byte-swap each 32-bit word of a binary buffer in place-order.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)

def wordreverse(in_buf):
    # Reverse the order of the 32-bit words in a binary buffer.
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)

class Miner:
    """One mining worker: repeatedly fetch work, scan nonces, submit."""
    def __init__(self, id):
        self.id = id
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for the given hex-encoded block data and target.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if the scan range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work data (hex chars
        152..160) and submit it upstream via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One fetch/scan/submit cycle; sleeps on RPC failure."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Rescale the nonce range so the next scan takes ~'scantime' seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the configured bitcoind and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)

def miner_thread(id):
    # Entry point for each worker process.
    miner = Miner(id)
    miner.loop()

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the key=value config file into the global settings dict.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Launch one worker process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
mit
yhteentoimivuuspalvelut/ckanext-ytp-request
ckanext/ytp/request/model.py
1
1860
import uuid
import datetime

from sqlalchemy import Column, MetaData, ForeignKey
from sqlalchemy import types
from sqlalchemy.ext.declarative import declarative_base

from ckan.lib.base import model

log = __import__('logging').getLogger(__name__)

Base = declarative_base()
metadata = MetaData()

"""CANCEL state is equivalent to DELETE state in member table.
member - member_request is one to many relationship since we need to log all member_requests
to facilitate admins and users what has happened with his previous requests """

# Possible values of MemberRequest.status.
REQUEST_PENDING = "pending"
REQUEST_ACCEPTED = "accepted"
REQUEST_REJECTED = "rejected"
REQUEST_CANCEL = "cancel"


def make_uuid():
    # Random UUID4 as unicode; used as the primary-key default.
    return unicode(uuid.uuid4())


class MemberRequest(Base):
    """ Represents a member request containing request date, handled date,
    status (pending, approved,rejected, cancel) and language used by the member
    so that a localized e-mail could be sent

    Member request stores the request history while member table represents
    the current state a member has with a given organization
    """
    __tablename__ = 'member_request'

    id = Column(types.UnicodeText, primary_key=True, default=make_uuid)
    # Reference to the table containing the composite key for organization and
    # user
    membership_id = Column(types.UnicodeText, ForeignKey(model.Member.id))
    # When the request was created; defaults to insertion time.
    request_date = Column(types.DateTime, default=datetime.datetime.now)
    role = Column(types.UnicodeText)
    # When and by whom the request was handled (accepted/rejected).
    handling_date = Column(types.DateTime)
    handled_by = Column(types.UnicodeText)
    # Requester's language, for localized notification e-mails.
    language = Column(types.UnicodeText)
    message = Column(types.UnicodeText)
    # One of the REQUEST_* constants above; new rows start as pending.
    status = Column(types.UnicodeText, default=u"pending")

    def __init__(self, **kwargs):
        # Generic attribute-setting constructor: any column may be passed
        # as a keyword argument.
        for k, v in kwargs.items():
            setattr(self, k, v)


def init_tables():
    """Create this extension's tables in CKAN's configured database."""
    Base.metadata.create_all(model.meta.engine)
agpl-3.0
autosportlabs/RaceCapture_App
test/autosportlabs/racecapture/config/test_rcpconfig.py
1
2389
import unittest
import json

from autosportlabs.racecapture.config.rcpconfig import CANChannels, CANChannel, CANMapping

# Fixture JSON mirroring the firmware's CAN channel configuration messages.
CAN_CHANNEL_1 = '{"nm":"CHAN1", "ut": "UNITS1", "min":-55, "max":55,"prec":2,"sr":25, "bm": false, "type":0, "bus":0, "id":1234, "idMask":0, "offset":23, "len":19, "subId":-1, "mult":1.23, "div":3.45, "add":3, "bigEndian":false, "filtId": 3}'
CAN_CHANNEL_2 = '{"nm":"CHAN2", "ut": "UNITS2", "min":-66, "max":66,"prec":3,"sr":50, "bm": true, "type":0, "bus":1, "id":1235, "idMask":0, "offset":24, "len":20, "subId":-1, "mult":1.24, "div":6.78, "add":4, "bigEndian":true, "filtId": 4}'
CAN_CHANNELS = '{"en": 1, "chans":[' + CAN_CHANNEL_1 + ',' + CAN_CHANNEL_2 + ']}'


class BaseConfigTest(unittest.TestCase):
    """Shared helpers for round-trip (JSON -> object -> JSON) config tests."""

    def to_json_string(self, json_dict):
        # Compact separators to match the device's wire format.
        return json.dumps(json_dict, separators=(',', ':'))

    def from_json_string(self, json_string):
        return json.loads(json_string)

    def assert_dicts_equal(self, json_dict1, json_dict2):
        self.assertEqual(json_dict1, json_dict2)


class CANMappingTest(BaseConfigTest):
    def test_can_mapping(self):
        """CANMapping deserializes and re-serializes without losing fields."""
        can_mapping_json_dict = self.from_json_string('{"filtId":3,"bm":false,"idMask":0,"len":19,"subId":-1,"add":3,"mult":1.23,"offset":23,"div":3.45,"bigEndian":false,"type":0,"id":1234,"bus":0}')
        can_mapping_json_dict_test = CANMapping().from_json_dict(can_mapping_json_dict).to_json_dict()
        self.assert_dicts_equal(can_mapping_json_dict, can_mapping_json_dict_test)


class CANChannelTest(BaseConfigTest):
    def test_CAN_channel(self):
        """CANChannel round-trips both fixture channels unchanged."""
        can_channel_1_json_dict = self.from_json_string(CAN_CHANNEL_1)
        can_channel_1_json_dict_test = CANChannel().from_json_dict(can_channel_1_json_dict).to_json_dict()
        self.assert_dicts_equal(can_channel_1_json_dict, can_channel_1_json_dict_test)

        can_channel_2_json_dict = self.from_json_string(CAN_CHANNEL_2)
        json_channel_2_test = CANChannel().from_json_dict(can_channel_2_json_dict).to_json_dict()
        self.assert_dicts_equal(can_channel_2_json_dict, json_channel_2_test)


class CANChannelsTest(BaseConfigTest):
    def test_CAN_channels(self):
        """CANChannels round-trips; serialized form is wrapped in canChanCfg."""
        json_CAN_channels = self.from_json_string(CAN_CHANNELS)
        json_CAN_channels_test = CANChannels().from_json_dict(json_CAN_channels).to_json_dict()
        self.assert_dicts_equal({'canChanCfg': json_CAN_channels}, json_CAN_channels_test)
gpl-3.0
mormegil-cz/gnubg
scripts/matchseries.py
1
4437
# # matchseries.py # # Martin Janke <lists@janke.net>, 2004 # Achim Mueller <ace@gnubg.org>, 2004 # Joern Thyssen <jth@gnubg.org>, 2004 # # This program is free software; you can redistribute it and/or modify # it under the terms of version 3 or later of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Play an arbitray number of matches. # # For example, the script could be used to play gnubg 0-ply using # the Snowie MET against gnubg 0-ply using the Woolsey-Heinrich MET. # This is achieved by using the external player interface. # # gnubg -t << EOF # set matchequitytable "met/snowie.xml" # set evaluation chequerplay eval plies 0 # set evaluation cubed eval plies 0 # external localhost:10000 # EOF # # gnubg -t << EOF # set matchequitytable "met/woolsey.xml" # set player 1 gnu # set player 1 chequerplay evaluation plies 0 # set player 1 cubedecision evaluation plies 0 # set player 0 external localhost:10000 # > # from matchseries import * # playMatchSeries (matchLength = 3, noOfMatches = 1000, # statsFile = "statistics.txt", sgfBasePath = None, # matBasePath = None) # EOF # # $Id$ # import gnubg def playMatchSeries(statsFile=None, # log file matchLength=7, noOfMatches=100, sgfBasePath=None, # optional matBasePath=None): # optional """Starts noOfMatches matchLength pointers. 
For every match the running score, gammoms (g) and backgammons (b) and the match winner is written to 'statsFile': g gammon b backgammon (g) gammon, but not relevsnt (end of match) (b) backgammon, but not relevant g(b) backgammon, but gammon would have been enough to win the match If the optional parameters 'sgfBasePath' and 'matBasePath' are set to a path, the matches are saved as sgf or mat file.""" for i in range(0, noOfMatches): if not statsFile: raise ValueError('Parameter "statsFile" is mandatory') gnubg.command('new match ' + str(matchLength)) matchInfo = formatMatchInfo(gnubg.match(analysis=0, boards=0)) f = open(statsFile, 'a') f.write(matchInfo) f.close if sgfBasePath: gnubg.command('save match ' + sgfBasePath + str(i) + '.sgf') if matBasePath: gnubg.command('export match mat ' + matBasePath + str(i) + '.mat') def formatMatchInfo(matchInfo): tempS = '' outString = '' score = [0, 0] matchLength = matchInfo['match-info']['match-length'] for game in matchInfo['games']: pw = game['info']['points-won'] if game['info']['winner'] == 'O': winner = 1 else: winner = 0 oldScore = score[winner] score[winner] += pw cube = getCube(game) print 'cube: ', cube if pw == cube: gammon = '' elif pw == 2 * cube: gammon = 'g' if (cube + oldScore) >= matchLength: # gammon not relevant gammon = '(g)' elif pw == 3 * cube: gammon = 'b' if (cube + oldScore) >= matchLength: # backgammon not relevant gammon = '(b)' elif (2 * cube + oldScore) >= matchLength: # backgammon not relevant, but gammon gammon = 'g(b)' outString += ',%d:%d%s' % (score[0], score[1], gammon) outString = str(winner) + outString + '\n' return outString def getCube(game): """returns the cube value of the game""" doubled = 0 cube = 1 for turn in game['game']: if turn['action'] == 'double': doubled = 1 continue if doubled: if turn['action'] == 'take': cube *= 2 doubled = 0 else: # dropped break return cube
gpl-3.0
stumoodie/PathwayEditor
libs/antlr-3.4/runtime/Python/antlr3/tree.py
19
81569
""" @package antlr3.tree @brief ANTLR3 runtime package, tree module This module contains all support classes for AST construction and tree parsers. """ # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] # lot's of docstrings are missing, don't complain for now... 
# pylint: disable-msg=C0111 import re from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE from antlr3.recognizers import BaseRecognizer, RuleReturnScope from antlr3.streams import IntStream from antlr3.tokens import CommonToken, Token, INVALID_TOKEN from antlr3.exceptions import MismatchedTreeNodeException, \ MissingTokenException, UnwantedTokenException, MismatchedTokenException, \ NoViableAltException ############################################################################ # # tree related exceptions # ############################################################################ class RewriteCardinalityException(RuntimeError): """ @brief Base class for all exceptions thrown during AST rewrite construction. This signifies a case where the cardinality of two or more elements in a subrule are different: (ID INT)+ where |ID|!=|INT| """ def __init__(self, elementDescription): RuntimeError.__init__(self, elementDescription) self.elementDescription = elementDescription def getMessage(self): return self.elementDescription class RewriteEarlyExitException(RewriteCardinalityException): """@brief No elements within a (...)+ in a rewrite rule""" def __init__(self, elementDescription=None): RewriteCardinalityException.__init__(self, elementDescription) class RewriteEmptyStreamException(RewriteCardinalityException): """ @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream """ pass ############################################################################ # # basic Tree and TreeAdaptor interfaces # ############################################################################ class Tree(object): """ @brief Abstract baseclass for tree nodes. What does a tree look like? ANTLR has a number of support classes such as CommonTreeNodeStream that work on these kinds of trees. You don't have to make your trees implement this interface, but if you do, you'll be able to use more support code. 
NOTE: When constructing trees, ANTLR can build any kind of tree; it can even use Token objects as trees if you add a child list to your tokens. This is a tree node without any payload; just navigation and factory stuff. """ def getChild(self, i): raise NotImplementedError def getChildCount(self): raise NotImplementedError def getParent(self): """Tree tracks parent and child index now > 3.0""" raise NotImplementedError def setParent(self, t): """Tree tracks parent and child index now > 3.0""" raise NotImplementedError def hasAncestor(self, ttype): """Walk upwards looking for ancestor with this token type.""" raise NotImplementedError def getAncestor(self, ttype): """Walk upwards and get first ancestor with this token type.""" raise NotImplementedError def getAncestors(self): """Return a list of all ancestors of this node. The first node of list is the root and the last is the parent of this node. """ raise NotImplementedError def getChildIndex(self): """This node is what child index? 0..n-1""" raise NotImplementedError def setChildIndex(self, index): """This node is what child index? 0..n-1""" raise NotImplementedError def freshenParentAndChildIndexes(self): """Set the parent and child index values for all children""" raise NotImplementedError def addChild(self, t): """ Add t as a child to this node. If t is null, do nothing. If t is nil, add all children of t to this' children. """ raise NotImplementedError def setChild(self, i, t): """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" raise NotImplementedError def deleteChild(self, i): raise NotImplementedError def replaceChildren(self, startChildIndex, stopChildIndex, t): """ Delete children from start to stop and replace with t even if t is a list (nil-root tree). num of children can increase or decrease. For huge child lists, inserting children can force walking rest of children to set their childindex; could be slow. 
""" raise NotImplementedError def isNil(self): """ Indicates the node is a nil node but may still have children, meaning the tree is a flat list. """ raise NotImplementedError def getTokenStartIndex(self): """ What is the smallest token index (indexing from 0) for this node and its children? """ raise NotImplementedError def setTokenStartIndex(self, index): raise NotImplementedError def getTokenStopIndex(self): """ What is the largest token index (indexing from 0) for this node and its children? """ raise NotImplementedError def setTokenStopIndex(self, index): raise NotImplementedError def dupNode(self): raise NotImplementedError def getType(self): """Return a token type; needed for tree parsing.""" raise NotImplementedError def getText(self): raise NotImplementedError def getLine(self): """ In case we don't have a token payload, what is the line for errors? """ raise NotImplementedError def getCharPositionInLine(self): raise NotImplementedError def toStringTree(self): raise NotImplementedError def toString(self): raise NotImplementedError class TreeAdaptor(object): """ @brief Abstract baseclass for tree adaptors. How to create and navigate trees. Rather than have a separate factory and adaptor, I've merged them. Makes sense to encapsulate. This takes the place of the tree construction code generated in the generated code in 2.x and the ASTFactory. I do not need to know the type of a tree at all so they are all generic Objects. This may increase the amount of typecasting needed. :( """ # C o n s t r u c t i o n def createWithPayload(self, payload): """ Create a tree node from Token object; for CommonTree type trees, then the token just becomes the payload. This is the most common create call. Override if you want another kind of node to be built. """ raise NotImplementedError def dupNode(self, treeNode): """Duplicate a single tree node. 
Override if you want another kind of node to be built.""" raise NotImplementedError def dupTree(self, tree): """Duplicate tree recursively, using dupNode() for each node""" raise NotImplementedError def nil(self): """ Return a nil node (an empty but non-null node) that can hold a list of element as the children. If you want a flat tree (a list) use "t=adaptor.nil(); t.addChild(x); t.addChild(y);" """ raise NotImplementedError def errorNode(self, input, start, stop, exc): """ Return a tree node representing an error. This node records the tokens consumed during error recovery. The start token indicates the input symbol at which the error was detected. The stop token indicates the last symbol consumed during recovery. You must specify the input stream so that the erroneous text can be packaged up in the error node. The exception could be useful to some applications; default implementation stores ptr to it in the CommonErrorNode. This only makes sense during token parsing, not tree parsing. Tree parsing should happen only when parsing and tree construction succeed. """ raise NotImplementedError def isNil(self, tree): """Is tree considered a nil node used to make lists of child nodes?""" raise NotImplementedError def addChild(self, t, child): """ Add a child to the tree t. If child is a flat tree (a list), make all in list children of t. Warning: if t has no children, but child does and child isNil then you can decide it is ok to move children to t via t.children = child.children; i.e., without copying the array. Just make sure that this is consistent with have the user will build ASTs. Do nothing if t or child is null. """ raise NotImplementedError def becomeRoot(self, newRoot, oldRoot): """ If oldRoot is a nil root, just copy or move the children to newRoot. If not a nil root, make oldRoot a child of newRoot. 
old=^(nil a b c), new=r yields ^(r a b c) old=^(a b c), new=r yields ^(r ^(a b c)) If newRoot is a nil-rooted single child tree, use the single child as the new root node. old=^(nil a b c), new=^(nil r) yields ^(r a b c) old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) If oldRoot was null, it's ok, just return newRoot (even if isNil). old=null, new=r yields r old=null, new=^(nil r) yields ^(nil r) Return newRoot. Throw an exception if newRoot is not a simple node or nil root with a single child node--it must be a root node. If newRoot is ^(nil x) return x as newRoot. Be advised that it's ok for newRoot to point at oldRoot's children; i.e., you don't have to copy the list. We are constructing these nodes so we should have this control for efficiency. """ raise NotImplementedError def rulePostProcessing(self, root): """ Given the root of the subtree created for this rule, post process it to do any simplifications or whatever you want. A required behavior is to convert ^(nil singleSubtree) to singleSubtree as the setting of start/stop indexes relies on a single non-nil root for non-flat trees. Flat trees such as for lists like "idlist : ID+ ;" are left alone unless there is only one ID. For a list, the start/stop indexes are set in the nil node. This method is executed after all rule tree construction and right before setTokenBoundaries(). """ raise NotImplementedError def getUniqueID(self, node): """For identifying trees. How to identify nodes so we can say "add node to a prior node"? Even becomeRoot is an issue. Use System.identityHashCode(node) usually. """ raise NotImplementedError # R e w r i t e R u l e s def createFromToken(self, tokenType, fromToken, text=None): """ Create a new node derived from a token, with a new token type and (optionally) new text. This is invoked from an imaginary node ref on right side of a rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"]. This should invoke createToken(Token). 
""" raise NotImplementedError def createFromType(self, tokenType, text): """Create a new node derived from a token, with a new token type. This is invoked from an imaginary node ref on right side of a rewrite rule as IMAG["IMAG"]. This should invoke createToken(int,String). """ raise NotImplementedError # C o n t e n t def getType(self, t): """For tree parsing, I need to know the token type of a node""" raise NotImplementedError def setType(self, t, type): """Node constructors can set the type of a node""" raise NotImplementedError def getText(self, t): raise NotImplementedError def setText(self, t, text): """Node constructors can set the text of a node""" raise NotImplementedError def getToken(self, t): """Return the token object from which this node was created. Currently used only for printing an error message. The error display routine in BaseRecognizer needs to display where the input the error occurred. If your tree of limitation does not store information that can lead you to the token, you can create a token filled with the appropriate information and pass that back. See BaseRecognizer.getErrorMessage(). """ raise NotImplementedError def setTokenBoundaries(self, t, startToken, stopToken): """ Where are the bounds in the input token stream for this node and all children? Each rule that creates AST nodes will call this method right before returning. Flat trees (i.e., lists) will still usually have a nil root node just to hold the children list. That node would contain the start/stop indexes then. 
""" raise NotImplementedError def getTokenStartIndex(self, t): """ Get the token start index for this subtree; return -1 if no such index """ raise NotImplementedError def getTokenStopIndex(self, t): """ Get the token stop index for this subtree; return -1 if no such index """ raise NotImplementedError # N a v i g a t i o n / T r e e P a r s i n g def getChild(self, t, i): """Get a child 0..n-1 node""" raise NotImplementedError def setChild(self, t, i, child): """Set ith child (0..n-1) to t; t must be non-null and non-nil node""" raise NotImplementedError def deleteChild(self, t, i): """Remove ith child and shift children down from right.""" raise NotImplementedError def getChildCount(self, t): """How many children? If 0, then this is a leaf node""" raise NotImplementedError def getParent(self, t): """ Who is the parent node of this node; if null, implies node is root. If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def setParent(self, t, parent): """ Who is the parent node of this node; if null, implies node is root. If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def getChildIndex(self, t): """ What index is this node in the child list? Range: 0..n-1 If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def setChildIndex(self, t, index): """ What index is this node in the child list? Range: 0..n-1 If your node type doesn't handle this, it's ok but the tree rewrites in tree parsers need this functionality. """ raise NotImplementedError def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): """ Replace from start to stop child index of parent with t, which might be a list. Number of children may be different after this call. 
If parent is null, don't do anything; must be at root of overall tree. Can't replace whatever points to the parent externally. Do nothing. """ raise NotImplementedError # Misc def create(self, *args): """ Deprecated, use createWithPayload, createFromToken or createFromType. This method only exists to mimic the Java interface of TreeAdaptor. """ if len(args) == 1 and isinstance(args[0], Token): # Object create(Token payload); ## warnings.warn( ## "Using create() is deprecated, use createWithPayload()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createWithPayload(args[0]) if (len(args) == 2 and isinstance(args[0], (int, long)) and isinstance(args[1], Token) ): # Object create(int tokenType, Token fromToken); ## warnings.warn( ## "Using create() is deprecated, use createFromToken()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createFromToken(args[0], args[1]) if (len(args) == 3 and isinstance(args[0], (int, long)) and isinstance(args[1], Token) and isinstance(args[2], basestring) ): # Object create(int tokenType, Token fromToken, String text); ## warnings.warn( ## "Using create() is deprecated, use createFromToken()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createFromToken(args[0], args[1], args[2]) if (len(args) == 2 and isinstance(args[0], (int, long)) and isinstance(args[1], basestring) ): # Object create(int tokenType, String text); ## warnings.warn( ## "Using create() is deprecated, use createFromType()", ## DeprecationWarning, ## stacklevel=2 ## ) return self.createFromType(args[0], args[1]) raise TypeError( "No create method with this signature found: %s" % (', '.join(type(v).__name__ for v in args)) ) ############################################################################ # # base implementation of Tree and TreeAdaptor # # Tree # \- BaseTree # # TreeAdaptor # \- BaseTreeAdaptor # ############################################################################ class BaseTree(Tree): """ @brief A generic tree 
implementation with no payload. You must subclass to actually have any user data. ANTLR v3 uses a list of children approach instead of the child-sibling approach in v2. A flat tree (a list) is an empty node whose children represent the list. An empty, but non-null node is called "nil". """ # BaseTree is abstract, no need to complain about not implemented abstract # methods # pylint: disable-msg=W0223 def __init__(self, node=None): """ Create a new node from an existing node does nothing for BaseTree as there are no fields other than the children list, which cannot be copied as the children are not considered part of this node. """ Tree.__init__(self) self.children = [] self.parent = None self.childIndex = 0 def getChild(self, i): try: return self.children[i] except IndexError: return None def getChildren(self): """@brief Get the children internal List Note that if you directly mess with the list, do so at your own risk. """ # FIXME: mark as deprecated return self.children def getFirstChildWithType(self, treeType): for child in self.children: if child.getType() == treeType: return child return None def getChildCount(self): return len(self.children) def addChild(self, childTree): """Add t as child of this node. Warning: if t has no children, but child does and child isNil then this routine moves children to t via t.children = child.children; i.e., without copying the array. """ # this implementation is much simpler and probably less efficient # than the mumbo-jumbo that Ter did for the Java runtime. 
if childTree is None: return if childTree.isNil(): # t is an empty node possibly with children if self.children is childTree.children: raise ValueError("attempt to add child list to itself") # fix parent pointer and childIndex for new children for idx, child in enumerate(childTree.children): child.parent = self child.childIndex = len(self.children) + idx self.children += childTree.children else: # child is not nil (don't care about children) self.children.append(childTree) childTree.parent = self childTree.childIndex = len(self.children) - 1 def addChildren(self, children): """Add all elements of kids list as children of this node""" self.children += children def setChild(self, i, t): if t is None: return if t.isNil(): raise ValueError("Can't set single child to a list") self.children[i] = t t.parent = self t.childIndex = i def deleteChild(self, i): killed = self.children[i] del self.children[i] # walk rest and decrement their child indexes for idx, child in enumerate(self.children[i:]): child.childIndex = i + idx return killed def replaceChildren(self, startChildIndex, stopChildIndex, newTree): """ Delete children from start to stop and replace with t even if t is a list (nil-root tree). num of children can increase or decrease. For huge child lists, inserting children can force walking rest of children to set their childindex; could be slow. 
""" if (startChildIndex >= len(self.children) or stopChildIndex >= len(self.children) ): raise IndexError("indexes invalid") replacingHowMany = stopChildIndex - startChildIndex + 1 # normalize to a list of children to add: newChildren if newTree.isNil(): newChildren = newTree.children else: newChildren = [newTree] replacingWithHowMany = len(newChildren) delta = replacingHowMany - replacingWithHowMany if delta == 0: # if same number of nodes, do direct replace for idx, child in enumerate(newChildren): self.children[idx + startChildIndex] = child child.parent = self child.childIndex = idx + startChildIndex else: # length of children changes... # ...delete replaced segment... del self.children[startChildIndex:stopChildIndex+1] # ...insert new segment... self.children[startChildIndex:startChildIndex] = newChildren # ...and fix indeces self.freshenParentAndChildIndexes(startChildIndex) def isNil(self): return False def freshenParentAndChildIndexes(self, offset=0): for idx, child in enumerate(self.children[offset:]): child.childIndex = idx + offset child.parent = self def sanityCheckParentAndChildIndexes(self, parent=None, i=-1): if parent != self.parent: raise ValueError( "parents don't match; expected %r found %r" % (parent, self.parent) ) if i != self.childIndex: raise ValueError( "child indexes don't match; expected %d found %d" % (i, self.childIndex) ) for idx, child in enumerate(self.children): child.sanityCheckParentAndChildIndexes(self, idx) def getChildIndex(self): """BaseTree doesn't track child indexes.""" return 0 def setChildIndex(self, index): """BaseTree doesn't track child indexes.""" pass def getParent(self): """BaseTree doesn't track parent pointers.""" return None def setParent(self, t): """BaseTree doesn't track parent pointers.""" pass def hasAncestor(self, ttype): """Walk upwards looking for ancestor with this token type.""" return self.getAncestor(ttype) is not None def getAncestor(self, ttype): """Walk upwards and get first ancestor with this 
token type.""" t = self.getParent() while t is not None: if t.getType() == ttype: return t t = t.getParent() return None def getAncestors(self): """Return a list of all ancestors of this node. The first node of list is the root and the last is the parent of this node. """ if selfgetParent() is None: return None ancestors = [] t = self.getParent() while t is not None: ancestors.insert(0, t) # insert at start t = t.getParent() return ancestors def toStringTree(self): """Print out a whole tree not just a node""" if len(self.children) == 0: return self.toString() buf = [] if not self.isNil(): buf.append('(') buf.append(self.toString()) buf.append(' ') for i, child in enumerate(self.children): if i > 0: buf.append(' ') buf.append(child.toStringTree()) if not self.isNil(): buf.append(')') return ''.join(buf) def getLine(self): return 0 def getCharPositionInLine(self): return 0 def toString(self): """Override to say how a node (not a tree) should look as text""" raise NotImplementedError class BaseTreeAdaptor(TreeAdaptor): """ @brief A TreeAdaptor that works with any Tree implementation. """ # BaseTreeAdaptor is abstract, no need to complain about not implemented # abstract methods # pylint: disable-msg=W0223 def nil(self): return self.createWithPayload(None) def errorNode(self, input, start, stop, exc): """ create tree node that holds the start and stop tokens associated with an error. If you specify your own kind of tree nodes, you will likely have to override this method. CommonTree returns Token.INVALID_TOKEN_TYPE if no token payload but you might have to set token type for diff node type. You don't have to subclass CommonErrorNode; you will likely need to subclass your own tree node class to avoid class cast exception. """ return CommonErrorNode(input, start, stop, exc) def isNil(self, tree): return tree.isNil() def dupTree(self, t, parent=None): """ This is generic in the sense that it will work with any kind of tree (not just Tree interface). 
It invokes the adaptor routines not the tree node routines to do the construction. """ if t is None: return None newTree = self.dupNode(t) # ensure new subtree root has parent/child index set # same index in new tree self.setChildIndex(newTree, self.getChildIndex(t)) self.setParent(newTree, parent) for i in range(self.getChildCount(t)): child = self.getChild(t, i) newSubTree = self.dupTree(child, t) self.addChild(newTree, newSubTree) return newTree def addChild(self, tree, child): """ Add a child to the tree t. If child is a flat tree (a list), make all in list children of t. Warning: if t has no children, but child does and child isNil then you can decide it is ok to move children to t via t.children = child.children; i.e., without copying the array. Just make sure that this is consistent with have the user will build ASTs. """ #if isinstance(child, Token): # child = self.createWithPayload(child) if tree is not None and child is not None: tree.addChild(child) def becomeRoot(self, newRoot, oldRoot): """ If oldRoot is a nil root, just copy or move the children to newRoot. If not a nil root, make oldRoot a child of newRoot. old=^(nil a b c), new=r yields ^(r a b c) old=^(a b c), new=r yields ^(r ^(a b c)) If newRoot is a nil-rooted single child tree, use the single child as the new root node. old=^(nil a b c), new=^(nil r) yields ^(r a b c) old=^(a b c), new=^(nil r) yields ^(r ^(a b c)) If oldRoot was null, it's ok, just return newRoot (even if isNil). old=null, new=r yields r old=null, new=^(nil r) yields ^(nil r) Return newRoot. Throw an exception if newRoot is not a simple node or nil root with a single child node--it must be a root node. If newRoot is ^(nil x) return x as newRoot. Be advised that it's ok for newRoot to point at oldRoot's children; i.e., you don't have to copy the list. We are constructing these nodes so we should have this control for efficiency. 
""" if isinstance(newRoot, Token): newRoot = self.create(newRoot) if oldRoot is None: return newRoot if not isinstance(newRoot, CommonTree): newRoot = self.createWithPayload(newRoot) # handle ^(nil real-node) if newRoot.isNil(): nc = newRoot.getChildCount() if nc == 1: newRoot = newRoot.getChild(0) elif nc > 1: # TODO: make tree run time exceptions hierarchy raise RuntimeError("more than one node as root") # add oldRoot to newRoot; addChild takes care of case where oldRoot # is a flat list (i.e., nil-rooted tree). All children of oldRoot # are added to newRoot. newRoot.addChild(oldRoot) return newRoot def rulePostProcessing(self, root): """Transform ^(nil x) to x and nil to null""" if root is not None and root.isNil(): if root.getChildCount() == 0: root = None elif root.getChildCount() == 1: root = root.getChild(0) # whoever invokes rule will set parent and child index root.setParent(None) root.setChildIndex(-1) return root def createFromToken(self, tokenType, fromToken, text=None): if fromToken is None: return self.createFromType(tokenType, text) assert isinstance(tokenType, (int, long)), type(tokenType).__name__ assert isinstance(fromToken, Token), type(fromToken).__name__ assert text is None or isinstance(text, basestring), type(text).__name__ fromToken = self.createToken(fromToken) fromToken.type = tokenType if text is not None: fromToken.text = text t = self.createWithPayload(fromToken) return t def createFromType(self, tokenType, text): assert isinstance(tokenType, (int, long)), type(tokenType).__name__ assert isinstance(text, basestring) or text is None, type(text).__name__ fromToken = self.createToken(tokenType=tokenType, text=text) t = self.createWithPayload(fromToken) return t def getType(self, t): return t.getType() def setType(self, t, type): raise RuntimeError("don't know enough about Tree node") def getText(self, t): return t.getText() def setText(self, t, text): raise RuntimeError("don't know enough about Tree node") def getChild(self, t, i): return 
t.getChild(i) def setChild(self, t, i, child): t.setChild(i, child) def deleteChild(self, t, i): return t.deleteChild(i) def getChildCount(self, t): return t.getChildCount() def getUniqueID(self, node): return hash(node) def createToken(self, fromToken=None, tokenType=None, text=None): """ Tell me how to create a token for use with imaginary token nodes. For example, there is probably no input symbol associated with imaginary token DECL, but you need to create it as a payload or whatever for the DECL node as in ^(DECL type ID). If you care what the token payload objects' type is, you should override this method and any other createToken variant. """ raise NotImplementedError ############################################################################ # # common tree implementation # # Tree # \- BaseTree # \- CommonTree # \- CommonErrorNode # # TreeAdaptor # \- BaseTreeAdaptor # \- CommonTreeAdaptor # ############################################################################ class CommonTree(BaseTree): """@brief A tree node that is wrapper for a Token object. After 3.0 release while building tree rewrite stuff, it became clear that computing parent and child index is very difficult and cumbersome. Better to spend the space in every tree node. If you don't want these extra fields, it's easy to cut them out in your own BaseTree subclass. """ def __init__(self, payload): BaseTree.__init__(self) # What token indexes bracket all tokens associated with this node # and below? self.startIndex = -1 self.stopIndex = -1 # Who is the parent node of this node; if null, implies node is root self.parent = None # What index is this node in the child list? 
Range: 0..n-1 self.childIndex = -1 # A single token is the payload if payload is None: self.token = None elif isinstance(payload, CommonTree): self.token = payload.token self.startIndex = payload.startIndex self.stopIndex = payload.stopIndex elif payload is None or isinstance(payload, Token): self.token = payload else: raise TypeError(type(payload).__name__) def getToken(self): return self.token def dupNode(self): return CommonTree(self) def isNil(self): return self.token is None def getType(self): if self.token is None: return INVALID_TOKEN_TYPE return self.token.getType() type = property(getType) def getText(self): if self.token is None: return None return self.token.text text = property(getText) def getLine(self): if self.token is None or self.token.getLine() == 0: if self.getChildCount(): return self.getChild(0).getLine() else: return 0 return self.token.getLine() line = property(getLine) def getCharPositionInLine(self): if self.token is None or self.token.getCharPositionInLine() == -1: if self.getChildCount(): return self.getChild(0).getCharPositionInLine() else: return 0 else: return self.token.getCharPositionInLine() charPositionInLine = property(getCharPositionInLine) def getTokenStartIndex(self): if self.startIndex == -1 and self.token is not None: return self.token.getTokenIndex() return self.startIndex def setTokenStartIndex(self, index): self.startIndex = index tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex) def getTokenStopIndex(self): if self.stopIndex == -1 and self.token is not None: return self.token.getTokenIndex() return self.stopIndex def setTokenStopIndex(self, index): self.stopIndex = index tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex) def setUnknownTokenBoundaries(self): """For every node in this subtree, make sure it's start/stop token's are set. Walk depth first, visit bottom up. Only updates nodes with at least one token index < 0. 
""" if self.children is None: if self.startIndex < 0 or self.stopIndex < 0: self.startIndex = self.stopIndex = self.token.getTokenIndex() return for child in self.children: child.setUnknownTokenBoundaries() if self.startIndex >= 0 and self.stopIndex >= 0: # already set return if self.children: firstChild = self.children[0] lastChild = self.children[-1] self.startIndex = firstChild.getTokenStartIndex() self.stopIndex = lastChild.getTokenStopIndex() def getChildIndex(self): #FIXME: mark as deprecated return self.childIndex def setChildIndex(self, idx): #FIXME: mark as deprecated self.childIndex = idx def getParent(self): #FIXME: mark as deprecated return self.parent def setParent(self, t): #FIXME: mark as deprecated self.parent = t def toString(self): if self.isNil(): return "nil" if self.getType() == INVALID_TOKEN_TYPE: return "<errornode>" return self.token.text __str__ = toString def toStringTree(self): if not self.children: return self.toString() ret = '' if not self.isNil(): ret += '(%s ' % (self.toString()) ret += ' '.join([child.toStringTree() for child in self.children]) if not self.isNil(): ret += ')' return ret INVALID_NODE = CommonTree(INVALID_TOKEN) class CommonErrorNode(CommonTree): """A node representing erroneous token range in token stream""" def __init__(self, input, start, stop, exc): CommonTree.__init__(self, None) if (stop is None or (stop.getTokenIndex() < start.getTokenIndex() and stop.getType() != EOF ) ): # sometimes resync does not consume a token (when LT(1) is # in follow set. So, stop will be 1 to left to start. adjust. # Also handle case where start is the first token and no token # is consumed during recovery; LT(-1) will return null. 
stop = start self.input = input self.start = start self.stop = stop self.trappedException = exc def isNil(self): return False def getType(self): return INVALID_TOKEN_TYPE def getText(self): if isinstance(self.start, Token): i = self.start.getTokenIndex() j = self.stop.getTokenIndex() if self.stop.getType() == EOF: j = self.input.size() badText = self.input.toString(i, j) elif isinstance(self.start, Tree): badText = self.input.toString(self.start, self.stop) else: # people should subclass if they alter the tree type so this # next one is for sure correct. badText = "<unknown>" return badText def toString(self): if isinstance(self.trappedException, MissingTokenException): return ("<missing type: " + str(self.trappedException.getMissingType()) + ">") elif isinstance(self.trappedException, UnwantedTokenException): return ("<extraneous: " + str(self.trappedException.getUnexpectedToken()) + ", resync=" + self.getText() + ">") elif isinstance(self.trappedException, MismatchedTokenException): return ("<mismatched token: " + str(self.trappedException.token) + ", resync=" + self.getText() + ">") elif isinstance(self.trappedException, NoViableAltException): return ("<unexpected: " + str(self.trappedException.token) + ", resync=" + self.getText() + ">") return "<error: "+self.getText()+">" class CommonTreeAdaptor(BaseTreeAdaptor): """ @brief A TreeAdaptor that works with any Tree implementation. It provides really just factory methods; all the work is done by BaseTreeAdaptor. If you would like to have different tokens created than ClassicToken objects, you need to override this and then set the parser tree adaptor to use your subclass. To get your parser to build nodes of a different type, override create(Token), errorNode(), and to be safe, YourTreeClass.dupNode(). dupNode is called to duplicate nodes during rewrite operations. """ def dupNode(self, treeNode): """ Duplicate a node. This is part of the factory; override if you want another kind of node to be built. 
I could use reflection to prevent having to override this but reflection is slow. """ if treeNode is None: return None return treeNode.dupNode() def createWithPayload(self, payload): return CommonTree(payload) def createToken(self, fromToken=None, tokenType=None, text=None): """ Tell me how to create a token for use with imaginary token nodes. For example, there is probably no input symbol associated with imaginary token DECL, but you need to create it as a payload or whatever for the DECL node as in ^(DECL type ID). If you care what the token payload objects' type is, you should override this method and any other createToken variant. """ if fromToken is not None: return CommonToken(oldToken=fromToken) return CommonToken(type=tokenType, text=text) def setTokenBoundaries(self, t, startToken, stopToken): """ Track start/stop token for subtree root created for a rule. Only works with Tree nodes. For rules that match nothing, seems like this will yield start=i and stop=i-1 in a nil node. Might be useful info so I'll not force to be i..i. """ if t is None: return start = 0 stop = 0 if startToken is not None: start = startToken.index if stopToken is not None: stop = stopToken.index t.setTokenStartIndex(start) t.setTokenStopIndex(stop) def getTokenStartIndex(self, t): if t is None: return -1 return t.getTokenStartIndex() def getTokenStopIndex(self, t): if t is None: return -1 return t.getTokenStopIndex() def getText(self, t): if t is None: return None return t.getText() def getType(self, t): if t is None: return INVALID_TOKEN_TYPE return t.getType() def getToken(self, t): """ What is the Token associated with this node? If you are not using CommonTree, then you must override this in your own adaptor. 
""" if isinstance(t, CommonTree): return t.getToken() return None # no idea what to do def getChild(self, t, i): if t is None: return None return t.getChild(i) def getChildCount(self, t): if t is None: return 0 return t.getChildCount() def getParent(self, t): return t.getParent() def setParent(self, t, parent): t.setParent(parent) def getChildIndex(self, t): if t is None: return 0 return t.getChildIndex() def setChildIndex(self, t, index): t.setChildIndex(index) def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): if parent is not None: parent.replaceChildren(startChildIndex, stopChildIndex, t) ############################################################################ # # streams # # TreeNodeStream # \- BaseTree # \- CommonTree # # TreeAdaptor # \- BaseTreeAdaptor # \- CommonTreeAdaptor # ############################################################################ class TreeNodeStream(IntStream): """@brief A stream of tree nodes It accessing nodes from a tree of some kind. """ # TreeNodeStream is abstract, no need to complain about not implemented # abstract methods # pylint: disable-msg=W0223 def get(self, i): """Get a tree node at an absolute index i; 0..n-1. If you don't want to buffer up nodes, then this method makes no sense for you. """ raise NotImplementedError def LT(self, k): """ Get tree node at current input pointer + i ahead where i=1 is next node. i<0 indicates nodes in the past. So LT(-1) is previous node, but implementations are not required to provide results for k < -1. LT(0) is undefined. For i>=n, return null. Return null for LT(0) and any index that results in an absolute address that is negative. This is analogus to the LT() method of the TokenStream, but this returns a tree node instead of a token. Makes code gen identical for both parser and tree grammars. :) """ raise NotImplementedError def getTreeSource(self): """ Where is this stream pulling nodes from? This is not the name, but the object that provides node objects. 
""" raise NotImplementedError def getTokenStream(self): """ If the tree associated with this stream was created from a TokenStream, you can specify it here. Used to do rule $text attribute in tree parser. Optional unless you use tree parser rule text attribute or output=template and rewrite=true options. """ raise NotImplementedError def getTreeAdaptor(self): """ What adaptor can tell me how to interpret/navigate nodes and trees. E.g., get text of a node. """ raise NotImplementedError def setUniqueNavigationNodes(self, uniqueNavigationNodes): """ As we flatten the tree, we use UP, DOWN nodes to represent the tree structure. When debugging we need unique nodes so we have to instantiate new ones. When doing normal tree parsing, it's slow and a waste of memory to create unique navigation nodes. Default should be false; """ raise NotImplementedError def reset(self): """ Reset the tree node stream in such a way that it acts like a freshly constructed stream. """ raise NotImplementedError def toString(self, start, stop): """ Return the text of all nodes from start to stop, inclusive. If the stream does not buffer all the nodes then it can still walk recursively from start until stop. You can always return null or "" too, but users should not access $ruleLabel.text in an action of course in that case. """ raise NotImplementedError # REWRITING TREES (used by tree parser) def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): """ Replace from start to stop child index of parent with t, which might be a list. Number of children may be different after this call. The stream is notified because it is walking the tree and might need to know you are monkeying with the underlying tree. Also, it might be able to modify the node stream to avoid restreaming for future phases. If parent is null, don't do anything; must be at root of overall tree. Can't replace whatever points to the parent externally. Do nothing. 
""" raise NotImplementedError class CommonTreeNodeStream(TreeNodeStream): """@brief A buffered stream of tree nodes. Nodes can be from a tree of ANY kind. This node stream sucks all nodes out of the tree specified in the constructor during construction and makes pointers into the tree using an array of Object pointers. The stream necessarily includes pointers to DOWN and UP and EOF nodes. This stream knows how to mark/release for backtracking. This stream is most suitable for tree interpreters that need to jump around a lot or for tree parsers requiring speed (at cost of memory). There is some duplicated functionality here with UnBufferedTreeNodeStream but just in bookkeeping, not tree walking etc... @see UnBufferedTreeNodeStream """ def __init__(self, *args): TreeNodeStream.__init__(self) if len(args) == 1: adaptor = CommonTreeAdaptor() tree = args[0] nodes = None down = None up = None eof = None elif len(args) == 2: adaptor = args[0] tree = args[1] nodes = None down = None up = None eof = None elif len(args) == 3: parent = args[0] start = args[1] stop = args[2] adaptor = parent.adaptor tree = parent.root nodes = parent.nodes[start:stop] down = parent.down up = parent.up eof = parent.eof else: raise TypeError("Invalid arguments") # all these navigation nodes are shared and hence they # cannot contain any line/column info if down is not None: self.down = down else: self.down = adaptor.createFromType(DOWN, "DOWN") if up is not None: self.up = up else: self.up = adaptor.createFromType(UP, "UP") if eof is not None: self.eof = eof else: self.eof = adaptor.createFromType(EOF, "EOF") # The complete mapping from stream index to tree node. # This buffer includes pointers to DOWN, UP, and EOF nodes. # It is built upon ctor invocation. The elements are type # Object as we don't what the trees look like. # Load upon first need of the buffer so we can set token types # of interest for reverseIndexing. 
        # Slows us down a wee bit to
        # do all of the if p==-1 testing everywhere though.
        if nodes is not None:
            self.nodes = nodes
        else:
            self.nodes = []

        # Pull nodes from which tree?
        self.root = tree

        # IF this tree (root) was created from a token stream, track it.
        self.tokens = None

        # What tree adaptor was used to build these trees
        self.adaptor = adaptor

        # Reuse same DOWN, UP navigation nodes unless this is true
        self.uniqueNavigationNodes = False

        # The index into the nodes list of the current node (next node
        # to consume).  If -1, nodes array not filled yet.
        self.p = -1

        # Track the last mark() call result value for use in rewind().
        self.lastMarker = None

        # Stack of indexes used for push/pop calls
        self.calls = []

    def __iter__(self):
        # NOTE(review): a second __iter__ is defined later in this class and
        # shadows this one at class-creation time — confirm which is intended.
        return TreeIterator(self.root, self.adaptor)

    def fillBuffer(self):
        """Walk tree with depth-first-search and fill nodes buffer.

        Don't do DOWN, UP nodes if its a list (t is isNil).
        """
        self._fillBuffer(self.root)
        self.p = 0 # buffer of nodes initialized now

    def _fillBuffer(self, t):
        # Recursive DFS helper: append t, then DOWN, children, UP.
        nil = self.adaptor.isNil(t)

        if not nil:
            self.nodes.append(t) # add this node

        # add DOWN node if t has children
        n = self.adaptor.getChildCount(t)
        if not nil and n > 0:
            self.addNavigationNode(DOWN)

        # and now add all its children
        for c in range(n):
            self._fillBuffer(self.adaptor.getChild(t, c))

        # add UP node if t has children
        if not nil and n > 0:
            self.addNavigationNode(UP)

    def getNodeIndex(self, node):
        """What is the stream index for node? 0..n-1
        Return -1 if node not found.
        """
        if self.p == -1:
            self.fillBuffer()

        for i, t in enumerate(self.nodes):
            if t == node:
                return i

        return -1

    def addNavigationNode(self, ttype):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure.  When debugging we need unique nodes
        so instantiate new ones when uniqueNavigationNodes is true.
        """
        navNode = None

        if ttype == DOWN:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(DOWN, "DOWN")
            else:
                navNode = self.down

        else:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(UP, "UP")
            else:
                navNode = self.up

        self.nodes.append(navNode)

    def get(self, i):
        # Absolute access; forces the lazy buffer fill.
        if self.p == -1:
            self.fillBuffer()

        return self.nodes[i]

    def LT(self, k):
        # Lookahead k nodes (k=1 is next); negative k looks backwards;
        # past the end returns the shared EOF node.
        if self.p == -1:
            self.fillBuffer()

        if k == 0:
            return None

        if k < 0:
            return self.LB(-k)

        if self.p + k - 1 >= len(self.nodes):
            return self.eof

        return self.nodes[self.p + k - 1]

    def getCurrentSymbol(self):
        return self.LT(1)

    def LB(self, k):
        """Look backwards k nodes"""
        if k == 0:
            return None

        if self.p - k < 0:
            return None

        return self.nodes[self.p - k]

    def isEOF(self, obj):
        return self.adaptor.getType(obj) == EOF

    def getTreeSource(self):
        return self.root

    def getSourceName(self):
        return self.getTokenStream().getSourceName()

    def getTokenStream(self):
        return self.tokens

    def setTokenStream(self, tokens):
        self.tokens = tokens

    def getTreeAdaptor(self):
        return self.adaptor

    def hasUniqueNavigationNodes(self):
        return self.uniqueNavigationNodes

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        self.uniqueNavigationNodes = uniqueNavigationNodes

    def consume(self):
        if self.p == -1:
            self.fillBuffer()

        self.p += 1

    def LA(self, i):
        return self.adaptor.getType(self.LT(i))

    def mark(self):
        # Record current position for a later rewind().
        if self.p == -1:
            self.fillBuffer()

        self.lastMarker = self.index()
        return self.lastMarker

    def release(self, marker=None):
        # no resources to release
        pass

    def index(self):
        return self.p

    def rewind(self, marker=None):
        # rewind() with no argument goes back to the last mark().
        if marker is None:
            marker = self.lastMarker

        self.seek(marker)

    def seek(self, index):
        if self.p == -1:
            self.fillBuffer()

        self.p = index

    def push(self, index):
        """
        Make stream jump to a new location, saving old location.
        Switch back with pop().
        """
        self.calls.append(self.p) # save current index
        self.seek(index)

    def pop(self):
        """
        Seek back to previous index saved during last push() call.
Return top of stack (return index). """ ret = self.calls.pop(-1) self.seek(ret) return ret def reset(self): self.p = 0 self.lastMarker = 0 self.calls = [] def size(self): if self.p == -1: self.fillBuffer() return len(self.nodes) # TREE REWRITE INTERFACE def replaceChildren(self, parent, startChildIndex, stopChildIndex, t): if parent is not None: self.adaptor.replaceChildren( parent, startChildIndex, stopChildIndex, t ) def __str__(self): """Used for testing, just return the token type stream""" if self.p == -1: self.fillBuffer() return ' '.join([str(self.adaptor.getType(node)) for node in self.nodes ]) def toString(self, start, stop): if start is None or stop is None: return None if self.p == -1: self.fillBuffer() #System.out.println("stop: "+stop); #if ( start instanceof CommonTree ) # System.out.print("toString: "+((CommonTree)start).getToken()+", "); #else # System.out.println(start); #if ( stop instanceof CommonTree ) # System.out.println(((CommonTree)stop).getToken()); #else # System.out.println(stop); # if we have the token stream, use that to dump text in order if self.tokens is not None: beginTokenIndex = self.adaptor.getTokenStartIndex(start) endTokenIndex = self.adaptor.getTokenStopIndex(stop) # if it's a tree, use start/stop index from start node # else use token range from start/stop nodes if self.adaptor.getType(stop) == UP: endTokenIndex = self.adaptor.getTokenStopIndex(start) elif self.adaptor.getType(stop) == EOF: endTokenIndex = self.size() -2 # don't use EOF return self.tokens.toString(beginTokenIndex, endTokenIndex) # walk nodes looking for start i, t = 0, None for i, t in enumerate(self.nodes): if t == start: break # now walk until we see stop, filling string buffer with text buf = [] t = self.nodes[i] while t != stop: text = self.adaptor.getText(t) if text is None: text = " " + self.adaptor.getType(t) buf.append(text) i += 1 t = self.nodes[i] # include stop node too text = self.adaptor.getText(stop) if text is None: text = " " 
+self.adaptor.getType(stop) buf.append(text) return ''.join(buf) ## iterator interface def __iter__(self): if self.p == -1: self.fillBuffer() for node in self.nodes: yield node ############################################################################# # # tree parser # ############################################################################# class TreeParser(BaseRecognizer): """@brief Baseclass for generated tree parsers. A parser for a stream of tree nodes. "tree grammars" result in a subclass of this. All the error reporting and recovery is shared with Parser via the BaseRecognizer superclass. """ def __init__(self, input, state=None): BaseRecognizer.__init__(self, state) self.input = None self.setTreeNodeStream(input) def reset(self): BaseRecognizer.reset(self) # reset all recognizer state variables if self.input is not None: self.input.seek(0) # rewind the input def setTreeNodeStream(self, input): """Set the input stream""" self.input = input def getTreeNodeStream(self): return self.input def getSourceName(self): return self.input.getSourceName() def getCurrentInputSymbol(self, input): return input.LT(1) def getMissingSymbol(self, input, e, expectedTokenType, follow): tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">" adaptor = input.adaptor return adaptor.createToken( CommonToken(type=expectedTokenType, text=tokenText)) # precompiled regex used by inContext dotdot = ".*[^.]\\.\\.[^.].*" doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*" dotdotPattern = re.compile(dotdot) doubleEtcPattern = re.compile(doubleEtc) def inContext(self, context, adaptor=None, tokenName=None, t=None): """Check if current node in input has a context. Context means sequence of nodes towards root of tree. For example, you might say context is "MULT" which means my parent must be MULT. "CLASS VARDEF" says current node must be child of a VARDEF and whose parent is a CLASS node. You can use "..." to mean zero-or-more nodes. "METHOD ... 
VARDEF" means my parent is VARDEF and somewhere above that is a METHOD node. The first node in the context is not necessarily the root. The context matcher stops matching and returns true when it runs out of context. There is no way to force the first node to be the root. """ return _inContext( self.input.getTreeAdaptor(), self.getTokenNames(), self.input.LT(1), context) @classmethod def _inContext(cls, adaptor, tokenNames, t, context): """The worker for inContext. It's static and full of parameters for testing purposes. """ if cls.dotdotPattern.match(context): # don't allow "..", must be "..." raise ValueError("invalid syntax: ..") if cls.doubleEtcPattern.match(context): # don't allow double "..." raise ValueError("invalid syntax: ... ...") # ensure spaces around ... context = context.replace("...", " ... ") context = context.strip() nodes = context.split() ni = len(nodes) - 1 t = adaptor.getParent(t) while ni >= 0 and t is not None: if nodes[ni] == "...": # walk upwards until we see nodes[ni-1] then continue walking if ni == 0: # ... at start is no-op return True goal = nodes[ni-1] ancestor = cls._getAncestor(adaptor, tokenNames, t, goal) if ancestor is None: return False t = ancestor ni -= 1 name = tokenNames[adaptor.getType(t)] if name != nodes[ni]: return False # advance to parent and to previous element in context node list ni -= 1 t = adaptor.getParent(t) # at root but more nodes to match if t is None and ni >= 0: return False return True @staticmethod def _getAncestor(adaptor, tokenNames, t, goal): """Helper for static inContext.""" while t is not None: name = tokenNames[adaptor.getType(t)] if name == goal: return t t = adaptor.getParent(t) return None def matchAny(self, ignore): # ignore stream, copy of this.input """ Match '.' in tree parser has special meaning. Skip node or entire tree if node has children. If children, scan until corresponding UP node. 
""" self._state.errorRecovery = False look = self.input.LT(1) if self.input.getTreeAdaptor().getChildCount(look) == 0: self.input.consume() # not subtree, consume 1 node and return return # current node is a subtree, skip to corresponding UP. # must count nesting level to get right UP level = 0 tokenType = self.input.getTreeAdaptor().getType(look) while tokenType != EOF and not (tokenType == UP and level==0): self.input.consume() look = self.input.LT(1) tokenType = self.input.getTreeAdaptor().getType(look) if tokenType == DOWN: level += 1 elif tokenType == UP: level -= 1 self.input.consume() # consume UP def mismatch(self, input, ttype, follow): """ We have DOWN/UP nodes in the stream that have no line info; override. plus we want to alter the exception type. Don't try to recover from tree parser errors inline... """ raise MismatchedTreeNodeException(ttype, input) def getErrorHeader(self, e): """ Prefix error message with the grammar name because message is always intended for the programmer because the parser built the input tree not the user. """ return (self.getGrammarFileName() + ": node from %sline %s:%s" % (['', "after "][e.approximateLineInfo], e.line, e.charPositionInLine ) ) def getErrorMessage(self, e, tokenNames): """ Tree parsers parse nodes they usually have a token object as payload. Set the exception token and do the default behavior. 
""" if isinstance(self, TreeParser): adaptor = e.input.getTreeAdaptor() e.token = adaptor.getToken(e.node) if e.token is not None: # could be an UP/DOWN node e.token = CommonToken( type=adaptor.getType(e.node), text=adaptor.getText(e.node) ) return BaseRecognizer.getErrorMessage(self, e, tokenNames) def traceIn(self, ruleName, ruleIndex): BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1)) def traceOut(self, ruleName, ruleIndex): BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1)) ############################################################################# # # tree visitor # ############################################################################# class TreeVisitor(object): """Do a depth first walk of a tree, applying pre() and post() actions we go. """ def __init__(self, adaptor=None): if adaptor is not None: self.adaptor = adaptor else: self.adaptor = CommonTreeAdaptor() def visit(self, t, pre_action=None, post_action=None): """Visit every node in tree t and trigger an action for each node before/after having visited all of its children. Bottom up walk. Execute both actions even if t has no children. Ignore return results from transforming children since they will have altered the child list of this node (their parent). Return result of applying post action to this node. The Python version differs from the Java version by taking two callables 'pre_action' and 'post_action' instead of a class instance that wraps those methods. Those callables must accept a TreeNode as their single argument and return the (potentially transformed or replaced) TreeNode. 
""" isNil = self.adaptor.isNil(t) if pre_action is not None and not isNil: # if rewritten, walk children of new t t = pre_action(t) idx = 0 while idx < self.adaptor.getChildCount(t): child = self.adaptor.getChild(t, idx) self.visit(child, pre_action, post_action) idx += 1 if post_action is not None and not isNil: t = post_action(t) return t ############################################################################# # # tree iterator # ############################################################################# class TreeIterator(object): """ Return a node stream from a doubly-linked tree whose nodes know what child index they are. Emit navigation nodes (DOWN, UP, and EOF) to let show tree structure. """ def __init__(self, tree, adaptor=None): if adaptor is None: adaptor = CommonTreeAdaptor() self.root = tree self.adaptor = adaptor self.first_time = True self.tree = tree # If we emit UP/DOWN nodes, we need to spit out multiple nodes per # next() call. self.nodes = [] # navigation nodes to return during walk and at end self.down = adaptor.createFromType(DOWN, "DOWN") self.up = adaptor.createFromType(UP, "UP") self.eof = adaptor.createFromType(EOF, "EOF") def reset(self): self.first_time = True self.tree = self.root self.nodes = [] def __iter__(self): return self def has_next(self): if self.first_time: return self.root is not None if len(self.nodes) > 0: return True if self.tree is None: return False if self.adaptor.getChildCount(self.tree) > 0: return True # back at root? return self.adaptor.getParent(self.tree) is not None def next(self): if not self.has_next(): raise StopIteration if self.first_time: # initial condition self.first_time = False if self.adaptor.getChildCount(self.tree) == 0: # single node tree (special) self.nodes.append(self.eof) return self.tree return self.tree # if any queued up, use those first if len(self.nodes) > 0: return self.nodes.pop(0) # no nodes left? 
if self.tree is None: return self.eof # next node will be child 0 if any children if self.adaptor.getChildCount(self.tree) > 0: self.tree = self.adaptor.getChild(self.tree, 0) # real node is next after DOWN self.nodes.append(self.tree) return self.down # if no children, look for next sibling of tree or ancestor parent = self.adaptor.getParent(self.tree) # while we're out of siblings, keep popping back up towards root while (parent is not None and self.adaptor.getChildIndex(self.tree)+1 >= self.adaptor.getChildCount(parent)): # we're moving back up self.nodes.append(self.up) self.tree = parent parent = self.adaptor.getParent(self.tree) # no nodes left? if parent is None: self.tree = None # back at root? nothing left then self.nodes.append(self.eof) # add to queue, might have UP nodes in there return self.nodes.pop(0) # must have found a node with an unvisited sibling # move to it and return it nextSiblingIndex = self.adaptor.getChildIndex(self.tree) + 1 self.tree = self.adaptor.getChild(parent, nextSiblingIndex) self.nodes.append(self.tree) # add to queue, might have UP nodes in there return self.nodes.pop(0) ############################################################################# # # streams for rule rewriting # ############################################################################# class RewriteRuleElementStream(object): """@brief Internal helper class. A generic list of elements tracked in an alternative to be used in a -> rewrite rule. We need to subclass to fill in the next() method, which returns either an AST node wrapped around a token payload or an existing subtree. Once you start next()ing, do not try to add more elements. It will break the cursor tracking I believe. @see org.antlr.runtime.tree.RewriteRuleSubtreeStream @see org.antlr.runtime.tree.RewriteRuleTokenStream TODO: add mechanism to detect/puke on modification after reading from stream """ def __init__(self, adaptor, elementDescription, elements=None): # Cursor 0..n-1. 
        # If singleElement!=null, cursor is 0 until you next(),
        # which bumps it to 1 meaning no more elements.
        self.cursor = 0

        # Track single elements w/o creating a list.  Upon 2nd add, alloc list
        self.singleElement = None

        # The list of tokens or subtrees we are tracking
        self.elements = None

        # Once a node / subtree has been used in a stream, it must be dup'd
        # from then on.  Streams are reset after subrules so that the streams
        # can be reused in future subrules.  So, reset must set a dirty bit.
        # If dirty, then next() always returns a dup.
        self.dirty = False

        # The element or stream description; usually has name of the token or
        # rule reference that this list tracks.  Can include rulename too, but
        # the exception would track that info.
        self.elementDescription = elementDescription

        self.adaptor = adaptor

        if isinstance(elements, (list, tuple)):
            # Create a stream, but feed off an existing list
            self.singleElement = None
            self.elements = elements

        else:
            # Create a stream with one element
            self.add(elements)

    def reset(self):
        """
        Reset the condition of this stream so that it appears we have
        not consumed any of its elements.  Elements themselves are untouched.
        Once we reset the stream, any future use will need duplicates.  Set
        the dirty bit.
        """
        self.cursor = 0
        self.dirty = True

    def add(self, el):
        # None elements are silently ignored (optional rewrite elements).
        if el is None:
            return

        if self.elements is not None: # if in list, just add
            self.elements.append(el)
            return

        if self.singleElement is None: # no elements yet, track w/o list
            self.singleElement = el
            return

        # adding 2nd element, move to list
        self.elements = []
        self.elements.append(self.singleElement)
        self.singleElement = None
        self.elements.append(el)

    def nextTree(self):
        """
        Return the next element in the stream.  If out of elements, throw
        an exception unless size()==1.  If size is 1, then return elements[0].

        Return a duplicate node/subtree if stream is out of elements and
        size==1. If we've already used the element, dup (dirty bit set).
        """
        if (self.dirty
            or (self.cursor >= len(self) and len(self) == 1)
            ):
            # if out of elements and size is 1, dup
            el = self._next()
            return self.dup(el)

        # test size above then fetch
        el = self._next()
        return el

    def _next(self):
        """
        do the work of getting the next element, making sure that it's
        a tree node or subtree.  Deal with the optimization of single-
        element list versus list of size > 1.  Throw an exception if
        the stream is empty or we're out of elements and size>1.
        protected so you can override in a subclass if necessary.
        """
        if len(self) == 0:
            raise RewriteEmptyStreamException(self.elementDescription)

        if self.cursor >= len(self): # out of elements?
            if len(self) == 1: # if size is 1, it's ok; return and we'll dup
                return self.toTree(self.singleElement)

            # out of elements and size was not 1, so we can't dup
            raise RewriteCardinalityException(self.elementDescription)

        # we have elements
        if self.singleElement is not None:
            self.cursor += 1 # move cursor even for single element list
            return self.toTree(self.singleElement)

        # must have more than one in list, pull from elements
        o = self.toTree(self.elements[self.cursor])
        self.cursor += 1
        return o

    def dup(self, el):
        """
        When constructing trees, sometimes we need to dup a token or AST
        subtree.  Dup'ing a token means just creating another AST node
        around it.  For trees, you must call the adaptor.dupTree()
        unless the element is for a tree root; then it must be a node dup.
        """
        raise NotImplementedError

    def toTree(self, el):
        """
        Ensure stream emits trees; tokens must be converted to AST nodes.
        AST nodes can be passed through unmolested.
        """
        return el

    def hasNext(self):
        return ( (self.singleElement is not None and self.cursor < 1)
                 or (self.elements is not None
                     and self.cursor < len(self.elements)
                     )
                 )

    def size(self):
        if self.singleElement is not None:
            return 1

        if self.elements is not None:
            return len(self.elements)

        return 0

    __len__ = size

    def getDescription(self):
        """Deprecated.

        Directly access elementDescription attribute"""
        return self.elementDescription


class RewriteRuleTokenStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Tracks Token payloads matched on the left side of a -> rewrite.
    """

    def toTree(self, el):
        # Don't convert to a tree unless they explicitly call nextTree.
        # This way we can do hetero tree nodes in rewrite.
        return el

    def nextNode(self):
        # Wrap the next token payload in a fresh AST node.
        t = self._next()
        return self.adaptor.createWithPayload(t)

    def nextToken(self):
        return self._next()

    def dup(self, el):
        raise TypeError("dup can't be called for a token stream.")


class RewriteRuleSubtreeStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Tracks whole subtrees matched on the left side of a -> rewrite.
    """

    def nextNode(self):
        """
        Treat next element as a single node even if it's a subtree.
        This is used instead of next() when the result has to be a
        tree root node.  Also prevents us from duplicating recently-added
        children; e.g., ^(type ID)+ adds ID to type and then 2nd iteration
        must dup the type node, but ID has been added.

        Referencing a rule result twice is ok; dup entire tree as
        we can't be adding trees as root; e.g., expr expr.

        Hideous code duplication here with super.next().  Can't think of
        a proper way to refactor.  This needs to always call dup node
        and super.next() doesn't know which to call: dup node or dup tree.
        """
        if (self.dirty
            or (self.cursor >= len(self) and len(self) == 1)
            ):
            # if out of elements and size is 1, dup (at most a single node
            # since this is for making root nodes).
            el = self._next()
            return self.adaptor.dupNode(el)

        # test size above then fetch
        el = self._next()

        while self.adaptor.isNil(el) and self.adaptor.getChildCount(el) == 1:
            el = self.adaptor.getChild(el, 0)

        # dup just the root (want node here)
        return self.adaptor.dupNode(el)

    def dup(self, el):
        return self.adaptor.dupTree(el)


class RewriteRuleNodeStream(RewriteRuleElementStream):
    """
    Queues up nodes matched on left side of -> in a tree parser. This is
    the analog of RewriteRuleTokenStream for normal parsers.
""" def nextNode(self): return self._next() def toTree(self, el): return self.adaptor.dupNode(el) def dup(self, el): # we dup every node, so don't have to worry about calling dup; short- #circuited next() so it doesn't call. raise TypeError("dup can't be called for a node stream.") class TreeRuleReturnScope(RuleReturnScope): """ This is identical to the ParserRuleReturnScope except that the start property is a tree nodes not Token object when you are parsing trees. To be generic the tree node types have to be Object. """ def __init__(self): self.start = None self.tree = None def getStart(self): return self.start def getTree(self): return self.tree
apache-2.0
danielcbit/vdt
src/flow-monitor/bindings/modulegen__gcc_LP64.py
4
368610
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.flow_monitor', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] 
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper [class] module.add_class('FlowMonitorHelper') ## histogram.h (module 'flow-monitor'): ns3::Histogram [class] module.add_class('Histogram') ## int-to-type.h (module 'core'): ns3::IntToType<0> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0']) ## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<1> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1']) ## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<2> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2']) ## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<3> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3']) ## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration] module.add_enum('v_e', ['value'], 
outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<4> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4']) ## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<5> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5']) ## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core') ## int-to-type.h (module 'core'): ns3::IntToType<6> [struct] module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6']) ## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration] module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class] module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration] module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', 
import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress [class] module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet') ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e [enumeration] module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet') ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e [enumeration] module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 
'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## timer.h (module 'core'): ns3::Timer [class] module.add_class('Timer', import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration] 
module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer.h (module 'core'): ns3::Timer::State [enumeration] module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core') ## timer-impl.h (module 'core'): ns3::TimerImpl [class] module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class] module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration] module.add_enum('DscpType', 
['DscpDefault', 'CS1', 'AF11', 'AF12', 'AF13', 'CS2', 'AF21', 'AF22', 'AF23', 'CS3', 'AF31', 'AF32', 'AF33', 'CS4', 'AF41', 'AF42', 'AF43', 'CS5', 'EF', 'CS6', 'CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration] module.add_enum('EcnType', ['NotECT', 'ECT1', 'ECT0', 'CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv6-header.h (module 'internet'): ns3::Ipv6Header [class] module.add_class('Ipv6Header', import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv6-header.h (module 'internet'): ns3::Ipv6Header::NextHeader_e [enumeration] module.add_enum('NextHeader_e', ['IPV6_EXT_HOP_BY_HOP', 'IPV6_IPV4', 'IPV6_TCP', 'IPV6_UDP', 'IPV6_IPV6', 'IPV6_EXT_ROUTING', 'IPV6_EXT_FRAGMENTATION', 'IPV6_EXT_CONFIDENTIALITY', 'IPV6_EXT_AUTHENTIFICATION', 'IPV6_ICMPV6', 'IPV6_EXT_END', 'IPV6_EXT_DESTINATION', 'IPV6_SCTP', 'IPV6_EXT_MOBILITY', 'IPV6_UDP_LITE'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) 
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', 
decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FlowClassifier', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FlowClassifier>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FlowProbe', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FlowProbe>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', 
peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], 
memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::SocketAddressTag [class] module.add_class('SocketAddressTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] 
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## event-impl.h 
(module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## flow-classifier.h (module 'flow-monitor'): ns3::FlowClassifier [class] module.add_class('FlowClassifier', parent=root_module['ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >']) ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor [class] module.add_class('FlowMonitor', parent=root_module['ns3::Object']) ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats [struct] module.add_class('FlowStats', outer_class=root_module['ns3::FlowMonitor']) ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe [class] module.add_class('FlowProbe', parent=root_module['ns3::SimpleRefCount< ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >']) ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats [struct] module.add_class('FlowStats', outer_class=root_module['ns3::FlowProbe']) ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol [class] module.add_class('IpL4Protocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol::RxStatus [enumeration] module.add_enum('RxStatus', ['RX_OK', 'RX_CSUM_FAILED', 'RX_ENDPOINT_CLOSED', 'RX_ENDPOINT_UNREACH'], outer_class=root_module['ns3::IpL4Protocol'], import_from_module='ns.internet') ## ipv4.h (module 'internet'): ns3::Ipv4 [class] module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', 
parent=root_module['ns3::AttributeValue']) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier [class] module.add_class('Ipv4FlowClassifier', parent=root_module['ns3::FlowClassifier']) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple [struct] module.add_class('FiveTuple', outer_class=root_module['ns3::Ipv4FlowClassifier']) ## ipv4-flow-probe.h (module 'flow-monitor'): ns3::Ipv4FlowProbe [class] module.add_class('Ipv4FlowProbe', parent=root_module['ns3::FlowProbe']) ## ipv4-flow-probe.h (module 'flow-monitor'): ns3::Ipv4FlowProbe::DropReason [enumeration] module.add_enum('DropReason', ['DROP_NO_ROUTE', 'DROP_TTL_EXPIRE', 'DROP_BAD_CHECKSUM', 'DROP_QUEUE', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT', 'DROP_INVALID_REASON'], outer_class=root_module['ns3::Ipv4FlowProbe']) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol [class] module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4']) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::DropReason [enumeration] module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class] module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) ## 
ipv4-route.h (module 'internet'): ns3::Ipv4Route [class] module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class] module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-interface.h (module 'internet'): ns3::Ipv6Interface [class] module.add_class('Ipv6Interface', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< 
ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', 
import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) module.add_container('std::vector< unsigned int >', 'unsigned int', container_type='vector') module.add_container('std::vector< unsigned long >', 'long unsigned int', container_type='vector') module.add_container('std::map< unsigned int, ns3::FlowMonitor::FlowStats >', ('unsigned int', 'ns3::FlowMonitor::FlowStats'), container_type='map') module.add_container('std::vector< ns3::Ptr< ns3::FlowProbe > >', 'ns3::Ptr< ns3::FlowProbe >', container_type='vector') module.add_container('std::map< unsigned int, ns3::FlowProbe::FlowStats >', ('unsigned int', 'ns3::FlowProbe::FlowStats'), container_type='map') module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type='map') typehandlers.add_type_alias('uint32_t', 'ns3::FlowPacketId') typehandlers.add_type_alias('uint32_t*', 'ns3::FlowPacketId*') typehandlers.add_type_alias('uint32_t&', 'ns3::FlowPacketId&') typehandlers.add_type_alias('uint32_t', 'ns3::FlowId') typehandlers.add_type_alias('uint32_t*', 'ns3::FlowId*') typehandlers.add_type_alias('uint32_t&', 'ns3::FlowId&') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator']) 
    # NOTE(review): this file appears to be PyBindGen-generated ns-3 binding glue
    # (apiscan output) -- edits are normally made in the scanner, not by hand; confirm
    # before modifying. Each call below attaches method/constructor/operator wrappers
    # to the wrapper class object previously created in register_types().
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3FlowMonitorHelper_methods(root_module, root_module['ns3::FlowMonitorHelper'])
    register_Ns3Histogram_methods(root_module, root_module['ns3::Histogram'])
    register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
    register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
    register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
    register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
    register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
    register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
    register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6InterfaceAddress_methods(root_module, root_module['ns3::Ipv6InterfaceAddress'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
    register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
    register_Ns3Ipv6Header_methods(root_module, root_module['ns3::Ipv6Header'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    # SimpleRefCount template instantiations: the mangled function names encode the
    # C++ template arguments (__lt__/__gt__ stand for '<'/'>').
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >'])
    register_Ns3SimpleRefCount__Ns3FlowProbe_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowProbe__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
    register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
    register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag'])
    register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
    register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FlowClassifier_methods(root_module, root_module['ns3::FlowClassifier'])
    register_Ns3FlowMonitor_methods(root_module, root_module['ns3::FlowMonitor'])
    register_Ns3FlowMonitorFlowStats_methods(root_module, root_module['ns3::FlowMonitor::FlowStats'])
    register_Ns3FlowProbe_methods(root_module, root_module['ns3::FlowProbe'])
    register_Ns3FlowProbeFlowStats_methods(root_module, root_module['ns3::FlowProbe::FlowStats'])
    register_Ns3IpL4Protocol_methods(root_module, root_module['ns3::IpL4Protocol'])
    register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4FlowClassifier_methods(root_module, root_module['ns3::Ipv4FlowClassifier'])
    register_Ns3Ipv4FlowClassifierFiveTuple_methods(root_module, root_module['ns3::Ipv4FlowClassifier::FiveTuple'])
    register_Ns3Ipv4FlowProbe_methods(root_module, root_module['ns3::Ipv4FlowProbe'])
    register_Ns3Ipv4L3Protocol_methods(root_module, root_module['ns3::Ipv4L3Protocol'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
    register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
    register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6Interface_methods(root_module, root_module['ns3::Ipv6Interface'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    return

def register_Ns3Address_methods(root_module, cls):
    # Wrap ns3::Address: comparison/stream operators plus the methods listed in
    # the '##' provenance comments below (each comment quotes the C++ declaration
    # from address.h that the following call wraps).
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    # Wrap ns3::AttributeConstructionList (attribute-construction-list.h, core module).
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    # Wrap the nested ns3::AttributeConstructionList::Item struct (plain data fields).
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    # Wrap ns3::Buffer (buffer.h, network module): constructors plus the
    # serialization / fragment / offset accessors quoted in the '##' comments.
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    # Wrap the nested ns3::Buffer::Iterator (read/write cursor over a Buffer).
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart', 'bool', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev', 'void', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return

def register_Ns3ByteTagIterator_methods(root_module, cls):
    # Wrap ns3::ByteTagIterator (packet.h, network module).
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return

def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    # Wrap the nested ns3::ByteTagIterator::Item.
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3ByteTagList_methods(root_module, cls):
    # Wrap ns3::ByteTagList (byte-tag-list.h, network module).
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

def register_Ns3ByteTagListIterator_methods(root_module, cls):
    # Wrap the nested ns3::ByteTagList::Iterator.
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return

def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    # Wrap the nested ns3::ByteTagList::Iterator::Item (plain data fields).
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3CallbackBase_methods(root_module, cls):
    # Wrap ns3::CallbackBase (callback.h, core module); note the protected
    # constructor/Demangle are still registered, with visibility='protected'.
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

def register_Ns3EventId_methods(root_module, cls):
    # Wrap ns3::EventId (event-id.h, core module): equality operators first,
    # then constructors/methods.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3FlowMonitorHelper_methods(root_module, cls): ## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper::FlowMonitorHelper(ns3::FlowMonitorHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::FlowMonitorHelper const &', 'arg0')]) ## flow-monitor-helper.h (module 'flow-monitor'): ns3::FlowMonitorHelper::FlowMonitorHelper() [constructor] cls.add_constructor([]) ## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowClassifier> 
ns3::FlowMonitorHelper::GetClassifier() [member function] cls.add_method('GetClassifier', 'ns3::Ptr< ns3::FlowClassifier >', []) ## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::GetMonitor() [member function] cls.add_method('GetMonitor', 'ns3::Ptr< ns3::FlowMonitor >', []) ## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::Install(ns3::NodeContainer nodes) [member function] cls.add_method('Install', 'ns3::Ptr< ns3::FlowMonitor >', [param('ns3::NodeContainer', 'nodes')]) ## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::Install(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Install', 'ns3::Ptr< ns3::FlowMonitor >', [param('ns3::Ptr< ns3::Node >', 'node')]) ## flow-monitor-helper.h (module 'flow-monitor'): ns3::Ptr<ns3::FlowMonitor> ns3::FlowMonitorHelper::InstallAll() [member function] cls.add_method('InstallAll', 'ns3::Ptr< ns3::FlowMonitor >', []) ## flow-monitor-helper.h (module 'flow-monitor'): void ns3::FlowMonitorHelper::SetMonitorAttribute(std::string n1, ns3::AttributeValue const & v1) [member function] cls.add_method('SetMonitorAttribute', 'void', [param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')]) return def register_Ns3Histogram_methods(root_module, cls): ## histogram.h (module 'flow-monitor'): ns3::Histogram::Histogram(ns3::Histogram const & arg0) [copy constructor] cls.add_constructor([param('ns3::Histogram const &', 'arg0')]) ## histogram.h (module 'flow-monitor'): ns3::Histogram::Histogram(double binWidth) [constructor] cls.add_constructor([param('double', 'binWidth')]) ## histogram.h (module 'flow-monitor'): ns3::Histogram::Histogram() [constructor] cls.add_constructor([]) ## histogram.h (module 'flow-monitor'): void ns3::Histogram::AddValue(double value) [member function] cls.add_method('AddValue', 'void', [param('double', 'value')]) ## histogram.h (module 
'flow-monitor'): uint32_t ns3::Histogram::GetBinCount(uint32_t index) [member function] cls.add_method('GetBinCount', 'uint32_t', [param('uint32_t', 'index')]) ## histogram.h (module 'flow-monitor'): double ns3::Histogram::GetBinEnd(uint32_t index) [member function] cls.add_method('GetBinEnd', 'double', [param('uint32_t', 'index')]) ## histogram.h (module 'flow-monitor'): double ns3::Histogram::GetBinStart(uint32_t index) [member function] cls.add_method('GetBinStart', 'double', [param('uint32_t', 'index')]) ## histogram.h (module 'flow-monitor'): double ns3::Histogram::GetBinWidth(uint32_t index) const [member function] cls.add_method('GetBinWidth', 'double', [param('uint32_t', 'index')], is_const=True) ## histogram.h (module 'flow-monitor'): uint32_t ns3::Histogram::GetNBins() const [member function] cls.add_method('GetNBins', 'uint32_t', [], is_const=True) ## histogram.h (module 'flow-monitor'): void ns3::Histogram::SerializeToXmlStream(std::ostream & os, int indent, std::string elementName) const [member function] cls.add_method('SerializeToXmlStream', 'void', [param('std::ostream &', 'os'), param('int', 'indent'), param('std::string', 'elementName')], is_const=True) ## histogram.h (module 'flow-monitor'): void ns3::Histogram::SetDefaultBinWidth(double binWidth) [member function] cls.add_method('SetDefaultBinWidth', 'void', [param('double', 'binWidth')]) return def register_Ns3IntToType__0_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')]) return def register_Ns3IntToType__1_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy 
constructor] cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')]) return def register_Ns3IntToType__2_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')]) return def register_Ns3IntToType__3_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')]) return def register_Ns3IntToType__4_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')]) return def register_Ns3IntToType__5_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')]) return def register_Ns3IntToType__6_methods(root_module, cls): ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor] cls.add_constructor([]) ## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor] cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() 
cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] 
cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] 
cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor] cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function] cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function] cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 
'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function] cls.add_method('IsSecondary', 'bool', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function] cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function] cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function] cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function] cls.add_method('SetPrimary', 'void', []) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function] cls.add_method('SetSecondary', 'void', []) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] 
cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def 
register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address 
ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool 
ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function] cls.add_method('IsIpv4MappedAddress', 'bool', []) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member 
function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6Address address) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'address')]) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6Address 
address, ns3::Ipv6Prefix prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'address'), param('ns3::Ipv6Prefix', 'prefix')]) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Ipv6InterfaceAddress(ns3::Ipv6InterfaceAddress const & o) [copy constructor] cls.add_constructor([param('ns3::Ipv6InterfaceAddress const &', 'o')]) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6InterfaceAddress::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-interface-address.h (module 'internet'): uint32_t ns3::Ipv6InterfaceAddress::GetNsDadUid() const [member function] cls.add_method('GetNsDadUid', 'uint32_t', [], is_const=True) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6Prefix ns3::Ipv6InterfaceAddress::GetPrefix() const [member function] cls.add_method('GetPrefix', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::Scope_e ns3::Ipv6InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 'ns3::Ipv6InterfaceAddress::Scope_e', [], is_const=True) ## ipv6-interface-address.h (module 'internet'): ns3::Ipv6InterfaceAddress::State_e ns3::Ipv6InterfaceAddress::GetState() const [member function] cls.add_method('GetState', 'ns3::Ipv6InterfaceAddress::State_e', [], is_const=True) ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetAddress(ns3::Ipv6Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Ipv6Address', 'address')]) ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetNsDadUid(uint32_t uid) [member function] cls.add_method('SetNsDadUid', 'void', [param('uint32_t', 'uid')]) ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetScope(ns3::Ipv6InterfaceAddress::Scope_e scope) [member function] cls.add_method('SetScope', 'void', 
[param('ns3::Ipv6InterfaceAddress::Scope_e', 'scope')]) ## ipv6-interface-address.h (module 'internet'): void ns3::Ipv6InterfaceAddress::SetState(ns3::Ipv6InterfaceAddress::State_e state) [member function] cls.add_method('SetState', 'void', [param('ns3::Ipv6InterfaceAddress::State_e', 'state')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t 
ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): 
ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > 
const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool 
ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 
'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() 
const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void 
ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', 
[param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] 
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3PacketMetadataItemIterator_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor] cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')]) ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketMetadata::Item', []) return def register_Ns3PacketTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function] cls.add_method('Next', 'ns3::PacketTagIterator::Item', []) return def register_Ns3PacketTagIteratorItem_methods(root_module, cls): ## packet.h (module 
'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3PacketTagList_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor] cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor] cls.add_constructor([param('ns3::PacketTagList const &', 'o')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function] cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function] cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function] cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function] cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')]) ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3PacketTagListTagData_methods(root_module, cls): ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor] 
cls.add_constructor([]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & 
id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member 
function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, 
cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void 
ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3Timer_methods(root_module, cls): ## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Timer const &', 'arg0')]) ## timer.h (module 'core'): ns3::Timer::Timer() [constructor] cls.add_constructor([]) ## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor] cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')]) ## timer.h (module 'core'): void ns3::Timer::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function] cls.add_method('GetDelay', 'ns3::Time', [], is_const=True) ## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [], is_const=True) ## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function] cls.add_method('GetState', 'ns3::Timer::State', [], is_const=True) ## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function] 
cls.add_method('IsSuspended', 'bool', [], is_const=True) ## timer.h (module 'core'): void ns3::Timer::Remove() [member function] cls.add_method('Remove', 'void', []) ## timer.h (module 'core'): void ns3::Timer::Resume() [member function] cls.add_method('Resume', 'void', []) ## timer.h (module 'core'): void ns3::Timer::Schedule() [member function] cls.add_method('Schedule', 'void', []) ## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function] cls.add_method('Schedule', 'void', [param('ns3::Time', 'delay')]) ## timer.h (module 'core'): void ns3::Timer::SetDelay(ns3::Time const & delay) [member function] cls.add_method('SetDelay', 'void', [param('ns3::Time const &', 'delay')]) ## timer.h (module 'core'): void ns3::Timer::Suspend() [member function] cls.add_method('Suspend', 'void', []) return def register_Ns3TimerImpl_methods(root_module, cls): ## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor] cls.add_constructor([]) ## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')]) ## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function] cls.add_method('Invoke', 'void', [], is_pure_virtual=True, is_virtual=True) ## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function] cls.add_method('Schedule', 'ns3::EventId', [param('ns3::Time const &', 'delay')], is_pure_virtual=True, is_virtual=True) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h 
(module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] 
cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 
'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId 
ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue 
[variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) 
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('+', 
root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], 
root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], 
root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): 
ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) return def register_Ns3Chunk_methods(root_module, cls): ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor] cls.add_constructor([]) ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor] cls.add_constructor([param('ns3::Chunk const &', 'arg0')]) ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', 
[param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Header_methods(root_module, cls): cls.add_output_stream_operator() ## header.h (module 'network'): ns3::Header::Header() [constructor] cls.add_constructor([]) ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Header const &', 'arg0')]) ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True) ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Ipv4Header_methods(root_module, cls): ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')]) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor] cls.add_constructor([]) ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', 
[param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function] cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True) ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function] cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function] cls.add_method('EnableChecksum', 'void', []) ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function] cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function] cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function] cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True) ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function] cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function] cls.add_method('GetIdentification', 'uint16_t', [], is_const=True) ## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function] cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True) ## ipv4-header.h (module 
'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function] cls.add_method('GetProtocol', 'uint8_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function] cls.add_method('GetTos', 'uint8_t', [], is_const=True) ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function] cls.add_method('IsChecksumOk', 'bool', [], is_const=True) ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function] cls.add_method('IsDontFragment', 'bool', [], is_const=True) ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function] cls.add_method('IsLastFragment', 'bool', [], is_const=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) 
[member function] cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function] cls.add_method('SetDontFragment', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function] cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function] cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function] cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function] cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function] cls.add_method('SetLastFragment', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function] cls.add_method('SetMayFragment', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function] cls.add_method('SetMoreFragments', 'void', []) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function] cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function] cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function] cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')]) ## 
ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function] cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')]) ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3Ipv6Header_methods(root_module, cls): ## ipv6-header.h (module 'internet'): ns3::Ipv6Header::Ipv6Header(ns3::Ipv6Header const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6Header const &', 'arg0')]) ## ipv6-header.h (module 'internet'): ns3::Ipv6Header::Ipv6Header() [constructor] cls.add_constructor([]) ## ipv6-header.h (module 'internet'): uint32_t ns3::Ipv6Header::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## ipv6-header.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6Header::GetDestinationAddress() const [member function] cls.add_method('GetDestinationAddress', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-header.h (module 'internet'): uint32_t ns3::Ipv6Header::GetFlowLabel() const [member function] cls.add_method('GetFlowLabel', 'uint32_t', [], is_const=True) ## ipv6-header.h (module 'internet'): uint8_t ns3::Ipv6Header::GetHopLimit() const [member function] cls.add_method('GetHopLimit', 'uint8_t', [], is_const=True) ## ipv6-header.h (module 'internet'): ns3::TypeId ns3::Ipv6Header::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## ipv6-header.h (module 'internet'): uint8_t ns3::Ipv6Header::GetNextHeader() const [member function] cls.add_method('GetNextHeader', 'uint8_t', [], is_const=True) ## ipv6-header.h (module 'internet'): uint16_t ns3::Ipv6Header::GetPayloadLength() const [member function] cls.add_method('GetPayloadLength', 'uint16_t', [], is_const=True) ## ipv6-header.h (module 'internet'): uint32_t 
ns3::Ipv6Header::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ipv6-header.h (module 'internet'): ns3::Ipv6Address ns3::Ipv6Header::GetSourceAddress() const [member function] cls.add_method('GetSourceAddress', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-header.h (module 'internet'): uint8_t ns3::Ipv6Header::GetTrafficClass() const [member function] cls.add_method('GetTrafficClass', 'uint8_t', [], is_const=True) ## ipv6-header.h (module 'internet'): static ns3::TypeId ns3::Ipv6Header::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetDestinationAddress(ns3::Ipv6Address dst) [member function] cls.add_method('SetDestinationAddress', 'void', [param('ns3::Ipv6Address', 'dst')]) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetFlowLabel(uint32_t flow) [member function] cls.add_method('SetFlowLabel', 'void', [param('uint32_t', 'flow')]) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetHopLimit(uint8_t limit) [member function] cls.add_method('SetHopLimit', 'void', [param('uint8_t', 'limit')]) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetNextHeader(uint8_t next) [member function] cls.add_method('SetNextHeader', 'void', [param('uint8_t', 'next')]) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetPayloadLength(uint16_t len) [member function] cls.add_method('SetPayloadLength', 'void', 
[param('uint16_t', 'len')]) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetSourceAddress(ns3::Ipv6Address src) [member function] cls.add_method('SetSourceAddress', 'void', [param('ns3::Ipv6Address', 'src')]) ## ipv6-header.h (module 'internet'): void ns3::Ipv6Header::SetTrafficClass(uint8_t traffic) [member function] cls.add_method('SetTrafficClass', 'void', [param('uint8_t', 'traffic')]) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Start() [member function] cls.add_method('Start', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', 
is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter< ns3::FlowClassifier > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3FlowProbe_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowProbe__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter< ns3::FlowProbe > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def 
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Socket_methods(root_module, cls): ## socket.h (module 
'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor] cls.add_constructor([param('ns3::Socket const &', 'arg0')]) ## socket.h (module 'network'): ns3::Socket::Socket() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function] cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind() [member function] cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind6() [member function] cls.add_method('Bind6', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function] cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Close() [member function] cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function] cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function] cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function] cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function] cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice 
>', []) ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function] cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function] cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function] cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function] cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function] cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function] cls.add_method('IsRecvPktInfo', 'bool', [], is_const=True) ## socket.h (module 'network'): int ns3::Socket::Listen() [member function] cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], 
is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', []) ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): int 
ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function] cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function] cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')]) ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function] cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')]) ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function] cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function] cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')]) ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function] cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')]) ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function] cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function] cls.add_method('SetRecvCallback', 'void', 
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function] cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')]) ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function] cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')]) ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function] cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function] cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function] cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function] cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function] cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function] cls.add_method('NotifyDataRecv', 'void', [], visibility='protected') ## socket.h 
(module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function] cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function] cls.add_method('NotifyErrorClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function] cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function] cls.add_method('NotifyNormalClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function] cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected') return def register_Ns3SocketAddressTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag(ns3::SocketAddressTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketAddressTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::Address ns3::SocketAddressTag::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketAddressTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t 
ns3::SocketAddressTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketAddressTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::SetAddress(ns3::Address addr) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')]) return def register_Ns3SocketIpTtlTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], 
is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function] cls.add_method('Disable', 'void', []) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function] cls.add_method('Enable', 'void', []) ## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function] 
cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] 
cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'value')]) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit 
    # NOTE(review): this file is PyBindGen-generated ns-3 Python binding code
    # (modulegen output).  It is normally regenerated from the C++ headers, so
    # only comments/docstrings are added here; every add_method/add_constructor
    # call is kept token-for-token.  This chunk begins inside
    # register_Ns3Time_methods(), whose start lies above this view.
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True)
    return

def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::TraceSourceAccessor (core)."""
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Trailer_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Trailer (network)."""
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::AttributeAccessor (core)."""
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::AttributeChecker (core)."""
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeValue_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::AttributeValue (core)."""
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackChecker (core)."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::CallbackImplBase (core)."""
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackValue (core)."""
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    # NOTE(review): continuation of register_Ns3CallbackValue_methods() from the
    # previous chunk; PyBindGen-generated code — comments only are touched.
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register Python bindings for ns3::EmptyAttributeValue (core)."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3EventImpl_methods(root_module, cls):
    """Register Python bindings for ns3::EventImpl (core)."""
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke', 'void', [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled', 'bool', [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True)
    return

def register_Ns3FlowClassifier_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::FlowClassifier (flow-monitor)."""
    ## flow-classifier.h (module 'flow-monitor'): ns3::FlowClassifier::FlowClassifier() [constructor]
    cls.add_constructor([])
    ## flow-classifier.h (module 'flow-monitor'): void ns3::FlowClassifier::SerializeToXmlStream(std::ostream & os, int indent) const [member function]
    cls.add_method('SerializeToXmlStream', 'void', [param('std::ostream &', 'os'), param('int', 'indent')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## flow-classifier.h (module 'flow-monitor'): ns3::FlowId ns3::FlowClassifier::GetNewFlowId() [member function]
    cls.add_method('GetNewFlowId', 'ns3::FlowId', [], visibility='protected')
    return

def register_Ns3FlowMonitor_methods(root_module, cls):
    """Register Python bindings for ns3::FlowMonitor (flow-monitor)."""
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowMonitor(ns3::FlowMonitor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FlowMonitor const &', 'arg0')])
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowMonitor() [constructor]
    cls.add_constructor([])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::AddProbe(ns3::Ptr<ns3::FlowProbe> probe) [member function]
    cls.add_method('AddProbe', 'void', [param('ns3::Ptr< ns3::FlowProbe >', 'probe')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::CheckForLostPackets() [member function]
    cls.add_method('CheckForLostPackets', 'void', [])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::CheckForLostPackets(ns3::Time maxDelay) [member function]
    cls.add_method('CheckForLostPackets', 'void', [param('ns3::Time', 'maxDelay')])
    ## flow-monitor.h (module 'flow-monitor'): std::vector<ns3::Ptr<ns3::FlowProbe>, std::allocator<ns3::Ptr<ns3::FlowProbe> > > ns3::FlowMonitor::GetAllProbes() const [member function]
    cls.add_method('GetAllProbes', 'std::vector< ns3::Ptr< ns3::FlowProbe > >', [], is_const=True)
    ## flow-monitor.h (module 'flow-monitor'): std::map<unsigned int, ns3::FlowMonitor::FlowStats, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, ns3::FlowMonitor::FlowStats> > > ns3::FlowMonitor::GetFlowStats() const [member function]
    cls.add_method('GetFlowStats', 'std::map< unsigned int, ns3::FlowMonitor::FlowStats >', [], is_const=True)
    ## flow-monitor.h (module 'flow-monitor'): ns3::TypeId ns3::FlowMonitor::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## flow-monitor.h (module 'flow-monitor'): static ns3::TypeId ns3::FlowMonitor::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportDrop(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize, uint32_t reasonCode) [member function]
    cls.add_method('ReportDrop', 'void', [param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize'), param('uint32_t', 'reasonCode')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportFirstTx(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize) [member function]
    cls.add_method('ReportFirstTx', 'void', [param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportForwarding(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize) [member function]
    cls.add_method('ReportForwarding', 'void', [param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::ReportLastRx(ns3::Ptr<ns3::FlowProbe> probe, ns3::FlowId flowId, ns3::FlowPacketId packetId, uint32_t packetSize) [member function]
    cls.add_method('ReportLastRx', 'void', [param('ns3::Ptr< ns3::FlowProbe >', 'probe'), param('ns3::FlowId', 'flowId'), param('ns3::FlowPacketId', 'packetId'), param('uint32_t', 'packetSize')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::SerializeToXmlFile(std::string fileName, bool enableHistograms, bool enableProbes) [member function]
    cls.add_method('SerializeToXmlFile', 'void', [param('std::string', 'fileName'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::SerializeToXmlStream(std::ostream & os, int indent, bool enableHistograms, bool enableProbes) [member function]
    cls.add_method('SerializeToXmlStream', 'void', [param('std::ostream &', 'os'), param('int', 'indent'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
    ## flow-monitor.h (module 'flow-monitor'): std::string ns3::FlowMonitor::SerializeToXmlString(int indent, bool enableHistograms, bool enableProbes) [member function]
    cls.add_method('SerializeToXmlString', 'std::string', [param('int', 'indent'), param('bool', 'enableHistograms'), param('bool', 'enableProbes')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::SetFlowClassifier(ns3::Ptr<ns3::FlowClassifier> classifier) [member function]
    cls.add_method('SetFlowClassifier', 'void', [param('ns3::Ptr< ns3::FlowClassifier >', 'classifier')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::Start(ns3::Time const & time) [member function]
    cls.add_method('Start', 'void', [param('ns3::Time const &', 'time')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::StartRightNow() [member function]
    cls.add_method('StartRightNow', 'void', [])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::Stop(ns3::Time const & time) [member function]
    cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::StopRightNow() [member function]
    cls.add_method('StopRightNow', 'void', [])
    ## flow-monitor.h (module 'flow-monitor'): void ns3::FlowMonitor::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3FlowMonitorFlowStats_methods(root_module, cls):
    """Register Python bindings for the ns3::FlowMonitor::FlowStats struct (flow-monitor)."""
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::FlowStats() [constructor]
    cls.add_constructor([])
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::FlowStats(ns3::FlowMonitor::FlowStats const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FlowMonitor::FlowStats const &', 'arg0')])
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::bytesDropped [variable]
    cls.add_instance_attribute('bytesDropped', 'std::vector< unsigned long >', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::delayHistogram [variable]
    cls.add_instance_attribute('delayHistogram', 'ns3::Histogram', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::delaySum [variable]
    cls.add_instance_attribute('delaySum', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::flowInterruptionsHistogram [variable]
    cls.add_instance_attribute('flowInterruptionsHistogram', 'ns3::Histogram', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::jitterHistogram [variable]
    cls.add_instance_attribute('jitterHistogram', 'ns3::Histogram', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::jitterSum [variable]
    cls.add_instance_attribute('jitterSum', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::lastDelay [variable]
    cls.add_instance_attribute('lastDelay', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::lostPackets [variable]
    cls.add_instance_attribute('lostPackets', 'uint32_t', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::packetSizeHistogram [variable]
    cls.add_instance_attribute('packetSizeHistogram', 'ns3::Histogram', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::packetsDropped [variable]
    cls.add_instance_attribute('packetsDropped', 'std::vector< unsigned int >', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::rxBytes [variable]
    cls.add_instance_attribute('rxBytes', 'uint64_t', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::rxPackets [variable]
    cls.add_instance_attribute('rxPackets', 'uint32_t', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeFirstRxPacket [variable]
    cls.add_instance_attribute('timeFirstRxPacket', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeFirstTxPacket [variable]
    cls.add_instance_attribute('timeFirstTxPacket', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeLastRxPacket [variable]
    cls.add_instance_attribute('timeLastRxPacket', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timeLastTxPacket [variable]
    cls.add_instance_attribute('timeLastTxPacket', 'ns3::Time', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::timesForwarded [variable]
    cls.add_instance_attribute('timesForwarded', 'uint32_t', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::txBytes [variable]
    cls.add_instance_attribute('txBytes', 'uint64_t', is_const=False)
    ## flow-monitor.h (module 'flow-monitor'): ns3::FlowMonitor::FlowStats::txPackets [variable]
    cls.add_instance_attribute('txPackets', 'uint32_t', is_const=False)
    return

def register_Ns3FlowProbe_methods(root_module, cls):
    """Register Python bindings for ns3::FlowProbe (flow-monitor)."""
    ## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::AddPacketDropStats(ns3::FlowId flowId, uint32_t packetSize, uint32_t reasonCode) [member function]
    cls.add_method('AddPacketDropStats', 'void', [param('ns3::FlowId', 'flowId'), param('uint32_t', 'packetSize'), param('uint32_t', 'reasonCode')])
    ## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::AddPacketStats(ns3::FlowId flowId, uint32_t packetSize, ns3::Time delayFromFirstProbe) [member function]
    cls.add_method('AddPacketStats', 'void', [param('ns3::FlowId', 'flowId'), param('uint32_t', 'packetSize'), param('ns3::Time', 'delayFromFirstProbe')])
    ## flow-probe.h (module 'flow-monitor'): std::map<unsigned int, ns3::FlowProbe::FlowStats, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, ns3::FlowProbe::FlowStats> > > ns3::FlowProbe::GetStats() const [member function]
    cls.add_method('GetStats', 'std::map< unsigned int, ns3::FlowProbe::FlowStats >', [], is_const=True)
    ## flow-probe.h (module 'flow-monitor'): void ns3::FlowProbe::SerializeToXmlStream(std::ostream & os, int indent, uint32_t index) const [member function]
    cls.add_method('SerializeToXmlStream', 'void', [param('std::ostream &', 'os'), param('int', 'indent'), param('uint32_t', 'index')], is_const=True)
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowProbe(ns3::Ptr<ns3::FlowMonitor> flowMonitor) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::FlowMonitor >', 'flowMonitor')], visibility='protected')
    return
def register_Ns3FlowProbeFlowStats_methods(root_module, cls):
    """Register Python bindings for the ns3::FlowProbe::FlowStats struct (flow-monitor)."""
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::FlowStats(ns3::FlowProbe::FlowStats const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FlowProbe::FlowStats const &', 'arg0')])
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::FlowStats() [constructor]
    cls.add_constructor([])
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::bytes [variable]
    cls.add_instance_attribute('bytes', 'uint64_t', is_const=False)
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::bytesDropped [variable]
    cls.add_instance_attribute('bytesDropped', 'std::vector< unsigned long >', is_const=False)
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::delayFromFirstProbeSum [variable]
    cls.add_instance_attribute('delayFromFirstProbeSum', 'ns3::Time', is_const=False)
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::packets [variable]
    cls.add_instance_attribute('packets', 'uint32_t', is_const=False)
    ## flow-probe.h (module 'flow-monitor'): ns3::FlowProbe::FlowStats::packetsDropped [variable]
    cls.add_instance_attribute('packetsDropped', 'std::vector< unsigned int >', is_const=False)
    return

def register_Ns3IpL4Protocol_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::IpL4Protocol (internet)."""
    ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol::IpL4Protocol() [constructor]
    cls.add_constructor([])
    ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol::IpL4Protocol(ns3::IpL4Protocol const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IpL4Protocol const &', 'arg0')])
    ## ip-l4-protocol.h (module 'internet'): ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv4Address,ns3::Ipv4Address,unsigned char,ns3::Ptr<ns3::Ipv4Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::IpL4Protocol::GetDownTarget() const [member function]
    cls.add_method('GetDownTarget', 'ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv6Address,ns3::Ipv6Address,unsigned char,ns3::Ptr<ns3::Ipv6Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::IpL4Protocol::GetDownTarget6() const [member function]
    cls.add_method('GetDownTarget6', 'ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv6Address, ns3::Ipv6Address, unsigned char, ns3::Ptr< ns3::Ipv6Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): int ns3::IpL4Protocol::GetProtocolNumber() const [member function]
    cls.add_method('GetProtocolNumber', 'int', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): static ns3::TypeId ns3::IpL4Protocol::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol::RxStatus ns3::IpL4Protocol::Receive(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::Ipv4Interface> incomingInterface) [member function]
    cls.add_method('Receive', 'ns3::IpL4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')], is_pure_virtual=True, is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): ns3::IpL4Protocol::RxStatus ns3::IpL4Protocol::Receive(ns3::Ptr<ns3::Packet> p, ns3::Ipv6Address & src, ns3::Ipv6Address & dst, ns3::Ptr<ns3::Ipv6Interface> incomingInterface) [member function]
    cls.add_method('Receive', 'ns3::IpL4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv6Address &', 'src'), param('ns3::Ipv6Address &', 'dst'), param('ns3::Ptr< ns3::Ipv6Interface >', 'incomingInterface')], is_pure_virtual=True, is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): void ns3::IpL4Protocol::ReceiveIcmp(ns3::Ipv4Address icmpSource, uint8_t icmpTtl, uint8_t icmpType, uint8_t icmpCode, uint32_t icmpInfo, ns3::Ipv4Address payloadSource, ns3::Ipv4Address payloadDestination, uint8_t const * payload) [member function]
    cls.add_method('ReceiveIcmp', 'void', [param('ns3::Ipv4Address', 'icmpSource'), param('uint8_t', 'icmpTtl'), param('uint8_t', 'icmpType'), param('uint8_t', 'icmpCode'), param('uint32_t', 'icmpInfo'), param('ns3::Ipv4Address', 'payloadSource'), param('ns3::Ipv4Address', 'payloadDestination'), param('uint8_t const *', 'payload')], is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): void ns3::IpL4Protocol::ReceiveIcmp(ns3::Ipv6Address icmpSource, uint8_t icmpTtl, uint8_t icmpType, uint8_t icmpCode, uint32_t icmpInfo, ns3::Ipv6Address payloadSource, ns3::Ipv6Address payloadDestination, uint8_t const * payload) [member function]
    cls.add_method('ReceiveIcmp', 'void', [param('ns3::Ipv6Address', 'icmpSource'), param('uint8_t', 'icmpTtl'), param('uint8_t', 'icmpType'), param('uint8_t', 'icmpCode'), param('uint32_t', 'icmpInfo'), param('ns3::Ipv6Address', 'payloadSource'), param('ns3::Ipv6Address', 'payloadDestination'), param('uint8_t const *', 'payload')], is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): void ns3::IpL4Protocol::SetDownTarget(ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv4Address,ns3::Ipv4Address,unsigned char,ns3::Ptr<ns3::Ipv4Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetDownTarget', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    ## ip-l4-protocol.h (module 'internet'): void ns3::IpL4Protocol::SetDownTarget6(ns3::Callback<void,ns3::Ptr<ns3::Packet>,ns3::Ipv6Address,ns3::Ipv6Address,unsigned char,ns3::Ptr<ns3::Ipv6Route>,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetDownTarget6', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv6Address, ns3::Ipv6Address, unsigned char, ns3::Ptr< ns3::Ipv6Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3Ipv4_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Ipv4 (internet).

    NOTE(review): this registration function continues past the end of this
    chunk; only the portion visible here is reproduced.
    """
    ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
    ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor]
    cls.add_constructor([])
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
    cls.add_method('AddAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4::CreateRawSocket() [member function]
    cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
    cls.add_method('DeleteRawSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function]
    cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function]
    cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
    cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function]
    cls.add_method('GetInterfaceForPrefix', 'int32_t', [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function]
    cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function]
    cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function]
    cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function]
    cls.add_method('GetNInterfaces', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function]
    cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber) const [member function]
    cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member function]
    cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
    cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
    cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function]
    cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function]
    cls.add_method('IsUp', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function]
    cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
    cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
    cls.add_method('SendWithHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function]
    cls.add_method('SetDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function]
    cls.add_method('SetForwarding', 'void', [param('uint32_t', 'interface'), param('bool', 'val')], is_pure_virtual=True, is_virtual=True)
    ## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function]
    cls.add_method('SetMetric', 'void', [param('uint32_t', 'interface'), param('uint16_t', 'metric')], is_pure_virtual=True, is_virtual=True)
'internet'): void ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function] cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function] cls.add_method('SetUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable] cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member function] cls.add_method('GetIpForward', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function] cls.add_method('GetWeakEsModel', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function] cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], is_pure_virtual=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function] cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() 
[constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4FlowClassifier_methods(root_module, cls): ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::Ipv4FlowClassifier() [constructor] cls.add_constructor([]) ## ipv4-flow-classifier.h (module 'flow-monitor'): bool ns3::Ipv4FlowClassifier::Classify(ns3::Ipv4Header const & ipHeader, ns3::Ptr<const 
ns3::Packet> ipPayload, uint32_t * out_flowId, uint32_t * out_packetId) [member function] cls.add_method('Classify', 'bool', [param('ns3::Ipv4Header const &', 'ipHeader'), param('ns3::Ptr< ns3::Packet const >', 'ipPayload'), param('uint32_t *', 'out_flowId'), param('uint32_t *', 'out_packetId')]) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple ns3::Ipv4FlowClassifier::FindFlow(ns3::FlowId flowId) const [member function] cls.add_method('FindFlow', 'ns3::Ipv4FlowClassifier::FiveTuple', [param('ns3::FlowId', 'flowId')], is_const=True) ## ipv4-flow-classifier.h (module 'flow-monitor'): void ns3::Ipv4FlowClassifier::SerializeToXmlStream(std::ostream & os, int indent) const [member function] cls.add_method('SerializeToXmlStream', 'void', [param('std::ostream &', 'os'), param('int', 'indent')], is_const=True, is_virtual=True) return def register_Ns3Ipv4FlowClassifierFiveTuple_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('==') ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::FiveTuple() [constructor] cls.add_constructor([]) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::FiveTuple(ns3::Ipv4FlowClassifier::FiveTuple const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4FlowClassifier::FiveTuple const &', 'arg0')]) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::destinationAddress [variable] cls.add_instance_attribute('destinationAddress', 'ns3::Ipv4Address', is_const=False) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::destinationPort [variable] cls.add_instance_attribute('destinationPort', 'uint16_t', is_const=False) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::protocol [variable] cls.add_instance_attribute('protocol', 'uint8_t', is_const=False) ## 
ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::sourceAddress [variable] cls.add_instance_attribute('sourceAddress', 'ns3::Ipv4Address', is_const=False) ## ipv4-flow-classifier.h (module 'flow-monitor'): ns3::Ipv4FlowClassifier::FiveTuple::sourcePort [variable] cls.add_instance_attribute('sourcePort', 'uint16_t', is_const=False) return def register_Ns3Ipv4FlowProbe_methods(root_module, cls): ## ipv4-flow-probe.h (module 'flow-monitor'): ns3::Ipv4FlowProbe::Ipv4FlowProbe(ns3::Ptr<ns3::FlowMonitor> monitor, ns3::Ptr<ns3::Ipv4FlowClassifier> classifier, ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::FlowMonitor >', 'monitor'), param('ns3::Ptr< ns3::Ipv4FlowClassifier >', 'classifier'), param('ns3::Ptr< ns3::Node >', 'node')]) return def register_Ns3Ipv4L3Protocol_methods(root_module, cls): ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::Ipv4L3Protocol() [constructor] cls.add_constructor([]) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::AddAddress(uint32_t i, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('AddAddress', 'bool', [param('uint32_t', 'i'), param('ns3::Ipv4InterfaceAddress', 'address')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4L3Protocol::CreateRawSocket() [member function] cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function] cls.add_method('DeleteRawSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): 
ns3::Ipv4InterfaceAddress ns3::Ipv4L3Protocol::GetAddress(uint32_t interfaceIndex, uint32_t addressIndex) const [member function] cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Interface> ns3::Ipv4L3Protocol::GetInterface(uint32_t i) const [member function] cls.add_method('GetInterface', 'ns3::Ptr< ns3::Ipv4Interface >', [param('uint32_t', 'i')], is_const=True) ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForAddress(ns3::Ipv4Address addr) const [member function] cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'addr')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function] cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): int32_t ns3::Ipv4L3Protocol::GetInterfaceForPrefix(ns3::Ipv4Address addr, ns3::Ipv4Mask mask) const [member function] cls.add_method('GetInterfaceForPrefix', 'int32_t', [param('ns3::Ipv4Address', 'addr'), param('ns3::Ipv4Mask', 'mask')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMetric(uint32_t i) const [member function] cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'i')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): uint16_t ns3::Ipv4L3Protocol::GetMtu(uint32_t i) const [member function] cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'i')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNAddresses(uint32_t interface) const [member function] 
cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 'interface')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): uint32_t ns3::Ipv4L3Protocol::GetNInterfaces() const [member function] cls.add_method('GetNInterfaces', 'uint32_t', [], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4L3Protocol::GetNetDevice(uint32_t i) [member function] cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4L3Protocol::GetProtocol(int protocolNumber) const [member function] cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4L3Protocol::GetRoutingProtocol() const [member function] cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4L3Protocol::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function] cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function] cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsForwarding(uint32_t i) const [member function] cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'i')], is_const=True, 
is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::IsUp(uint32_t i) const [member function] cls.add_method('IsUp', 'bool', [param('uint32_t', 'i')], is_const=True, is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Receive(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> p, uint16_t protocol, ns3::Address const & from, ns3::Address const & to, ns3::NetDevice::PacketType packetType) [member function] cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'from'), param('ns3::Address const &', 'to'), param('ns3::NetDevice::PacketType', 'packetType')]) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function] cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')]) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::RemoveAddress(uint32_t interfaceIndex, uint32_t addressIndex) [member function] cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interfaceIndex'), param('uint32_t', 'addressIndex')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4L3Protocol::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('Send', 'void', 
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('SendWithHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDefaultTtl(uint8_t ttl) [member function] cls.add_method('SetDefaultTtl', 'void', [param('uint8_t', 'ttl')]) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetDown(uint32_t i) [member function] cls.add_method('SetDown', 'void', [param('uint32_t', 'i')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetForwarding(uint32_t i, bool val) [member function] cls.add_method('SetForwarding', 'void', [param('uint32_t', 'i'), param('bool', 'val')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetMetric(uint32_t i, uint16_t metric) [member function] cls.add_method('SetMetric', 'void', [param('uint32_t', 'i'), param('uint16_t', 'metric')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function] cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetUp(uint32_t i) 
[member function] cls.add_method('SetUp', 'void', [param('uint32_t', 'i')], is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): ns3::Ipv4L3Protocol::PROT_NUMBER [variable] cls.add_static_attribute('PROT_NUMBER', 'uint16_t const', is_const=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetIpForward() const [member function] cls.add_method('GetIpForward', 'bool', [], is_const=True, visibility='private', is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): bool ns3::Ipv4L3Protocol::GetWeakEsModel() const [member function] cls.add_method('GetWeakEsModel', 'bool', [], is_const=True, visibility='private', is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetIpForward(bool forward) [member function] cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], visibility='private', is_virtual=True) ## ipv4-l3-protocol.h (module 'internet'): void ns3::Ipv4L3Protocol::SetWeakEsModel(bool model) [member function] cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], visibility='private', is_virtual=True) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): 
ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv4MulticastRoute_methods(root_module, cls): ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() 
[constructor] cls.add_constructor([]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function] cls.add_method('GetGroup', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function] cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetOutputTtl(uint32_t oif) [member function] cls.add_method('GetOutputTtl', 'uint32_t', [param('uint32_t', 'oif')], deprecated=True) ## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function] cls.add_method('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >', [], is_const=True) ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function] cls.add_method('GetParent', 'uint32_t', [], is_const=True) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function] cls.add_method('SetGroup', 'void', [param('ns3::Ipv4Address const', 'group')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function] cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address const', 'origin')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function] cls.add_method('SetOutputTtl', 'void', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function] cls.add_method('SetParent', 'void', [param('uint32_t', 'iif')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable] 
cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable] cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True) return def register_Ns3Ipv4Route_methods(root_module, cls): cls.add_output_stream_operator() ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor] cls.add_constructor([]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function] cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function] cls.add_method('GetGateway', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function] cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function] cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'dest')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function] cls.add_method('SetGateway', 'void', [param('ns3::Ipv4Address', 'gw')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function] cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')]) ## ipv4-route.h (module 'internet'): void 
ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function] cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'src')]) return def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls): ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor] cls.add_constructor([]) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')]) ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void 
ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, 
ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_pure_virtual=True, is_virtual=True) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> 
ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6Interface_methods(root_module, cls): ## ipv6-interface.h (module 'internet'): ns3::Ipv6Interface::Ipv6Interface(ns3::Ipv6Interface const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6Interface const &', 'arg0')]) ## ipv6-interface.h (module 'internet'): ns3::Ipv6Interface::Ipv6Interface() [constructor] cls.add_constructor([]) ## ipv6-interface.h (module 'internet'): bool ns3::Ipv6Interface::AddAddress(ns3::Ipv6InterfaceAddress iface) [member function] cls.add_method('AddAddress', 'bool', [param('ns3::Ipv6InterfaceAddress', 'iface')]) ## ipv6-interface.h (module 'internet'): ns3::Ipv6InterfaceAddress ns3::Ipv6Interface::GetAddress(uint32_t index) const [member function] cls.add_method('GetAddress', 'ns3::Ipv6InterfaceAddress', [param('uint32_t', 'index')], is_const=True) ## ipv6-interface.h 
(module 'internet'): ns3::Ipv6InterfaceAddress ns3::Ipv6Interface::GetAddressMatchingDestination(ns3::Ipv6Address dst) [member function] cls.add_method('GetAddressMatchingDestination', 'ns3::Ipv6InterfaceAddress', [param('ns3::Ipv6Address', 'dst')]) ## ipv6-interface.h (module 'internet'): uint16_t ns3::Ipv6Interface::GetBaseReachableTime() const [member function] cls.add_method('GetBaseReachableTime', 'uint16_t', [], is_const=True) ## ipv6-interface.h (module 'internet'): uint8_t ns3::Ipv6Interface::GetCurHopLimit() const [member function] cls.add_method('GetCurHopLimit', 'uint8_t', [], is_const=True) ## ipv6-interface.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv6Interface::GetDevice() const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True, is_virtual=True) ## ipv6-interface.h (module 'internet'): ns3::Ipv6InterfaceAddress ns3::Ipv6Interface::GetLinkLocalAddress() const [member function] cls.add_method('GetLinkLocalAddress', 'ns3::Ipv6InterfaceAddress', [], is_const=True) ## ipv6-interface.h (module 'internet'): uint16_t ns3::Ipv6Interface::GetMetric() const [member function] cls.add_method('GetMetric', 'uint16_t', [], is_const=True) ## ipv6-interface.h (module 'internet'): uint32_t ns3::Ipv6Interface::GetNAddresses() const [member function] cls.add_method('GetNAddresses', 'uint32_t', [], is_const=True) ## ipv6-interface.h (module 'internet'): uint16_t ns3::Ipv6Interface::GetReachableTime() const [member function] cls.add_method('GetReachableTime', 'uint16_t', [], is_const=True) ## ipv6-interface.h (module 'internet'): uint16_t ns3::Ipv6Interface::GetRetransTimer() const [member function] cls.add_method('GetRetransTimer', 'uint16_t', [], is_const=True) ## ipv6-interface.h (module 'internet'): static ns3::TypeId ns3::Ipv6Interface::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv6-interface.h (module 'internet'): bool ns3::Ipv6Interface::IsDown() const 
[member function] cls.add_method('IsDown', 'bool', [], is_const=True) ## ipv6-interface.h (module 'internet'): bool ns3::Ipv6Interface::IsForwarding() const [member function] cls.add_method('IsForwarding', 'bool', [], is_const=True) ## ipv6-interface.h (module 'internet'): bool ns3::Ipv6Interface::IsUp() const [member function] cls.add_method('IsUp', 'bool', [], is_const=True) ## ipv6-interface.h (module 'internet'): ns3::Ipv6InterfaceAddress ns3::Ipv6Interface::RemoveAddress(uint32_t index) [member function] cls.add_method('RemoveAddress', 'ns3::Ipv6InterfaceAddress', [param('uint32_t', 'index')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::Send(ns3::Ptr<ns3::Packet> p, ns3::Ipv6Address dest) [member function] cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv6Address', 'dest')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetBaseReachableTime(uint16_t baseReachableTime) [member function] cls.add_method('SetBaseReachableTime', 'void', [param('uint16_t', 'baseReachableTime')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetCurHopLimit(uint8_t curHopLimit) [member function] cls.add_method('SetCurHopLimit', 'void', [param('uint8_t', 'curHopLimit')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetDown() [member function] cls.add_method('SetDown', 'void', []) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetForwarding(bool forward) [member function] cls.add_method('SetForwarding', 'void', [param('bool', 'forward')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetMetric(uint16_t metric) [member function] cls.add_method('SetMetric', 'void', [param('uint16_t', 'metric')]) ## ipv6-interface.h (module 
'internet'): void ns3::Ipv6Interface::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetNsDadUid(ns3::Ipv6Address address, uint32_t uid) [member function] cls.add_method('SetNsDadUid', 'void', [param('ns3::Ipv6Address', 'address'), param('uint32_t', 'uid')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetReachableTime(uint16_t reachableTime) [member function] cls.add_method('SetReachableTime', 'void', [param('uint16_t', 'reachableTime')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetRetransTimer(uint16_t retransTimer) [member function] cls.add_method('SetRetransTimer', 'void', [param('uint16_t', 'retransTimer')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetState(ns3::Ipv6Address address, ns3::Ipv6InterfaceAddress::State_e state) [member function] cls.add_method('SetState', 'void', [param('ns3::Ipv6Address', 'address'), param('ns3::Ipv6InterfaceAddress::State_e', 'state')]) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::SetUp() [member function] cls.add_method('SetUp', 'void', []) ## ipv6-interface.h (module 'internet'): void ns3::Ipv6Interface::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## 
ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void 
ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True) 
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, 
ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const 
ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NixVector_methods(root_module, cls): cls.add_output_stream_operator() ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor] cls.add_constructor([]) ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor] cls.add_constructor([param('ns3::NixVector const &', 'o')]) ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function] cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function] cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True) ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function] cls.add_method('ExtractNeighborIndex', 'uint32_t', 
[param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function] cls.add_method('GetRemainingBits', 'uint32_t', []) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< 
ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 
'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] 
cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OutputStreamWrapper_methods(root_module, cls): ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor] cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')]) ## output-stream-wrapper.h (module 
'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor] cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')]) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor] cls.add_constructor([param('std::ostream *', 'os')]) ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function] cls.add_method('GetStream', 'std::ostream *', []) return def register_Ns3Packet_methods(root_module, cls): cls.add_output_stream_operator() ## packet.h (module 'network'): ns3::Packet::Packet() [constructor] cls.add_constructor([]) ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor] cls.add_constructor([param('ns3::Packet const &', 'o')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', 
[param('ns3::Header const &', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], 
is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer 
&', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h 
(module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'arg0')]) return def register_Ns3TimeChecker_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')]) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void 
ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void 
ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, 
is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
shanemikel/beets
beets/autotag/__init__.py
22
5255
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Facilities for automatically determining files' correct metadata.
"""
from __future__ import (division, absolute_import, print_function,
                        unicode_literals)

from beets import logging
from beets import config

# Parts of external interface.
from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch  # noqa
from .match import tag_item, tag_album  # noqa
from .match import Recommendation  # noqa

# Global logger.
log = logging.getLogger('beets')


# Additional utilities for the main interface.

def apply_item_metadata(item, track_info):
    """Set an item's metadata from its matched TrackInfo object.

    Copies the track-level artist/title fields and MusicBrainz IDs from
    ``track_info`` onto ``item``, mutating ``item`` in place. Album-level
    fields (album, track number, ...) are deliberately left untouched.
    """
    item.artist = track_info.artist
    item.artist_sort = track_info.artist_sort
    item.artist_credit = track_info.artist_credit
    item.title = track_info.title
    item.mb_trackid = track_info.track_id
    # Only overwrite IDs/source when the match actually provides them, so
    # existing values on the item are preserved otherwise.
    if track_info.artist_id:
        item.mb_artistid = track_info.artist_id
    if track_info.data_source:
        item.data_source = track_info.data_source
    # At the moment, the other metadata is left intact (including album
    # and track number). Perhaps these should be emptied?


def apply_metadata(album_info, mapping):
    """Set the items' metadata to match an AlbumInfo object using a
    mapping from Items to TrackInfo objects.

    Mutates each item in ``mapping`` in place. Track-level values take
    precedence over album-level values where both exist (artist, sort and
    credit names, MusicBrainz artist ID).
    """
    # NOTE(review): iteritems() makes this module Python 2 only — the
    # mapping is presumably {Item: TrackInfo}; confirm against callers.
    for item, track_info in mapping.iteritems():
        # Album, artist, track count.
        # Track artist wins over album artist when present (e.g. on
        # compilations); albumartist is always the album-level artist.
        if track_info.artist:
            item.artist = track_info.artist
        else:
            item.artist = album_info.artist
        item.albumartist = album_info.artist
        item.album = album_info.album

        # Artist sort and credit names.
        item.artist_sort = track_info.artist_sort or album_info.artist_sort
        item.artist_credit = (track_info.artist_credit or
                              album_info.artist_credit)
        item.albumartist_sort = album_info.artist_sort
        item.albumartist_credit = album_info.artist_credit

        # Release date.
        # Sets year/month/day and original_year/original_month/original_day.
        # When the `original_date` option is on, the specific release date
        # is skipped and the original date is written to *both* field sets.
        for prefix in '', 'original_':
            if config['original_date'] and not prefix:
                # Ignore specific release date.
                continue

            for suffix in 'year', 'month', 'day':
                key = prefix + suffix
                value = getattr(album_info, key) or 0

                # If we don't even have a year, apply nothing.
                if suffix == 'year' and not value:
                    break

                # Otherwise, set the fetched value (or 0 for the month
                # and day if not available).
                item[key] = value

                # If we're using original release date for both fields,
                # also set item.year = info.original_year, etc.
                if config['original_date']:
                    item[suffix] = value

        # Title.
        item.title = track_info.title

        if config['per_disc_numbering']:
            # We want to let the track number be zero, but if the medium index
            # is not provided we need to fall back to the overall index.
            item.track = track_info.medium_index
            if item.track is None:
                item.track = track_info.index
            item.tracktotal = track_info.medium_total or \
                len(album_info.tracks)
        else:
            item.track = track_info.index
            item.tracktotal = len(album_info.tracks)

        # Disc and disc count.
        item.disc = track_info.medium
        item.disctotal = album_info.mediums

        # MusicBrainz IDs.
        item.mb_trackid = track_info.track_id
        item.mb_albumid = album_info.album_id
        # Fall back to the album artist ID when the track has no own one.
        if track_info.artist_id:
            item.mb_artistid = track_info.artist_id
        else:
            item.mb_artistid = album_info.artist_id
        item.mb_albumartistid = album_info.artist_id
        item.mb_releasegroupid = album_info.releasegroup_id

        # Compilation flag.
        item.comp = album_info.va

        # Miscellaneous metadata.
        # Copy optional album-level fields only when the fetched value is
        # present (None means "not provided", not "clear the field").
        for field in ('albumtype',
                      'label',
                      'asin',
                      'catalognum',
                      'script',
                      'language',
                      'country',
                      'albumstatus',
                      'albumdisambig',
                      'data_source',):
            value = getattr(album_info, field)
            if value is not None:
                item[field] = value
        if track_info.disctitle is not None:
            item.disctitle = track_info.disctitle

        if track_info.media is not None:
            item.media = track_info.media
mit
nathanaevitas/odoo
openerp/addons/account/tests/test_tax.py
449
1740
from openerp.tests.common import TransactionCase


class TestTax(TransactionCase):
    """Tests for taxes (account.tax)

    We don't really need at this point to link taxes to tax codes
    (account.tax.code) nor to companies (base.company) to check
    computation results.
    """

    def setUp(self):
        super(TestTax, self).setUp()
        self.tax_model = self.registry('account.tax')

    def test_programmatic_tax(self):
        # A 'code'-type tax evaluates its python_compute snippet once per
        # unit: 12.0 * qty 2 = 24.0 on a 2 * 50.0 = 100.0 base.
        cr, uid = self.cr, self.uid
        created_id = self.tax_model.create(cr, uid, {
            'name': "Programmatic tax",
            'type': 'code',
            'python_compute': 'result = 12.0',
            'python_compute_inv': 'result = 11.0',
        })
        records = self.tax_model.browse(cr, uid, [created_id])

        computed = self.tax_model.compute_all(cr, uid, records, 50.0, 2)
        first_detail = computed['taxes'][0]
        self.assertEqual(24.0, first_detail['amount'])
        self.assertEqual(124.0, computed['total_included'])

    def test_percent_tax(self):
        """Test computations done by a 10 percent tax."""
        cr, uid = self.cr, self.uid
        created_id = self.tax_model.create(cr, uid, {
            'name': "Percent tax",
            'type': 'percent',
            'amount': '0.1',
        })
        records = self.tax_model.browse(cr, uid, [created_id])

        # Forward computation: 10% of 2 * 50.0.
        computed = self.tax_model.compute_all(cr, uid, records, 50.0, 2)
        self.assertEqual(10.0, computed['taxes'][0]['amount'])
        self.assertEqual(110.0, computed['total_included'])

        # now the inverse computation
        inverse = self.tax_model.compute_inv(cr, uid, records, 55.0, 2)
        self.assertEqual(10.0, inverse[0]['amount'])
agpl-3.0
AdaptiveApplications/carnegie
tarc_bus_locator_client/numpy-1.8.1/numpy/core/tests/test_deprecations.py
10
12074
""" Tests related to deprecation warnings. Also a convenient place to document how deprecations should eventually be turned into errors. """ from __future__ import division, absolute_import, print_function import sys import operator import warnings from nose.plugins.skip import SkipTest import numpy as np from numpy.testing import dec, run_module_suite, assert_raises class _DeprecationTestCase(object): # Just as warning: warnings uses re.match, so the start of this message # must match. message = '' def setUp(self): self.warn_ctx = warnings.catch_warnings(record=True) self.log = self.warn_ctx.__enter__() # Do *not* ignore other DeprecationWarnings. Ignoring warnings # can give very confusing results because of # http://bugs.python.org/issue4180 and it is probably simplest to # try to keep the tests cleanly giving only the right warning type. # (While checking them set to "error" those are ignored anyway) # We still have them show up, because otherwise they would be raised warnings.filterwarnings("always", category=DeprecationWarning) warnings.filterwarnings("always", message=self.message, category=DeprecationWarning) def tearDown(self): self.warn_ctx.__exit__() def assert_deprecated(self, function, num=1, ignore_others=False, function_fails=False, exceptions=(DeprecationWarning,), args=(), kwargs={}): """Test if DeprecationWarnings are given and raised. This first checks if the function when called gives `num` DeprecationWarnings, after that it tries to raise these DeprecationWarnings and compares them with `exceptions`. The exceptions can be different for cases where this code path is simply not anticipated and the exception is replaced. Parameters ---------- f : callable The function to test num : int Number of DeprecationWarnings to expect. This should normally be 1. 
ignore_other : bool Whether warnings of the wrong type should be ignored (note that the message is not checked) function_fails : bool If the function would normally fail, setting this will check for warnings inside a try/except block. exceptions : Exception or tuple of Exceptions Exception to expect when turning the warnings into an error. The default checks for DeprecationWarnings. If exceptions is empty the function is expected to run successfull. args : tuple Arguments for `f` kwargs : dict Keyword arguments for `f` """ # reset the log self.log[:] = [] try: function(*args, **kwargs) except (Exception if function_fails else tuple()): pass # just in case, clear the registry num_found = 0 for warning in self.log: if warning.category is DeprecationWarning: num_found += 1 elif not ignore_others: raise AssertionError("expected DeprecationWarning but %s given" % warning.category) if num_found != num: raise AssertionError("%i warnings found but %i expected" % (len(self.log), num)) with warnings.catch_warnings(): warnings.filterwarnings("error", message=self.message, category=DeprecationWarning) try: function(*args, **kwargs) if exceptions != tuple(): raise AssertionError("No error raised during function call") except exceptions: if exceptions == tuple(): raise AssertionError("Error raised during function call") def assert_not_deprecated(self, function, args=(), kwargs={}): """Test if DeprecationWarnings are given and raised. This is just a shorthand for: self.assert_deprecated(function, num=0, ignore_others=True, exceptions=tuple(), args=args, kwargs=kwargs) """ self.assert_deprecated(function, num=0, ignore_others=True, exceptions=tuple(), args=args, kwargs=kwargs) class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase): """ These test that ``DeprecationWarning`` is given when you try to use non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. 
After deprecation, changes need to be done inside conversion_utils.c in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter. In iterators.c the function slice_GetIndices could be removed in favor of its python equivalent and in mapping.c the function _tuple_of_integers can be simplified (if ``np.array([1]).__index__()`` is also deprecated). As for the deprecation time-frame: via Ralf Gommers, "Hard to put that as a version number, since we don't know if the version after 1.8 will be 6 months or 2 years after. I'd say 2 years is reasonable." I interpret this to mean 2 years after the 1.8 release. Possibly giving a PendingDeprecationWarning before that (which is visible by default) """ message = "using a non-integer number instead of an integer " \ "will result in an error in the future" def test_indexing(self): a = np.array([[[5]]]) def assert_deprecated(*args, **kwargs): self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs) assert_deprecated(lambda: a[0.0]) assert_deprecated(lambda: a[0, 0.0]) assert_deprecated(lambda: a[0.0, 0]) assert_deprecated(lambda: a[0.0,:]) assert_deprecated(lambda: a[:, 0.0]) assert_deprecated(lambda: a[:, 0.0,:]) assert_deprecated(lambda: a[0.0,:,:], num=2) # [1] assert_deprecated(lambda: a[0, 0, 0.0]) assert_deprecated(lambda: a[0.0, 0, 0]) assert_deprecated(lambda: a[0, 0.0, 0]) assert_deprecated(lambda: a[-1.4]) assert_deprecated(lambda: a[0, -1.4]) assert_deprecated(lambda: a[-1.4, 0]) assert_deprecated(lambda: a[-1.4,:]) assert_deprecated(lambda: a[:, -1.4]) assert_deprecated(lambda: a[:, -1.4,:]) assert_deprecated(lambda: a[-1.4,:,:], num=2) # [1] assert_deprecated(lambda: a[0, 0, -1.4]) assert_deprecated(lambda: a[-1.4, 0, 0]) assert_deprecated(lambda: a[0, -1.4, 0]) # [1] These are duplicate because of the _tuple_of_integers quick check # Test that the slice parameter deprecation warning doesn't mask # the scalar index warning. 
assert_deprecated(lambda: a[0.0:, 0.0], num=2) assert_deprecated(lambda: a[0.0:, 0.0,:], num=2) def test_valid_indexing(self): a = np.array([[[5]]]) assert_not_deprecated = self.assert_not_deprecated assert_not_deprecated(lambda: a[np.array([0])]) assert_not_deprecated(lambda: a[[0, 0]]) assert_not_deprecated(lambda: a[:, [0, 0]]) assert_not_deprecated(lambda: a[:, 0,:]) assert_not_deprecated(lambda: a[:,:,:]) def test_slicing(self): a = np.array([[5]]) def assert_deprecated(*args, **kwargs): self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs) # start as float. assert_deprecated(lambda: a[0.0:]) assert_deprecated(lambda: a[0:, 0.0:2]) assert_deprecated(lambda: a[0.0::2, :0]) assert_deprecated(lambda: a[0.0:1:2,:]) assert_deprecated(lambda: a[:, 0.0:]) # stop as float. assert_deprecated(lambda: a[:0.0]) assert_deprecated(lambda: a[:0, 1:2.0]) assert_deprecated(lambda: a[:0.0:2, :0]) assert_deprecated(lambda: a[:0.0,:]) assert_deprecated(lambda: a[:, 0:4.0:2]) # step as float. assert_deprecated(lambda: a[::1.0]) assert_deprecated(lambda: a[0:, :2:2.0]) assert_deprecated(lambda: a[1::4.0, :0]) assert_deprecated(lambda: a[::5.0,:]) assert_deprecated(lambda: a[:, 0:4:2.0]) # mixed. assert_deprecated(lambda: a[1.0:2:2.0], num=2) assert_deprecated(lambda: a[1.0::2.0], num=2) assert_deprecated(lambda: a[0:, :2.0:2.0], num=2) assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2) assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3) assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3) # should still get the DeprecationWarning if step = 0. 
assert_deprecated(lambda: a[::0.0], function_fails=True) def test_valid_slicing(self): a = np.array([[[5]]]) assert_not_deprecated = self.assert_not_deprecated assert_not_deprecated(lambda: a[::]) assert_not_deprecated(lambda: a[0:]) assert_not_deprecated(lambda: a[:2]) assert_not_deprecated(lambda: a[0:2]) assert_not_deprecated(lambda: a[::2]) assert_not_deprecated(lambda: a[1::2]) assert_not_deprecated(lambda: a[:2:2]) assert_not_deprecated(lambda: a[1:2:2]) def test_non_integer_argument_deprecations(self): a = np.array([[5]]) self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2) self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1))) self.assert_deprecated(np.take, args=(a, [0], 1.)) self.assert_deprecated(np.take, args=(a, [0], np.float64(1.))) class TestBooleanArgumentDeprecation(_DeprecationTestCase): """This tests that using a boolean as integer argument/indexing is deprecated. This should be kept in sync with TestFloatNonIntegerArgumentDeprecation and like it is handled in PyArray_PyIntAsIntp. """ message = "using a boolean instead of an integer " \ "will result in an error in the future" def test_bool_as_int_argument(self): a = np.array([[[1]]]) self.assert_deprecated(np.reshape, args=(a, (True, -1))) self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1))) # Note that operator.index(np.array(True)) does not work, a boolean # array is thus also deprecated, but not with the same message: assert_raises(TypeError, operator.index, np.array(True)) self.assert_deprecated(np.take, args=(a, [0], False)) self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3) self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError) self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError) class TestArrayToIndexDeprecation(_DeprecationTestCase): """This tests that creating an an index from an array is deprecated if the array is not 0d. 
This can probably be deprecated somewhat faster then the integer deprecations. The deprecation period started with NumPy 1.8. For deprecation this needs changing of array_index in number.c """ message = "converting an array with ndim \> 0 to an index will result " \ "in an error in the future" def test_array_to_index_deprecation(self): # This drops into the non-integer deprecation, which is ignored here, # so no exception is expected. The raising is effectively tested above. a = np.array([[[1]]]) self.assert_deprecated(operator.index, args=(np.array([1]),)) self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=()) self.assert_deprecated(np.take, args=(a, [0], a), exceptions=()) # Check slicing. Normal indexing checks arrays specifically. self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3) if __name__ == "__main__": run_module_suite()
mit
ejona86/grpc
bazel/test/python_test_repo/namespaced/upper/example/no_import_strip_test.py
10
1200
# Copyright 2020 the gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest


class ImportTest(unittest.TestCase):
    """Smoke tests: the namespaced generated modules must be importable."""

    def test_import(self):
        from namespaced_example_pb2 import NamespacedExample

        message = NamespacedExample()
        message.value = "hello"
        # The assertion is a formality; the import above is the real check.
        self.assertEqual(message.value, "hello")

    def test_grpc(self):
        from namespaced_example_pb2_grpc import NamespacedServiceStub

        # Reaching this line means the gRPC stub module imported cleanly.
        self.assertEqual(1, 1)


if __name__ == '__main__':
    logging.basicConfig()
    unittest.main()
apache-2.0
spring-week-topos/nova-week
nova/tests/cells/test_cells_manager.py
9
37769
# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CellsManager """ import copy import datetime import mock from oslo.config import cfg from nova.cells import messaging from nova.cells import utils as cells_utils from nova import context from nova.openstack.common import timeutils from nova import test from nova.tests.cells import fakes from nova.tests import fake_instance_actions CONF = cfg.CONF CONF.import_opt('compute_topic', 'nova.compute.rpcapi') FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)] FAKE_SERVICES = [dict(id=1, host='host1', compute_node=[FAKE_COMPUTE_NODES[0]]), dict(id=2, host='host2', compute_node=[FAKE_COMPUTE_NODES[1]]), dict(id=3, host='host3', compute_node=[])] FAKE_TASK_LOGS = [dict(id=1, host='host1'), dict(id=2, host='host2')] class CellsManagerClassTestCase(test.NoDBTestCase): """Test case for CellsManager class.""" def setUp(self): super(CellsManagerClassTestCase, self).setUp() fakes.init(self) # pick a child cell to use for tests. 
self.our_cell = 'grandchild-cell1' self.cells_manager = fakes.get_cells_manager(self.our_cell) self.msg_runner = self.cells_manager.msg_runner self.state_manager = fakes.get_state_manager(self.our_cell) self.driver = self.cells_manager.driver self.ctxt = 'fake_context' def _get_fake_response(self, raw_response=None, exc=False): if exc: return messaging.Response('fake', test.TestingException(), True) if raw_response is None: raw_response = 'fake-response' return messaging.Response('fake', raw_response, False) def test_get_cell_info_for_neighbors(self): self.mox.StubOutWithMock(self.cells_manager.state_manager, 'get_cell_info_for_neighbors') self.cells_manager.state_manager.get_cell_info_for_neighbors() self.mox.ReplayAll() self.cells_manager.get_cell_info_for_neighbors(self.ctxt) def test_post_start_hook_child_cell(self): self.mox.StubOutWithMock(self.driver, 'start_servers') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents') self.driver.start_servers(self.msg_runner) context.get_admin_context().AndReturn(self.ctxt) self.cells_manager._update_our_parents(self.ctxt) self.mox.ReplayAll() self.cells_manager.post_start_hook() def test_post_start_hook_middle_cell(self): cells_manager = fakes.get_cells_manager('child-cell2') msg_runner = cells_manager.msg_runner driver = cells_manager.driver self.mox.StubOutWithMock(driver, 'start_servers') self.mox.StubOutWithMock(context, 'get_admin_context') self.mox.StubOutWithMock(msg_runner, 'ask_children_for_capabilities') self.mox.StubOutWithMock(msg_runner, 'ask_children_for_capacities') driver.start_servers(msg_runner) context.get_admin_context().AndReturn(self.ctxt) msg_runner.ask_children_for_capabilities(self.ctxt) msg_runner.ask_children_for_capacities(self.ctxt) self.mox.ReplayAll() cells_manager.post_start_hook() def test_update_our_parents(self): self.mox.StubOutWithMock(self.msg_runner, 'tell_parents_our_capabilities') 
self.mox.StubOutWithMock(self.msg_runner, 'tell_parents_our_capacities') self.msg_runner.tell_parents_our_capabilities(self.ctxt) self.msg_runner.tell_parents_our_capacities(self.ctxt) self.mox.ReplayAll() self.cells_manager._update_our_parents(self.ctxt) def test_schedule_run_instance(self): host_sched_kwargs = 'fake_host_sched_kwargs_silently_passed' self.mox.StubOutWithMock(self.msg_runner, 'schedule_run_instance') our_cell = self.msg_runner.state_manager.get_my_state() self.msg_runner.schedule_run_instance(self.ctxt, our_cell, host_sched_kwargs) self.mox.ReplayAll() self.cells_manager.schedule_run_instance(self.ctxt, host_sched_kwargs=host_sched_kwargs) def test_build_instances(self): build_inst_kwargs = {'instances': [1, 2]} self.mox.StubOutWithMock(self.msg_runner, 'build_instances') our_cell = self.msg_runner.state_manager.get_my_state() self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs) self.mox.ReplayAll() self.cells_manager.build_instances(self.ctxt, build_inst_kwargs=build_inst_kwargs) def test_run_compute_api_method(self): # Args should just be silently passed through cell_name = 'fake-cell-name' method_info = 'fake-method-info' self.mox.StubOutWithMock(self.msg_runner, 'run_compute_api_method') fake_response = self._get_fake_response() self.msg_runner.run_compute_api_method(self.ctxt, cell_name, method_info, True).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.run_compute_api_method( self.ctxt, cell_name=cell_name, method_info=method_info, call=True) self.assertEqual('fake-response', response) def test_instance_update_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top') self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.instance_update_at_top(self.ctxt, instance='fake-instance') def test_instance_destroy_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top') 
self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.instance_destroy_at_top(self.ctxt, instance='fake-instance') def test_instance_delete_everywhere(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_delete_everywhere') self.msg_runner.instance_delete_everywhere(self.ctxt, 'fake-instance', 'fake-type') self.mox.ReplayAll() self.cells_manager.instance_delete_everywhere( self.ctxt, instance='fake-instance', delete_type='fake-type') def test_instance_fault_create_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_fault_create_at_top') self.msg_runner.instance_fault_create_at_top(self.ctxt, 'fake-fault') self.mox.ReplayAll() self.cells_manager.instance_fault_create_at_top( self.ctxt, instance_fault='fake-fault') def test_bw_usage_update_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bw_usage_update_at_top') self.msg_runner.bw_usage_update_at_top(self.ctxt, 'fake-bw-info') self.mox.ReplayAll() self.cells_manager.bw_usage_update_at_top( self.ctxt, bw_update_info='fake-bw-info') def test_heal_instances(self): self.flags(instance_updated_at_threshold=1000, instance_update_num_instances=2, group='cells') fake_context = context.RequestContext('fake', 'fake') stalled_time = timeutils.utcnow() updated_since = stalled_time - datetime.timedelta(seconds=1000) def utcnow(): return stalled_time call_info = {'get_instances': 0, 'sync_instances': []} instances = ['instance1', 'instance2', 'instance3'] def get_instances_to_sync(context, **kwargs): self.assertEqual(context, fake_context) call_info['shuffle'] = kwargs.get('shuffle') call_info['project_id'] = kwargs.get('project_id') call_info['updated_since'] = kwargs.get('updated_since') call_info['get_instances'] += 1 return iter(instances) def instance_get_by_uuid(context, uuid): return instances[int(uuid[-1]) - 1] def sync_instance(context, instance): self.assertEqual(context, fake_context) call_info['sync_instances'].append(instance) 
self.stubs.Set(cells_utils, 'get_instances_to_sync', get_instances_to_sync) self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid', instance_get_by_uuid) self.stubs.Set(self.cells_manager, '_sync_instance', sync_instance) self.stubs.Set(timeutils, 'utcnow', utcnow) self.cells_manager._heal_instances(fake_context) self.assertEqual(call_info['shuffle'], True) self.assertIsNone(call_info['project_id']) self.assertEqual(call_info['updated_since'], updated_since) self.assertEqual(call_info['get_instances'], 1) # Only first 2 self.assertEqual(call_info['sync_instances'], instances[:2]) call_info['sync_instances'] = [] self.cells_manager._heal_instances(fake_context) self.assertEqual(call_info['shuffle'], True) self.assertIsNone(call_info['project_id']) self.assertEqual(call_info['updated_since'], updated_since) self.assertEqual(call_info['get_instances'], 2) # Now the last 1 and the first 1 self.assertEqual(call_info['sync_instances'], [instances[-1], instances[0]]) def test_sync_instances(self): self.mox.StubOutWithMock(self.msg_runner, 'sync_instances') self.msg_runner.sync_instances(self.ctxt, 'fake-project', 'fake-time', 'fake-deleted') self.mox.ReplayAll() self.cells_manager.sync_instances(self.ctxt, project_id='fake-project', updated_since='fake-time', deleted='fake-deleted') def test_service_get_all(self): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of services. # Manager should turn these into a single list of responses. 
for i in xrange(3): cell_name = 'path!to!cell%i' % i services = [] for service in FAKE_SERVICES: services.append(copy.deepcopy(service)) expected_service = copy.deepcopy(service) cells_utils.add_cell_to_service(expected_service, cell_name) expected_response.append(expected_service) response = messaging.Response(cell_name, services, False) responses.append(response) self.mox.StubOutWithMock(self.msg_runner, 'service_get_all') self.msg_runner.service_get_all(self.ctxt, 'fake-filters').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.service_get_all(self.ctxt, filters='fake-filters') self.assertEqual(expected_response, response) def test_service_get_by_compute_host(self): self.mox.StubOutWithMock(self.msg_runner, 'service_get_by_compute_host') fake_cell = 'fake-cell' fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0], False) expected_response = copy.deepcopy(FAKE_SERVICES[0]) cells_utils.add_cell_to_service(expected_response, fake_cell) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') self.msg_runner.service_get_by_compute_host(self.ctxt, fake_cell, 'fake-host').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.service_get_by_compute_host(self.ctxt, host_name=cell_and_host) self.assertEqual(expected_response, response) def test_get_host_uptime(self): fake_cell = 'parent!fake-cell' fake_host = 'fake-host' fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host) host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:" " 0.20, 0.12, 0.14") fake_response = messaging.Response(fake_cell, host_uptime, False) self.mox.StubOutWithMock(self.msg_runner, 'get_host_uptime') self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\ AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.get_host_uptime(self.ctxt, fake_cell_and_host) self.assertEqual(host_uptime, response) def test_service_update(self): fake_cell = 'fake-cell' fake_response = 
messaging.Response( fake_cell, FAKE_SERVICES[0], False) expected_response = copy.deepcopy(FAKE_SERVICES[0]) cells_utils.add_cell_to_service(expected_response, fake_cell) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') params_to_update = {'disabled': True} self.mox.StubOutWithMock(self.msg_runner, 'service_update') self.msg_runner.service_update(self.ctxt, fake_cell, 'fake-host', 'nova-api', params_to_update).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.service_update( self.ctxt, host_name=cell_and_host, binary='nova-api', params_to_update=params_to_update) self.assertEqual(expected_response, response) def test_service_delete(self): fake_cell = 'fake-cell' service_id = '1' cell_service_id = cells_utils.cell_with_item(fake_cell, service_id) with mock.patch.object(self.msg_runner, 'service_delete') as service_delete: self.cells_manager.service_delete(self.ctxt, cell_service_id) service_delete.assert_called_once_with( self.ctxt, fake_cell, service_id) def test_proxy_rpc_to_manager(self): self.mox.StubOutWithMock(self.msg_runner, 'proxy_rpc_to_manager') fake_response = self._get_fake_response() cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') topic = "%s.%s" % (CONF.compute_topic, cell_and_host) self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell', 'fake-host', topic, 'fake-rpc-msg', True, -1).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.proxy_rpc_to_manager(self.ctxt, topic=topic, rpc_message='fake-rpc-msg', call=True, timeout=-1) self.assertEqual('fake-response', response) def _build_task_log_responses(self, num): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of task log # entries. Manager should turn these into a single list of # task log entries. 
for i in xrange(num): cell_name = 'path!to!cell%i' % i task_logs = [] for task_log in FAKE_TASK_LOGS: task_logs.append(copy.deepcopy(task_log)) expected_task_log = copy.deepcopy(task_log) cells_utils.add_cell_to_task_log(expected_task_log, cell_name) expected_response.append(expected_task_log) response = messaging.Response(cell_name, task_logs, False) responses.append(response) return expected_response, responses def test_task_log_get_all(self): expected_response, responses = self._build_task_log_responses(3) self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, None, 'fake-name', 'fake-begin', 'fake-end', host=None, state=None).AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end') self.assertEqual(expected_response, response) def test_task_log_get_all_with_filters(self): expected_response, responses = self._build_task_log_responses(1) cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host') self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell', 'fake-name', 'fake-begin', 'fake-end', host='fake-host', state='fake-state').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end', host=cell_and_host, state='fake-state') self.assertEqual(expected_response, response) def test_task_log_get_all_with_cell_but_no_host_filters(self): expected_response, responses = self._build_task_log_responses(1) # Host filter only has cell name. 
cell_and_host = 'fake-cell' self.mox.StubOutWithMock(self.msg_runner, 'task_log_get_all') self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell', 'fake-name', 'fake-begin', 'fake-end', host=None, state='fake-state').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.task_log_get_all(self.ctxt, task_name='fake-name', period_beginning='fake-begin', period_ending='fake-end', host=cell_and_host, state='fake-state') self.assertEqual(expected_response, response) def test_compute_node_get_all(self): responses = [] expected_response = [] # 3 cells... so 3 responses. Each response is a list of computes. # Manager should turn these into a single list of responses. for i in xrange(3): cell_name = 'path!to!cell%i' % i compute_nodes = [] for compute_node in FAKE_COMPUTE_NODES: compute_nodes.append(copy.deepcopy(compute_node)) expected_compute_node = copy.deepcopy(compute_node) cells_utils.add_cell_to_compute_node(expected_compute_node, cell_name) expected_response.append(expected_compute_node) response = messaging.Response(cell_name, compute_nodes, False) responses.append(response) self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get_all') self.msg_runner.compute_node_get_all(self.ctxt, hypervisor_match='fake-match').AndReturn(responses) self.mox.ReplayAll() response = self.cells_manager.compute_node_get_all(self.ctxt, hypervisor_match='fake-match') self.assertEqual(expected_response, response) def test_compute_node_stats(self): raw_resp1 = {'key1': 1, 'key2': 2} raw_resp2 = {'key2': 1, 'key3': 2} raw_resp3 = {'key3': 1, 'key4': 2} responses = [messaging.Response('cell1', raw_resp1, False), messaging.Response('cell2', raw_resp2, False), messaging.Response('cell2', raw_resp3, False)] expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2} self.mox.StubOutWithMock(self.msg_runner, 'compute_node_stats') self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses) self.mox.ReplayAll() response = 
self.cells_manager.compute_node_stats(self.ctxt) self.assertEqual(expected_resp, response) def test_compute_node_get(self): fake_cell = 'fake-cell' fake_response = messaging.Response(fake_cell, FAKE_COMPUTE_NODES[0], False) expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0]) cells_utils.add_cell_to_compute_node(expected_response, fake_cell) cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id') self.mox.StubOutWithMock(self.msg_runner, 'compute_node_get') self.msg_runner.compute_node_get(self.ctxt, 'fake-cell', 'fake-id').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.compute_node_get(self.ctxt, compute_id=cell_and_id) self.assertEqual(expected_response, response) def test_actions_get(self): fake_uuid = fake_instance_actions.FAKE_UUID fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1 fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] fake_response = messaging.Response('fake-cell', [fake_act], False) expected_response = [fake_act] self.mox.StubOutWithMock(self.msg_runner, 'actions_get') self.msg_runner.actions_get(self.ctxt, 'fake-cell', 'fake-uuid').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.actions_get(self.ctxt, 'fake-cell', 'fake-uuid') self.assertEqual(expected_response, response) def test_action_get_by_request_id(self): fake_uuid = fake_instance_actions.FAKE_UUID fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1 fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id] fake_response = messaging.Response('fake-cell', fake_act, False) expected_response = fake_act self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id') self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell', 'fake-uuid', 'req-fake').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.action_get_by_request_id(self.ctxt, 'fake-cell', 'fake-uuid', 'req-fake') self.assertEqual(expected_response, response) def test_action_events_get(self): 
fake_action_id = fake_instance_actions.FAKE_ACTION_ID1 fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id] fake_response = messaging.Response('fake-cell', fake_events, False) expected_response = fake_events self.mox.StubOutWithMock(self.msg_runner, 'action_events_get') self.msg_runner.action_events_get(self.ctxt, 'fake-cell', 'fake-action').AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell', 'fake-action') self.assertEqual(expected_response, response) def test_consoleauth_delete_tokens(self): instance_uuid = 'fake-instance-uuid' self.mox.StubOutWithMock(self.msg_runner, 'consoleauth_delete_tokens') self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid) self.mox.ReplayAll() self.cells_manager.consoleauth_delete_tokens(self.ctxt, instance_uuid=instance_uuid) def test_get_capacities(self): cell_name = 'cell_name' response = {"ram_free": {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}} self.mox.StubOutWithMock(self.state_manager, 'get_capacities') self.state_manager.get_capacities(cell_name).AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.get_capacities(self.ctxt, cell_name)) def test_validate_console_port(self): instance_uuid = 'fake-instance-uuid' cell_name = 'fake-cell-name' instance = {'cell_name': cell_name} console_port = 'fake-console-port' console_type = 'fake-console-type' self.mox.StubOutWithMock(self.msg_runner, 'validate_console_port') self.mox.StubOutWithMock(self.cells_manager.db, 'instance_get_by_uuid') fake_response = self._get_fake_response() self.cells_manager.db.instance_get_by_uuid(self.ctxt, instance_uuid).AndReturn(instance) self.msg_runner.validate_console_port(self.ctxt, cell_name, instance_uuid, console_port, console_type).AndReturn(fake_response) self.mox.ReplayAll() response = self.cells_manager.validate_console_port(self.ctxt, instance_uuid=instance_uuid, console_port=console_port, 
console_type=console_type) self.assertEqual('fake-response', response) def test_bdm_update_or_create_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bdm_update_or_create_at_top') self.msg_runner.bdm_update_or_create_at_top(self.ctxt, 'fake-bdm', create='foo') self.mox.ReplayAll() self.cells_manager.bdm_update_or_create_at_top(self.ctxt, 'fake-bdm', create='foo') def test_bdm_destroy_at_top(self): self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top') self.msg_runner.bdm_destroy_at_top(self.ctxt, 'fake_instance_uuid', device_name='fake_device_name', volume_id='fake_volume_id') self.mox.ReplayAll() self.cells_manager.bdm_destroy_at_top(self.ctxt, 'fake_instance_uuid', device_name='fake_device_name', volume_id='fake_volume_id') def test_get_migrations(self): filters = {'status': 'confirmed'} cell1_migrations = [{'id': 123}] cell2_migrations = [{'id': 456}] fake_responses = [self._get_fake_response(cell1_migrations), self._get_fake_response(cell2_migrations)] self.mox.StubOutWithMock(self.msg_runner, 'get_migrations') self.msg_runner.get_migrations(self.ctxt, None, False, filters).\ AndReturn(fake_responses) self.mox.ReplayAll() response = self.cells_manager.get_migrations(self.ctxt, filters) self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response) def test_get_migrations_for_a_given_cell(self): filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'} target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name']) migrations = [{'id': 123}] fake_responses = [self._get_fake_response(migrations)] self.mox.StubOutWithMock(self.msg_runner, 'get_migrations') self.msg_runner.get_migrations(self.ctxt, target_cell, False, filters).AndReturn(fake_responses) self.mox.ReplayAll() response = self.cells_manager.get_migrations(self.ctxt, filters) self.assertEqual(migrations, response) def test_instance_update_from_api(self): self.mox.StubOutWithMock(self.msg_runner, 'instance_update_from_api') 
self.msg_runner.instance_update_from_api(self.ctxt, 'fake-instance', 'exp_vm', 'exp_task', 'admin_reset') self.mox.ReplayAll() self.cells_manager.instance_update_from_api( self.ctxt, instance='fake-instance', expected_vm_state='exp_vm', expected_task_state='exp_task', admin_state_reset='admin_reset') def test_start_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'start_instance') self.msg_runner.start_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.start_instance(self.ctxt, instance='fake-instance') def test_stop_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'stop_instance') self.msg_runner.stop_instance(self.ctxt, 'fake-instance', do_cast='meow') self.mox.ReplayAll() self.cells_manager.stop_instance(self.ctxt, instance='fake-instance', do_cast='meow') def test_cell_create(self): values = 'values' response = 'created_cell' self.mox.StubOutWithMock(self.state_manager, 'cell_create') self.state_manager.cell_create(self.ctxt, values).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_create(self.ctxt, values)) def test_cell_update(self): cell_name = 'cell_name' values = 'values' response = 'updated_cell' self.mox.StubOutWithMock(self.state_manager, 'cell_update') self.state_manager.cell_update(self.ctxt, cell_name, values).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_update(self.ctxt, cell_name, values)) def test_cell_delete(self): cell_name = 'cell_name' response = 1 self.mox.StubOutWithMock(self.state_manager, 'cell_delete') self.state_manager.cell_delete(self.ctxt, cell_name).\ AndReturn(response) self.mox.ReplayAll() self.assertEqual(response, self.cells_manager.cell_delete(self.ctxt, cell_name)) def test_cell_get(self): cell_name = 'cell_name' response = 'cell_info' self.mox.StubOutWithMock(self.state_manager, 'cell_get') self.state_manager.cell_get(self.ctxt, cell_name).\ AndReturn(response) self.mox.ReplayAll() 
self.assertEqual(response, self.cells_manager.cell_get(self.ctxt, cell_name)) def test_reboot_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance') self.msg_runner.reboot_instance(self.ctxt, 'fake-instance', 'HARD') self.mox.ReplayAll() self.cells_manager.reboot_instance(self.ctxt, instance='fake-instance', reboot_type='HARD') def test_suspend_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance') self.msg_runner.suspend_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.suspend_instance(self.ctxt, instance='fake-instance') def test_resume_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'resume_instance') self.msg_runner.resume_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.resume_instance(self.ctxt, instance='fake-instance') def test_terminate_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance') self.msg_runner.terminate_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.terminate_instance(self.ctxt, instance='fake-instance') def test_soft_delete_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance') self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.soft_delete_instance(self.ctxt, instance='fake-instance') def test_resize_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'resize_instance') self.msg_runner.resize_instance(self.ctxt, 'fake-instance', 'fake-flavor', 'fake-updates') self.mox.ReplayAll() self.cells_manager.resize_instance( self.ctxt, instance='fake-instance', flavor='fake-flavor', extra_instance_updates='fake-updates') def test_live_migrate_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance') self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance', 'fake-block', 'fake-commit', 'fake-host') self.mox.ReplayAll() self.cells_manager.live_migrate_instance( self.ctxt, 
instance='fake-instance', block_migration='fake-block', disk_over_commit='fake-commit', host_name='fake-host') def test_revert_resize(self): self.mox.StubOutWithMock(self.msg_runner, 'revert_resize') self.msg_runner.revert_resize(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.revert_resize(self.ctxt, instance='fake-instance') def test_confirm_resize(self): self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize') self.msg_runner.confirm_resize(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance') def test_reset_network(self): self.mox.StubOutWithMock(self.msg_runner, 'reset_network') self.msg_runner.reset_network(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.reset_network(self.ctxt, instance='fake-instance') def test_inject_network_info(self): self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info') self.msg_runner.inject_network_info(self.ctxt, 'fake-instance') self.mox.ReplayAll() self.cells_manager.inject_network_info(self.ctxt, instance='fake-instance') def test_snapshot_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance') self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance', 'fake-id') self.mox.ReplayAll() self.cells_manager.snapshot_instance(self.ctxt, instance='fake-instance', image_id='fake-id') def test_backup_instance(self): self.mox.StubOutWithMock(self.msg_runner, 'backup_instance') self.msg_runner.backup_instance(self.ctxt, 'fake-instance', 'fake-id', 'backup-type', 'rotation') self.mox.ReplayAll() self.cells_manager.backup_instance(self.ctxt, instance='fake-instance', image_id='fake-id', backup_type='backup-type', rotation='rotation')
apache-2.0
eternalthinker/flask-server-rq-example
venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
87
2715
import hashlib
import os

from pip._vendor.lockfile import FileLock

from ..cache import BaseCache


def _secure_open_write(filename, fmode):
    """Open *filename* for writing as a brand-new file with mode *fmode*.

    Returns a binary file object. The file at *filename* is always created
    fresh (any pre-existing file is removed first) so that the permission
    bits in *fmode* are guaranteed to be the actual mode of the file.
    """
    # Write-only, and refuse to open an already-existing file: O_CREAT
    # together with O_EXCL makes os.open fail unless it creates the file
    # itself, which is what guarantees the mode we pass is honored.
    flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL

    # Refuse to traverse a symlink planted at the cache path, so nobody can
    # redirect our write somewhere else via a symlink.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows, open the file in binary mode.
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Clear out anything already sitting at this path; the O_EXCL open
    # below would otherwise refuse to proceed.
    try:
        os.remove(filename)
    except (IOError, OSError):
        # Nothing there to remove -- fine, we wanted a fresh file anyway.
        pass

    # If something re-creates the path between the remove above and this
    # open, O_CREAT | O_EXCL raises instead of silently writing into the
    # attacker's file. Since callers hold a lockfile, that should only
    # ever happen under active attack.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # Wrapping the descriptor in a file object failed; don't leak it.
        os.close(fd)
        raise


class FileCache(BaseCache):
    """Cache backend storing each entry as its own file on disk."""

    def __init__(self, directory, forever=False, filemode=0o0600,
                 dirmode=0o0700):
        # directory: root of the on-disk cache tree
        # forever:   when True, delete() becomes a no-op
        # filemode/dirmode: permissions for created files/directories
        self.directory = directory
        self.forever = forever
        self.filemode = filemode
        self.dirmode = dirmode

    @staticmethod
    def encode(x):
        """Hash a cache key into a fixed-length, filesystem-safe hex name."""
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        """Map a cache key to its on-disk path.

        The first five hex digits of the digest become one nesting level
        each, fanning entries out so no single directory grows huge.
        """
        digest = self.encode(name)
        return os.path.join(self.directory, *(list(digest[:5]) + [digest]))

    def get(self, key):
        """Return the cached bytes for *key*, or None when absent."""
        path = self._fn(key)
        if not os.path.exists(path):
            return None

        with open(path, 'rb') as fh:
            return fh.read()

    def set(self, key, value):
        """Atomically-ish store *value* (bytes) under *key*."""
        path = self._fn(key)

        # Ensure the parent directories exist; "already exists" is fine.
        try:
            os.makedirs(os.path.dirname(path), self.dirmode)
        except (IOError, OSError):
            pass

        # Serialize writers on this entry, then write through the secure
        # open helper so the file is created fresh with our mode.
        with FileLock(path) as lock:
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        """Remove the entry for *key*, unless the cache is marked forever."""
        path = self._fn(key)
        if not self.forever:
            os.remove(path)
apache-2.0
kadircet/CENG
783/HW2/cs231n/gradient_check.py
1
3423
# NOTE(review): Python 2 source (print statements, xrange) -- runs only
# under a Python 2 interpreter.
import numpy as np
from random import randrange


def eval_numerical_gradient(f, x, verbose=False, h=0.00001):
    """
    a naive implementation of numerical gradient of f at x
    - f should be a function that takes a single argument
    - x is the point (numpy array) to evaluate the gradient at

    Uses the one-sided (forward) difference (f(x+h) - f(x)) / h, so the
    error is O(h); the two-sided helpers below are more accurate.
    x is perturbed in place and restored, so it must be writable.
    """

    fx = f(x) # evaluate function value at original point
    grad = np.zeros(x.shape)
    # iterate over all indexes in x, whatever its rank
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:

        # evaluate function at x+h
        ix = it.multi_index
        x[ix] += h # increment by h
        fxh = f(x) # evalute f(x + h)
        x[ix] -= h # restore to previous value (very important!)

        # compute the partial derivative
        grad[ix] = (fxh - fx) / h # the slope
        if verbose:
            print ix, grad[ix]
        it.iternext() # step to next dimension

    return grad


def grad_check_sparse(f, x, analytic_grad, num_checks):
    """
    sample a few random elements and only return numerical
    in this dimensions.

    Prints the centered numerical gradient, the provided analytic
    gradient and their relative error for num_checks randomly chosen
    coordinates of x. Purely diagnostic: returns nothing.
    """
    h = 1e-5

    x.shape  # no-op statement (left over; has no effect)
    for i in xrange(num_checks):
        # pick one random multi-index into x
        ix = tuple([randrange(m) for m in x.shape])

        x[ix] += h # increment by h
        fxph = f(x) # evaluate f(x + h)
        x[ix] -= 2 * h # increment by h
        fxmh = f(x) # evaluate f(x - h)
        x[ix] += h # reset

        # centered difference: O(h^2) accurate
        grad_numerical = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        # relative error normalized by the magnitudes of both gradients;
        # NOTE(review): divides by zero if both gradients are exactly 0
        rel_error = abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))
        print 'numerical: %f analytic: %f, relative error: %e' % (grad_numerical, grad_analytic, rel_error)


def eval_numerical_gradient_array(f, x, df, h=1e-5):
    """
    Evaluate a numeric gradient for a function that accepts a numpy
    array and returns a numpy array.

    df is the upstream gradient with the same shape as f(x); the result
    has the shape of x (chain rule via the sum over (pos - neg) * df).
    """
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        ix = it.multi_index

        oldval = x[ix]
        x[ix] = oldval + h
        pos = f(x)           # f evaluated with x[ix] nudged up
        x[ix] = oldval - h
        neg = f(x)           # f evaluated with x[ix] nudged down
        x[ix] = oldval       # restore the original entry

        # centered difference contracted against the upstream gradient
        grad[ix] = np.sum((pos - neg) * df) / (2 * h)
        it.iternext()
    return grad


def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
    """
    Compute numeric gradients for a function that operates on input
    and output blobs.

    We assume that f accepts several input blobs as arguments, followed by a
    blob into which outputs will be written. For example, f might be called
    like this:

    f(x, w, out)

    where x and w are input Blobs, and the result of f will be written to out.

    Inputs:
    - f: function
    - inputs: tuple of input blobs
    - output: output blob
    - h: step size

    Returns a list with one diff array per input blob. Blobs are assumed
    to expose .vals (data) and .diffs (upstream gradient) arrays.
    """
    numeric_diffs = []
    for input_blob in inputs:
        diff = np.zeros_like(input_blob.diffs)
        it = np.nditer(input_blob.vals, flags=['multi_index'],
                       op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            orig = input_blob.vals[idx]

            # nudge this entry up, run the whole op, snapshot the output
            input_blob.vals[idx] = orig + h
            f(*(inputs + (output,)))
            pos = np.copy(output.vals)
            # nudge down and snapshot again
            input_blob.vals[idx] = orig - h
            f(*(inputs + (output,)))
            neg = np.copy(output.vals)
            input_blob.vals[idx] = orig  # restore

            # centered difference contracted with the output's diffs
            diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)

            it.iternext()
        numeric_diffs.append(diff)
    return numeric_diffs


def eval_numerical_gradient_net(net, inputs, output, h=1e-5):
    # Convenience wrapper: differentiate a whole net by re-running its
    # forward pass for every nudged entry.
    return eval_numerical_gradient_blobs(lambda *args: net.forward(),
                                         inputs, output, h=h)
gpl-3.0
rahushen/ansible
lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_facts.py
23
4647
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' module: ec2_vpc_nat_gateway_facts short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods. description: - Gets various details related to AWS VPC Managed Nat Gateways version_added: "2.3" requirements: [ boto3 ] options: nat_gateway_ids: description: - Get details of specific nat gateway IDs required: false default: None filters: description: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html) for possible filters. required: false default: None author: Karen Cheng(@Etherdaemon) extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Simple example of listing all nat gateways - name: List all managed nat gateways in ap-southeast-2 ec2_vpc_nat_gateway_facts: region: ap-southeast-2 register: all_ngws - name: Debugging the result debug: msg: "{{ all_ngws.result }}" - name: Get details on specific nat gateways ec2_vpc_nat_gateway_facts: nat_gateway_ids: - nat-1234567891234567 - nat-7654321987654321 region: ap-southeast-2 register: specific_ngws - name: Get all nat gateways with specific filters ec2_vpc_nat_gateway_facts: region: ap-southeast-2 filters: state: ['pending'] register: pending_ngws - name: Get nat gateways with specific filter ec2_vpc_nat_gateway_facts: region: ap-southeast-2 filters: subnet-id: subnet-12345678 state: ['available'] register: existing_nat_gateways ''' RETURN = ''' result: description: The result of the describe, converted to ansible snake case style. 
See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response. returned: success type: list ''' import json try: import botocore except ImportError: pass # will be detected by imported HAS_BOTO3 from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, HAS_BOTO3) def date_handler(obj): return obj.isoformat() if hasattr(obj, 'isoformat') else obj def get_nat_gateways(client, module, nat_gateway_id=None): params = dict() nat_gateways = list() params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) params['NatGatewayIds'] = module.params.get('nat_gateway_ids') try: result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler)) except Exception as e: module.fail_json(msg=str(e.message)) for gateway in result['NatGateways']: # Turn the boto3 result into ansible_friendly_snaked_names converted_gateway = camel_dict_to_snake_dict(gateway) if 'tags' in converted_gateway: # Turn the boto3 result into ansible friendly tag dictionary converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags']) nat_gateways.append(converted_gateway) return nat_gateways def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict( filters=dict(default={}, type='dict'), nat_gateway_ids=dict(default=[], type='list'), ) ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements if not HAS_BOTO3: module.fail_json(msg='botocore/boto3 is required.') try: region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) if region: connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params) else: module.fail_json(msg="region must be specified") except 
botocore.exceptions.NoCredentialsError as e: module.fail_json(msg=str(e)) results = get_nat_gateways(connection, module) module.exit_json(result=results) if __name__ == '__main__': main()
gpl-3.0
linked67/p2pool-cannabiscoin
p2pool/util/memoize.py
281
1737
import itertools class LRUDict(object): def __init__(self, n): self.n = n self.inner = {} self.counter = itertools.count() def get(self, key, default=None): if key in self.inner: x, value = self.inner[key] self.inner[key] = self.counter.next(), value return value return default def __setitem__(self, key, value): self.inner[key] = self.counter.next(), value while len(self.inner) > self.n: self.inner.pop(min(self.inner, key=lambda k: self.inner[k][0])) _nothing = object() def memoize_with_backing(backing, has_inverses=set()): def a(f): def b(*args): res = backing.get((f, args), _nothing) if res is not _nothing: return res res = f(*args) backing[(f, args)] = res for inverse in has_inverses: backing[(inverse, args[:-1] + (res,))] = args[-1] return res return b return a def memoize(f): return memoize_with_backing({})(f) class cdict(dict): def __init__(self, func): dict.__init__(self) self._func = func def __missing__(self, key): value = self._func(key) self[key] = value return value def fast_memoize_single_arg(func): return cdict(func).__getitem__ class cdict2(dict): def __init__(self, func): dict.__init__(self) self._func = func def __missing__(self, key): value = self._func(*key) self[key] = value return value def fast_memoize_multiple_args(func): f = cdict2(func).__getitem__ return lambda *args: f(args)
gpl-3.0
jmesteve/saas3
openerp/addons/procurement/wizard/schedulers_all.py
3
2883
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import threading

from openerp.osv import fields, osv


class procurement_compute_all(osv.osv_memory):
    # Transient wizard that kicks off the procurement scheduler for the
    # whole database in a background thread, so the user's request
    # returns immediately.
    _name = 'procurement.order.compute.all'
    _description = 'Compute all schedulers'

    _columns = {
        # Wizard checkbox; the help string below is user-facing and kept verbatim.
        'automatic': fields.boolean('Automatic orderpoint', help='Triggers an automatic procurement for all products that have a virtual stock under 0. You should probably not use this option, we suggest using a MTO configuration on products.'),
    }

    _defaults = {
        'automatic': lambda *a: False,
    }

    def _procure_calculation_all(self, cr, uid, ids, context=None):
        """Thread body: run the procurement scheduler for each wizard record.

        @param self: The object pointer.
        @param cr: A database cursor (unused here -- see note below)
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        proc_obj = self.pool.get('procurement.order')
        # As this function runs in a new thread, it needs its own cursor:
        # the caller's cursor (cr) may already be closed by the time the
        # thread executes.
        new_cr = self.pool.db.cursor()
        for proc in self.browse(new_cr, uid, ids, context=context):
            # use_new_cursor=dbname tells run_scheduler to commit in its
            # own cursor batches rather than in the caller's transaction.
            proc_obj.run_scheduler(new_cr, uid, automatic=proc.automatic, use_new_cursor=new_cr.dbname,\
                    context=context)
        # close the new cursor
        # NOTE(review): not wrapped in try/finally, so an exception in
        # run_scheduler leaks this cursor -- confirm intended.
        new_cr.close()
        return {}

    def procure_calculation(self, cr, uid, ids, context=None):
        """Wizard button handler: launch the scheduler thread and close the window.

        Returns immediately with an act_window_close action; the actual
        computation continues in the background thread.

        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of IDs selected
        @param context: A standard dictionary
        """
        threaded_calculation = threading.Thread(target=self._procure_calculation_all, args=(cr, uid, ids, context))
        threaded_calculation.start()
        return {'type': 'ir.actions.act_window_close'}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
alajara/servo
tests/wpt/web-platform-tests/old-tests/webdriver/modal/alerts_test.py
141
6347
import os import sys import unittest sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../.."))) import base_test from selenium.common import exceptions from selenium.webdriver.support import wait class AlertsTest(base_test.WebDriverBaseTest): def setUp(self): self.wait = wait.WebDriverWait(self.driver, 5, ignored_exceptions = [exceptions.NoAlertPresentException]) self.driver.get(self.webserver.where_is('modal/res/alerts.html')) def tearDown(self): try: self.driver.switch_to_alert().dismiss() except exceptions.NoAlertPresentException: pass # Alerts def test_should_allow_user_to_accept_an_alert(self): self.driver.find_element_by_css_selector('#alert').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() self.driver.current_url def test_should_allow_user_to_accept_an_alert_with_no_text(self): self.driver.find_element_by_css_selector('#empty-alert').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() self.driver.current_url def test_should_allow_user_to_dismiss_an_alert(self): self.driver.find_element_by_css_selector('#alert').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.dismiss() self.driver.current_url def test_should_allow_user_to_get_text_of_an_alert(self): self.driver.find_element_by_css_selector('#alert').click() alert = self.wait.until(lambda x: x.switch_to_alert()) value = alert.text alert.accept() self.assertEquals('cheese', value) def test_setting_the_value_of_an_alert_throws(self): self.driver.find_element_by_css_selector('#alert').click() alert = self.wait.until(lambda x: x.switch_to_alert()) with self.assertRaises(exceptions.ElementNotVisibleException): alert.send_keys('cheese') alert.accept() def test_alert_should_not_allow_additional_commands_if_dismissed(self): self.driver.find_element_by_css_selector('#alert').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() with self.assertRaises(exceptions.NoAlertPresentException): alert.text # Prompts 
def test_should_allow_user_to_accept_a_prompt(self): self.driver.find_element_by_css_selector('#prompt').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == '') def test_should_allow_user_to_dismiss_a_prompt(self): self.driver.find_element_by_css_selector('#prompt').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.dismiss() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == 'null') def test_should_allow_user_to_set_the_value_of_a_prompt(self): self.driver.find_element_by_css_selector('#prompt').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.send_keys('cheese') alert.accept() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == 'cheese') def test_should_allow_user_to_get_text_of_a_prompt(self): self.driver.find_element_by_css_selector('#prompt').click() alert = self.wait.until(lambda x: x.switch_to_alert()) value = alert.text alert.accept() self.assertEquals('Enter something', value) def test_prompt_should_not_allow_additional_commands_if_dismissed(self): self.driver.find_element_by_css_selector('#prompt').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() with self.assertRaises(exceptions.NoAlertPresentException): alert.text def test_prompt_should_use_default_value_if_no_keys_sent(self): self.driver.find_element_by_css_selector('#prompt-with-default').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == 'This is a default value') def test_prompt_should_have_null_value_if_dismissed(self): self.driver.find_element_by_css_selector('#prompt-with-default').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.dismiss() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == 'null') # Confirmations def 
test_should_allow_user_to_accept_a_confirm(self): self.driver.find_element_by_css_selector('#confirm').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == 'true') def test_should_allow_user_to_dismiss_a_confirm(self): self.driver.find_element_by_css_selector('#confirm').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.dismiss() self.wait.until(lambda x: x.find_element_by_css_selector('#text').text == 'false') def test_setting_the_value_of_a_confirm_throws(self): self.driver.find_element_by_css_selector('#confirm').click() alert = self.wait.until(lambda x: x.switch_to_alert()) with self.assertRaises(exceptions.ElementNotVisibleException): alert.send_keys('cheese') alert.accept() def test_should_allow_user_to_get_text_of_a_confirm(self): self.driver.find_element_by_css_selector('#confirm').click() alert = self.wait.until(lambda x: x.switch_to_alert()) value = alert.text alert.accept() self.assertEquals('cheese', value) def test_confirm_should_not_allow_additional_commands_if_dismissed(self): self.driver.find_element_by_css_selector('#confirm').click() alert = self.wait.until(lambda x: x.switch_to_alert()) alert.accept() with self.assertRaises(exceptions.NoAlertPresentException): alert.text """ def test_switch_to_missing_alert_fails(self): with self.assertRaises(exceptions.NoAlertPresentException): self.driver.switch_to_alert() """ if __name__ == '__main__': unittest.main()
mpl-2.0
40223151/2014c2g9
w2/static/Brython2.0.0-20140209-164925/Lib/colorsys.py
1066
3691
"""Conversion functions between RGB and other color systems. This modules provides two functions for each color system ABC: rgb_to_abc(r, g, b) --> a, b, c abc_to_rgb(a, b, c) --> r, g, b All inputs and outputs are triples of floats in the range [0.0...1.0] (with the exception of I and Q, which covers a slightly larger range). Inputs outside the valid range may cause exceptions or invalid outputs. Supported color systems: RGB: Red, Green, Blue components YIQ: Luminance, Chrominance (used by composite video signals) HLS: Hue, Luminance, Saturation HSV: Hue, Saturation, Value """ # References: # http://en.wikipedia.org/wiki/YIQ # http://en.wikipedia.org/wiki/HLS_color_space # http://en.wikipedia.org/wiki/HSV_color_space __all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", "rgb_to_hsv","hsv_to_rgb"] # Some floating point constants ONE_THIRD = 1.0/3.0 ONE_SIXTH = 1.0/6.0 TWO_THIRD = 2.0/3.0 # YIQ: used by composite video signals (linear combinations of RGB) # Y: perceived grey level (0.0 == black, 1.0 == white) # I, Q: color components def rgb_to_yiq(r, g, b): y = 0.30*r + 0.59*g + 0.11*b i = 0.60*r - 0.28*g - 0.32*b q = 0.21*r - 0.52*g + 0.31*b return (y, i, q) def yiq_to_rgb(y, i, q): r = y + 0.948262*i + 0.624013*q g = y - 0.276066*i - 0.639810*q b = y - 1.105450*i + 1.729860*q if r < 0.0: r = 0.0 if g < 0.0: g = 0.0 if b < 0.0: b = 0.0 if r > 1.0: r = 1.0 if g > 1.0: g = 1.0 if b > 1.0: b = 1.0 return (r, g, b) # HLS: Hue, Luminance, Saturation # H: position in the spectrum # L: color lightness # S: color saturation def rgb_to_hls(r, g, b): maxc = max(r, g, b) minc = min(r, g, b) # XXX Can optimize (maxc+minc) and (maxc-minc) l = (minc+maxc)/2.0 if minc == maxc: return 0.0, l, 0.0 if l <= 0.5: s = (maxc-minc) / (maxc+minc) else: s = (maxc-minc) / (2.0-maxc-minc) rc = (maxc-r) / (maxc-minc) gc = (maxc-g) / (maxc-minc) bc = (maxc-b) / (maxc-minc) if r == maxc: h = bc-gc elif g == maxc: h = 2.0+rc-bc else: h = 4.0+gc-rc h = (h/6.0) % 1.0 return h, l, s def 
hls_to_rgb(h, l, s): if s == 0.0: return l, l, l if l <= 0.5: m2 = l * (1.0+s) else: m2 = l+s-(l*s) m1 = 2.0*l - m2 return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) def _v(m1, m2, hue): hue = hue % 1.0 if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0 if hue < 0.5: return m2 if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 return m1 # HSV: Hue, Saturation, Value # H: position in the spectrum # S: color saturation ("purity") # V: color brightness def rgb_to_hsv(r, g, b): maxc = max(r, g, b) minc = min(r, g, b) v = maxc if minc == maxc: return 0.0, 0.0, v s = (maxc-minc) / maxc rc = (maxc-r) / (maxc-minc) gc = (maxc-g) / (maxc-minc) bc = (maxc-b) / (maxc-minc) if r == maxc: h = bc-gc elif g == maxc: h = 2.0+rc-bc else: h = 4.0+gc-rc h = (h/6.0) % 1.0 return h, s, v def hsv_to_rgb(h, s, v): if s == 0.0: return v, v, v i = int(h*6.0) # XXX assume int() truncates! f = (h*6.0) - i p = v*(1.0 - s) q = v*(1.0 - s*f) t = v*(1.0 - s*(1.0-f)) i = i%6 if i == 0: return v, t, p if i == 1: return q, v, p if i == 2: return p, v, t if i == 3: return p, q, v if i == 4: return t, p, v if i == 5: return v, p, q # Cannot get here
gpl-2.0
ex1usive-m4d/TemplateDocx
controllers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/encodings/cp874.py
593
12851
""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp874', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' 
# 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z 
u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u20ac' # 0x80 -> EURO SIGN u'\ufffe' # 0x81 -> UNDEFINED u'\ufffe' # 0x82 -> UNDEFINED u'\ufffe' # 0x83 -> UNDEFINED u'\ufffe' # 0x84 -> UNDEFINED u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS u'\ufffe' # 0x86 -> UNDEFINED u'\ufffe' # 0x87 -> UNDEFINED u'\ufffe' # 0x88 -> UNDEFINED u'\ufffe' # 0x89 -> UNDEFINED u'\ufffe' # 0x8A -> UNDEFINED u'\ufffe' # 0x8B -> UNDEFINED u'\ufffe' # 0x8C -> UNDEFINED u'\ufffe' # 0x8D -> UNDEFINED u'\ufffe' # 0x8E -> UNDEFINED u'\ufffe' # 0x8F -> UNDEFINED u'\ufffe' # 0x90 -> UNDEFINED u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK u'\u2022' # 0x95 -> 
BULLET u'\u2013' # 0x96 -> EN DASH u'\u2014' # 0x97 -> EM DASH u'\ufffe' # 0x98 -> UNDEFINED u'\ufffe' # 0x99 -> UNDEFINED u'\ufffe' # 0x9A -> UNDEFINED u'\ufffe' # 0x9B -> UNDEFINED u'\ufffe' # 0x9C -> UNDEFINED u'\ufffe' # 0x9D -> UNDEFINED u'\ufffe' # 0x9E -> UNDEFINED u'\ufffe' # 0x9F -> UNDEFINED u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA u'\u0e24' # 0xC4 -> THAI CHARACTER RU u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING u'\u0e26' # 0xC6 -> THAI CHARACTER LU u'\u0e27' 
# 0xC7 -> THAI CHARACTER WO WAEN u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU u'\ufffe' # 0xDB -> UNDEFINED u'\ufffe' # 0xDC -> UNDEFINED u'\ufffe' # 0xDD -> UNDEFINED u'\ufffe' # 0xDE -> UNDEFINED u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN u'\u0e50' # 0xF0 -> THAI DIGIT ZERO u'\u0e51' # 0xF1 -> THAI DIGIT ONE u'\u0e52' # 0xF2 -> THAI DIGIT TWO u'\u0e53' # 0xF3 -> THAI DIGIT THREE u'\u0e54' # 0xF4 -> THAI DIGIT FOUR u'\u0e55' # 0xF5 -> THAI DIGIT FIVE u'\u0e56' # 0xF6 -> THAI DIGIT SIX u'\u0e57' 
# 0xF7 -> THAI DIGIT SEVEN u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT u'\u0e59' # 0xF9 -> THAI DIGIT NINE u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT u'\ufffe' # 0xFC -> UNDEFINED u'\ufffe' # 0xFD -> UNDEFINED u'\ufffe' # 0xFE -> UNDEFINED u'\ufffe' # 0xFF -> UNDEFINED ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
iulian787/spack
var/spack/repos/builtin/packages/pangomm/package.py
5
2457
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Pangomm(AutotoolsPackage): """Pangomm is a C++ interface to Pango.""" homepage = "http://www.pango.org/" url = "https://ftp.gnome.org/pub/GNOME/sources/pangomm/2.14/pangomm-2.14.1.tar.gz" version('2.43.1', sha256='00483967b4ed0869da09dc0617de45625b9ab846c7b07aa25dfc940a4fc540a4') version('2.42.0', sha256='ca6da067ff93a6445780c0b4b226eb84f484ab104b8391fb744a45cbc7edbf56') version('2.41.5', sha256='5131830d5b37b181ca4fa8f641ad86faa985c0bb7dcc833c98672d294367b304') version('2.40.2', sha256='0a97aa72513db9088ca3034af923484108746dba146e98ed76842cf858322d05') version('2.39.1', sha256='10c06bbf12a03963ffe9c697887b57c72f1dac1671d09dba45cecd25db5dc6ed') version('2.38.1', sha256='effb18505b36d81fc32989a39ead8b7858940d0533107336a30bc3eef096bc8b') version('2.37.2', sha256='bb83d769f4d4256e0b108e84a4f0441065da8483c7cc51518b0634668ed094f5') version('2.36.0', sha256='a8d96952c708d7726bed260d693cece554f8f00e48b97cccfbf4f5690b6821f0') version('2.35.1', sha256='3eb4d11014d09627b2b7c532c65b54fa182905b4c9688901ae11cdfb506dbc55') version('2.34.0', sha256='0e82bbff62f626692a00f3772d8b17169a1842b8cc54d5f2ddb1fec2cede9e41') version('2.28.4', sha256='778dcb66a793cbfd52a9f92ee5a71b888c3603a913fc1ddc48a30204de6d6c82') version('2.27.1', sha256='0d707b4a9e632223f7f27215f83fff679166cc89b9b7f209e7fe049af7b4562e') version('2.26.3', sha256='4f68e4d2d4b6c4ae82327ebd9e69f2cbc4379e502d12856c36943399b87d71a2') version('2.25.1', sha256='25684058138050a35ebb4f4e13899aea12045dfb00cc351dfe78f01cb1a1f21c') version('2.24.0', sha256='24c7b8782b8986fa8f6224ac1e5f1a02412b7d8bc21b53d14d6df9c7d9b59a3f') version('2.14.1', sha256='2ea6cee273cca1aae2ee5a5dac0c416b4dc354e46debb51f20c6eeba828f5ed5') version('2.14.0', sha256='baa3b231c9498fb1140254e3feb4eb93c638f07e6e26ae0e36c3699ec14d80fd') 
depends_on('pango') depends_on('glibmm') depends_on('cairomm') def url_for_version(self, version): """Handle version-based custom URLs.""" url = "https://ftp.acc.umu.se/pub/GNOME/sources/pangomm" ext = '.tar.gz' if version < Version('2.28.3') else '.tar.xz' return url + "/%s/pangomm-%s%s" % (version.up_to(2), version, ext)
lgpl-2.1
icebreaker/dotfiles
gnome/gnome2/gedit/plugins.symlink/rails_extract_partial.py
1
5285
# -*- coding: utf8 -*- # vim: ts=4 nowrap expandtab textwidth=80 # Rails Extract Partial Plugin # Copyright © 2008 Alexandre da Silva / Carlos Antonio da Silva # # This file is part of Gmate. # # See LICENTE.TXT for licence information import gedit import gtk import gnomevfs import os.path class ExtractPartialPlugin(gedit.Plugin): ui_str = """ <ui> <menubar name="MenuBar"> <menu name="EditMenu" action="Edit"> <placeholder name="EditOps_6"> <menuitem action="ExtractPartial"/> </placeholder> </menu> </menubar> </ui> """ # bookmarks = {} def __init__(self): gedit.Plugin.__init__(self) def activate(self, window): self.__window = window actions = [('ExtractPartial', None, 'Extract Partial', '<Alt><Control>p', 'Extract select text to a partial', self.extract_partial)] windowdata = dict() window.set_data("ExtractPartialPluginWindowDataKey", windowdata) windowdata["action_group"] = gtk.ActionGroup("GeditExtractPartialPluginActions") windowdata["action_group"].add_actions(actions, window) manager = window.get_ui_manager() manager.insert_action_group(windowdata["action_group"], -1) windowdata["ui_id"] = manager.add_ui_from_string(self.ui_str) window.set_data("ExtractPartialPluginInfo", windowdata) def deactivate(self, window): windowdata = window.get_data("ExtractPartialPluginWindowDataKey") manager = window.get_ui_manager() manager.remove_ui(windowdata["ui_id"]) manager.remove_action_group(windowdata["action_group"]) def update_ui(self, window): view = window.get_active_view() windowdata = window.get_data("ExtractPartialPluginWindowDataKey") windowdata["action_group"].set_sensitive(bool(view and view.get_editable())) def create_file(self, window, file_uri, text): window.create_tab_from_uri(str(file_uri), gedit.encoding_get_current(), 0, True, True) view = window.get_active_view() buf = view.get_buffer() doc = window.get_active_document() doc.begin_user_action() buf.insert_interactive_at_cursor(text, True) doc.end_user_action() def extract_partial(self, action, window): doc 
= window.get_active_document() view = window.get_active_view() buf = view.get_buffer() language = buf.get_language() # Only RHTML if language.get_id() != 'rhtml' and language.get_id() != 'haml': return str_uri = doc.get_uri() if buf.get_has_selection(): if str_uri: uri = gnomevfs.URI(str_uri) if uri: path = uri.scheme + '://' + uri.dirname dialog = gtk.Dialog("Enter partial Name", window, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)) dialog.set_alternative_button_order([gtk.RESPONSE_ACCEPT, gtk.RESPONSE_CANCEL]) dialog.vbox.pack_start(gtk.Label("Don't use _ nor extension(html.erb/erb/rhtml)")) entry = gtk.Entry() entry.connect('key-press-event', self.__dialog_key_press, dialog) dialog.vbox.pack_start(entry) dialog.show_all() response = dialog.run() if response == gtk.RESPONSE_ACCEPT: partial_name = entry.get_text() doc_name = doc.get_short_name_for_display() extension = self.__get_file_extension(doc_name) itstart, itend = doc.get_selection_bounds() partial_text = doc.get_slice(itstart, itend, True) if language.get_id() == 'haml': partial_render = '= render :partial => "%s"' % partial_name else: partial_render = '<%%= render :partial => "%s" %%>' % partial_name doc.begin_user_action() doc.delete(itstart, itend) doc.insert_interactive(itstart, partial_render, True) doc.end_user_action() file_name = "%s/_%s%s" % (path, partial_name, extension) self.create_file(window, file_name, partial_text) dialog.destroy() else: return def __get_file_extension(self, doc_name): name, ext = os.path.splitext(doc_name) if ext == '.rhtml': return ext if ext == '.erb': name, ext = os.path.splitext(name) return "%s.erb" % ext if ext == '.haml': name, ext = os.path.splitext(name) return "%s.haml" % ext return '.html.erb' def __dialog_key_press(self, widget, event, dialog): if event.keyval == 65293: dialog.response(gtk.RESPONSE_ACCEPT)
mit
Donkyhotay/MoonPy
zope/tal/talgenerator.py
1
33082
############################################################################## # # Copyright (c) 2001, 2002 Zope Corporation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Code generator for TALInterpreter intermediate code. $Id: talgenerator.py 39119 2005-10-13 19:20:18Z fdrake $ """ import cgi import re from zope.tal import taldefs from zope.tal.taldefs import NAME_RE, TAL_VERSION from zope.tal.taldefs import I18NError, METALError, TALError from zope.tal.taldefs import parseSubstitution from zope.tal.translationcontext import TranslationContext, DEFAULT_DOMAIN _name_rx = re.compile(NAME_RE) class TALGenerator(object): inMacroUse = 0 inMacroDef = 0 source_file = None def __init__(self, expressionCompiler=None, xml=1, source_file=None): if not expressionCompiler: from zope.tal.dummyengine import DummyEngine expressionCompiler = DummyEngine() self.expressionCompiler = expressionCompiler self.CompilerError = expressionCompiler.getCompilerError() # This holds the emitted opcodes representing the input self.program = [] # The program stack for when we need to do some sub-evaluation for an # intermediate result. E.g. in an i18n:name tag for which the # contents describe the ${name} value. self.stack = [] # Another stack of postponed actions. 
Elements on this stack are a # dictionary; key/values contain useful information that # emitEndElement needs to finish its calculations self.todoStack = [] self.macros = {} # {slot-name --> default content program} self.slots = {} self.slotStack = [] self.xml = xml # true --> XML, false --> HTML self.emit("version", TAL_VERSION) self.emit("mode", xml and "xml" or "html") if source_file is not None: self.source_file = source_file self.emit("setSourceFile", source_file) self.i18nContext = TranslationContext() self.i18nLevel = 0 def getCode(self): assert not self.stack assert not self.todoStack return self.optimize(self.program), self.macros def optimize(self, program): output = [] collect = [] cursor = 0 for cursor in xrange(len(program)+1): try: item = program[cursor] except IndexError: item = (None, None) opcode = item[0] if opcode == "rawtext": collect.append(item[1]) continue if opcode == "endTag": collect.append("</%s>" % item[1]) continue if opcode == "startTag": if self.optimizeStartTag(collect, item[1], item[2], ">"): continue if opcode == "startEndTag": endsep = self.xml and "/>" or " />" if self.optimizeStartTag(collect, item[1], item[2], endsep): continue if opcode in ("beginScope", "endScope"): # Push *Scope instructions in front of any text instructions; # this allows text instructions separated only by *Scope # instructions to be joined together. output.append(self.optimizeArgsList(item)) continue if opcode == 'noop': # This is a spacer for end tags in the face of i18n:name # attributes. We can't let the optimizer collect immediately # following end tags into the same rawtextOffset. 
opcode = None pass text = "".join(collect) if text: i = text.rfind("\n") if i >= 0: i = len(text) - (i + 1) output.append(("rawtextColumn", (text, i))) else: output.append(("rawtextOffset", (text, len(text)))) if opcode != None: output.append(self.optimizeArgsList(item)) collect = [] return self.optimizeCommonTriple(output) def optimizeArgsList(self, item): if len(item) == 2: return item else: return item[0], tuple(item[1:]) # These codes are used to indicate what sort of special actions # are needed for each special attribute. (Simple attributes don't # get action codes.) # # The special actions (which are modal) are handled by # TALInterpreter.attrAction() and .attrAction_tal(). # # Each attribute is represented by a tuple: # # (name, value) -- a simple name/value pair, with # no special processing # # (name, value, action, *extra) -- attribute with special # processing needs, action is a # code that indicates which # branch to take, and *extra # contains additional, # action-specific information # needed by the processing # def optimizeStartTag(self, collect, name, attrlist, end): # return true if the tag can be converted to plain text if not attrlist: collect.append("<%s%s" % (name, end)) return 1 opt = 1 new = ["<" + name] for i in range(len(attrlist)): item = attrlist[i] if len(item) > 2: opt = 0 name, value, action = item[:3] attrlist[i] = (name, value, action) + item[3:] else: if item[1] is None: s = item[0] else: s = '%s="%s"' % (item[0], taldefs.attrEscape(item[1])) attrlist[i] = item[0], s new.append(" " + s) # if no non-optimizable attributes were found, convert to plain text if opt: new.append(end) collect.extend(new) return opt def optimizeCommonTriple(self, program): if len(program) < 3: return program output = program[:2] prev2, prev1 = output for item in program[2:]: if ( item[0] == "beginScope" and prev1[0] == "setPosition" and prev2[0] == "rawtextColumn"): position = output.pop()[1] text, column = output.pop()[1] prev1 = None, None closeprev = 0 
if output and output[-1][0] == "endScope": closeprev = 1 output.pop() item = ("rawtextBeginScope", (text, column, position, closeprev, item[1])) output.append(item) prev2 = prev1 prev1 = item return output def todoPush(self, todo): self.todoStack.append(todo) def todoPop(self): return self.todoStack.pop() def compileExpression(self, expr): try: return self.expressionCompiler.compile(expr) except self.CompilerError, err: raise TALError('%s in expression %s' % (err.args[0], `expr`), self.position) def pushProgram(self): self.stack.append(self.program) self.program = [] def popProgram(self): program = self.program self.program = self.stack.pop() return self.optimize(program) def pushSlots(self): self.slotStack.append(self.slots) self.slots = {} def popSlots(self): slots = self.slots self.slots = self.slotStack.pop() return slots def emit(self, *instruction): self.program.append(instruction) def emitStartTag(self, name, attrlist, isend=0): if isend: opcode = "startEndTag" else: opcode = "startTag" self.emit(opcode, name, attrlist) def emitEndTag(self, name): if self.xml and self.program and self.program[-1][0] == "startTag": # Minimize empty element self.program[-1] = ("startEndTag",) + self.program[-1][1:] else: self.emit("endTag", name) def emitOptTag(self, name, optTag, isend): program = self.popProgram() #block start = self.popProgram() #start tag if (isend or not program) and self.xml: # Minimize empty element start[-1] = ("startEndTag",) + start[-1][1:] isend = 1 cexpr = optTag[0] if cexpr: cexpr = self.compileExpression(optTag[0]) self.emit("optTag", name, cexpr, optTag[1], isend, start, program) def emitRawText(self, text): self.emit("rawtext", text) def emitText(self, text): self.emitRawText(cgi.escape(text)) def emitDefines(self, defines): for part in taldefs.splitParts(defines): m = re.match( r"(?s)\s*(?:(global|local)\s+)?(%s)\s+(.*)\Z" % NAME_RE, part) if not m: raise TALError("invalid define syntax: " + `part`, self.position) scope, name, expr = 
m.group(1, 2, 3) scope = scope or "local" cexpr = self.compileExpression(expr) if scope == "local": self.emit("setLocal", name, cexpr) else: self.emit("setGlobal", name, cexpr) def emitOnError(self, name, onError, TALtag, isend): block = self.popProgram() key, expr = parseSubstitution(onError) cexpr = self.compileExpression(expr) if key == "text": self.emit("insertText", cexpr, []) else: assert key == "structure" self.emit("insertStructure", cexpr, {}, []) if TALtag: self.emitOptTag(name, (None, 1), isend) else: self.emitEndTag(name) handler = self.popProgram() self.emit("onError", block, handler) def emitCondition(self, expr): cexpr = self.compileExpression(expr) program = self.popProgram() self.emit("condition", cexpr, program) def emitRepeat(self, arg): m = re.match("(?s)\s*(%s)\s+(.*)\Z" % NAME_RE, arg) if not m: raise TALError("invalid repeat syntax: " + `arg`, self.position) name, expr = m.group(1, 2) cexpr = self.compileExpression(expr) program = self.popProgram() self.emit("loop", name, cexpr, program) def emitSubstitution(self, arg, attrDict={}): key, expr = parseSubstitution(arg) cexpr = self.compileExpression(expr) program = self.popProgram() if key == "text": self.emit("insertText", cexpr, program) else: assert key == "structure" self.emit("insertStructure", cexpr, attrDict, program) def emitI18nSubstitution(self, arg, attrDict={}): # TODO: Code duplication is BAD, we need to fix it later key, expr = parseSubstitution(arg) cexpr = self.compileExpression(expr) program = self.popProgram() if key == "text": self.emit("insertI18nText", cexpr, program) else: assert key == "structure" self.emit("insertI18nStructure", cexpr, attrDict, program) def emitEvaluateCode(self, lang): program = self.popProgram() self.emit('evaluateCode', lang, program) def emitI18nVariable(self, varname): # Used for i18n:name attributes. 
m = _name_rx.match(varname) if m is None or m.group() != varname: raise TALError("illegal i18n:name: %r" % varname, self.position) program = self.popProgram() self.emit('i18nVariable', varname, program, None, False) def emitTranslation(self, msgid, i18ndata): program = self.popProgram() if i18ndata is None: self.emit('insertTranslation', msgid, program) else: key, expr = parseSubstitution(i18ndata) cexpr = self.compileExpression(expr) assert key == 'text' self.emit('insertTranslation', msgid, program, cexpr) def emitDefineMacro(self, macroName): program = self.popProgram() macroName = macroName.strip() if self.macros.has_key(macroName): raise METALError("duplicate macro definition: %s" % `macroName`, self.position) if not re.match('%s$' % NAME_RE, macroName): raise METALError("invalid macro name: %s" % `macroName`, self.position) self.macros[macroName] = program self.inMacroDef = self.inMacroDef - 1 self.emit("defineMacro", macroName, program) def emitUseMacro(self, expr): cexpr = self.compileExpression(expr) program = self.popProgram() self.inMacroUse = 0 self.emit("useMacro", expr, cexpr, self.popSlots(), program) def emitExtendMacro(self, defineName, useExpr): cexpr = self.compileExpression(useExpr) program = self.popProgram() self.inMacroUse = 0 self.emit("extendMacro", useExpr, cexpr, self.popSlots(), program, defineName) self.emitDefineMacro(defineName) def emitDefineSlot(self, slotName): program = self.popProgram() slotName = slotName.strip() if not re.match('%s$' % NAME_RE, slotName): raise METALError("invalid slot name: %s" % `slotName`, self.position) self.emit("defineSlot", slotName, program) def emitFillSlot(self, slotName): program = self.popProgram() slotName = slotName.strip() if self.slots.has_key(slotName): raise METALError("duplicate fill-slot name: %s" % `slotName`, self.position) if not re.match('%s$' % NAME_RE, slotName): raise METALError("invalid slot name: %s" % `slotName`, self.position) self.slots[slotName] = program self.inMacroUse = 1 
self.emit("fillSlot", slotName, program) def unEmitWhitespace(self): collect = [] i = len(self.program) - 1 while i >= 0: item = self.program[i] if item[0] != "rawtext": break text = item[1] if not re.match(r"\A\s*\Z", text): break collect.append(text) i = i-1 del self.program[i+1:] if i >= 0 and self.program[i][0] == "rawtext": text = self.program[i][1] m = re.search(r"\s+\Z", text) if m: self.program[i] = ("rawtext", text[:m.start()]) collect.append(m.group()) collect.reverse() return "".join(collect) def unEmitNewlineWhitespace(self): collect = [] i = len(self.program) while i > 0: i = i-1 item = self.program[i] if item[0] != "rawtext": break text = item[1] if re.match(r"\A[ \t]*\Z", text): collect.append(text) continue m = re.match(r"(?s)^(.*)(\n[ \t]*)\Z", text) if not m: break text, rest = m.group(1, 2) collect.reverse() rest = rest + "".join(collect) del self.program[i:] if text: self.emit("rawtext", text) return rest return None def replaceAttrs(self, attrlist, repldict): # Each entry in attrlist starts like (name, value). Result is # (name, value, action, expr, xlat, msgid) if there is a # tal:attributes entry for that attribute. Additional attrs # defined only by tal:attributes are added here. 
# # (name, value, action, expr, xlat, msgid) if not repldict: return attrlist newlist = [] for item in attrlist: key = item[0] if repldict.has_key(key): expr, xlat, msgid = repldict[key] item = item[:2] + ("replace", expr, xlat, msgid) del repldict[key] newlist.append(item) # Add dynamic-only attributes for key, (expr, xlat, msgid) in repldict.items(): newlist.append((key, None, "insert", expr, xlat, msgid)) return newlist def emitStartElement(self, name, attrlist, taldict, metaldict, i18ndict, position=(None, None), isend=0): if not taldict and not metaldict and not i18ndict: # Handle the simple, common case self.emitStartTag(name, attrlist, isend) self.todoPush({}) if isend: self.emitEndElement(name, isend) return self.position = position # TODO: Ugly hack to work around tal:replace and i18n:translate issue. # I (DV) need to cleanup the code later. replaced = False if "replace" in taldict: if "content" in taldict: raise TALError( "tal:content and tal:replace are mutually exclusive", position) taldict["omit-tag"] = taldict.get("omit-tag", "") taldict["content"] = taldict.pop("replace") replaced = True for key, value in taldict.items(): if key not in taldefs.KNOWN_TAL_ATTRIBUTES: raise TALError("bad TAL attribute: " + `key`, position) if not (value or key == 'omit-tag'): raise TALError("missing value for TAL attribute: " + `key`, position) for key, value in metaldict.items(): if key not in taldefs.KNOWN_METAL_ATTRIBUTES: raise METALError("bad METAL attribute: " + `key`, position) if not value: raise TALError("missing value for METAL attribute: " + `key`, position) for key, value in i18ndict.items(): if key not in taldefs.KNOWN_I18N_ATTRIBUTES: raise I18NError("bad i18n attribute: " + `key`, position) if not value and key in ("attributes", "data", "id"): raise I18NError("missing value for i18n attribute: " + `key`, position) todo = {} defineMacro = metaldict.get("define-macro") extendMacro = metaldict.get("extend-macro") useMacro = metaldict.get("use-macro") 
defineSlot = metaldict.get("define-slot") fillSlot = metaldict.get("fill-slot") define = taldict.get("define") condition = taldict.get("condition") repeat = taldict.get("repeat") content = taldict.get("content") script = taldict.get("script") attrsubst = taldict.get("attributes") onError = taldict.get("on-error") omitTag = taldict.get("omit-tag") TALtag = taldict.get("tal tag") i18nattrs = i18ndict.get("attributes") # Preserve empty string if implicit msgids are used. We'll generate # code with the msgid='' and calculate the right implicit msgid during # interpretation phase. msgid = i18ndict.get("translate") varname = i18ndict.get('name') i18ndata = i18ndict.get('data') if varname and not self.i18nLevel: raise I18NError( "i18n:name can only occur inside a translation unit", position) if i18ndata and not msgid: raise I18NError("i18n:data must be accompanied by i18n:translate", position) if extendMacro: if useMacro: raise METALError( "extend-macro cannot be used with use-macro", position) if not defineMacro: raise METALError( "extend-macro must be used with define-macro", position) if defineMacro or extendMacro or useMacro: if fillSlot or defineSlot: raise METALError( "define-slot and fill-slot cannot be used with " "define-macro, extend-macro, or use-macro", position) if defineMacro and useMacro: raise METALError( "define-macro may not be used with use-macro", position) useMacro = useMacro or extendMacro if content and msgid: raise I18NError( "explicit message id and tal:content can't be used together", position) repeatWhitespace = None if repeat: # Hack to include preceding whitespace in the loop program repeatWhitespace = self.unEmitNewlineWhitespace() if position != (None, None): # TODO: at some point we should insist on a non-trivial position self.emit("setPosition", position) if self.inMacroUse: if fillSlot: self.pushProgram() # generate a source annotation at the beginning of fill-slot if self.source_file is not None: if position != (None, None): 
self.emit("setPosition", position) self.emit("setSourceFile", self.source_file) todo["fillSlot"] = fillSlot self.inMacroUse = 0 else: if fillSlot: raise METALError("fill-slot must be within a use-macro", position) if not self.inMacroUse: if defineMacro: self.pushProgram() self.emit("version", TAL_VERSION) self.emit("mode", self.xml and "xml" or "html") # generate a source annotation at the beginning of the macro if self.source_file is not None: if position != (None, None): self.emit("setPosition", position) self.emit("setSourceFile", self.source_file) todo["defineMacro"] = defineMacro self.inMacroDef = self.inMacroDef + 1 if useMacro: self.pushSlots() self.pushProgram() todo["useMacro"] = useMacro self.inMacroUse = 1 if defineSlot: if not self.inMacroDef: raise METALError( "define-slot must be within a define-macro", position) self.pushProgram() todo["defineSlot"] = defineSlot if defineSlot or i18ndict: domain = i18ndict.get("domain") or self.i18nContext.domain source = i18ndict.get("source") or self.i18nContext.source target = i18ndict.get("target") or self.i18nContext.target if ( domain != DEFAULT_DOMAIN or source is not None or target is not None): self.i18nContext = TranslationContext(self.i18nContext, domain=domain, source=source, target=target) self.emit("beginI18nContext", {"domain": domain, "source": source, "target": target}) todo["i18ncontext"] = 1 if taldict or i18ndict: dict = {} for item in attrlist: key, value = item[:2] dict[key] = value self.emit("beginScope", dict) todo["scope"] = 1 if onError: self.pushProgram() # handler if TALtag: self.pushProgram() # start self.emitStartTag(name, list(attrlist)) # Must copy attrlist! 
if TALtag: self.pushProgram() # start self.pushProgram() # block todo["onError"] = onError if define: self.emitDefines(define) todo["define"] = define if condition: self.pushProgram() todo["condition"] = condition if repeat: todo["repeat"] = repeat self.pushProgram() if repeatWhitespace: self.emitText(repeatWhitespace) if content: if varname: todo['i18nvar'] = varname todo["content"] = content self.pushProgram() else: todo["content"] = content # i18n:name w/o tal:replace uses the content as the interpolation # dictionary values elif varname: todo['i18nvar'] = varname self.pushProgram() if msgid is not None: self.i18nLevel += 1 todo['msgid'] = msgid if i18ndata: todo['i18ndata'] = i18ndata optTag = omitTag is not None or TALtag if optTag: todo["optional tag"] = omitTag, TALtag self.pushProgram() if attrsubst or i18nattrs: if attrsubst: repldict = taldefs.parseAttributeReplacements(attrsubst, self.xml) else: repldict = {} if i18nattrs: i18nattrs = _parseI18nAttributes(i18nattrs, self.position, self.xml) else: i18nattrs = {} # Convert repldict's name-->expr mapping to a # name-->(compiled_expr, translate) mapping for key, value in repldict.items(): if i18nattrs.get(key, None): raise I18NError( "attribute [%s] cannot both be part of tal:attributes" " and have a msgid in i18n:attributes" % key, position) ce = self.compileExpression(value) repldict[key] = ce, key in i18nattrs, i18nattrs.get(key) for key in i18nattrs: if key not in repldict: repldict[key] = None, 1, i18nattrs.get(key) else: repldict = {} if replaced: todo["repldict"] = repldict repldict = {} if script: todo["script"] = script self.emitStartTag(name, self.replaceAttrs(attrlist, repldict), isend) if optTag: self.pushProgram() if content and not varname: self.pushProgram() if not content and msgid is not None: self.pushProgram() if content and varname: self.pushProgram() if script: self.pushProgram() if todo and position != (None, None): todo["position"] = position self.todoPush(todo) if isend: 
self.emitEndElement(name, isend, position=position) def emitEndElement(self, name, isend=0, implied=0, position=(None, None)): todo = self.todoPop() if not todo: # Shortcut if not isend: self.emitEndTag(name) return self.position = todo.get("position", (None, None)) defineMacro = todo.get("defineMacro") useMacro = todo.get("useMacro") defineSlot = todo.get("defineSlot") fillSlot = todo.get("fillSlot") repeat = todo.get("repeat") content = todo.get("content") script = todo.get("script") condition = todo.get("condition") onError = todo.get("onError") repldict = todo.get("repldict", {}) scope = todo.get("scope") optTag = todo.get("optional tag") msgid = todo.get('msgid') i18ncontext = todo.get("i18ncontext") varname = todo.get('i18nvar') i18ndata = todo.get('i18ndata') if implied > 0: if defineMacro or useMacro or defineSlot or fillSlot: exc = METALError what = "METAL" else: exc = TALError what = "TAL" raise exc("%s attributes on <%s> require explicit </%s>" % (what, name, name), self.position) if script: self.emitEvaluateCode(script) # If there's no tal:content or tal:replace in the tag with the # i18n:name, tal:replace is the default. if content: if msgid is not None: self.emitI18nSubstitution(content, repldict) else: self.emitSubstitution(content, repldict) # If we're looking at an implicit msgid, emit the insertTranslation # opcode now, so that the end tag doesn't become part of the implicit # msgid. If we're looking at an explicit msgid, it's better to emit # the opcode after the i18nVariable opcode so we can better handle # tags with both of them in them (and in the latter case, the contents # would be thrown away for msgid purposes). 
# # Still, we should emit insertTranslation opcode before i18nVariable # in case tal:content, i18n:translate and i18n:name in the same tag if not content and msgid is not None: self.emitTranslation(msgid, i18ndata) self.i18nLevel -= 1 if optTag: self.emitOptTag(name, optTag, isend) elif not isend: # If we're processing the end tag for a tag that contained # i18n:name, we need to make sure that optimize() won't collect # immediately following end tags into the same rawtextOffset, so # put a spacer here that the optimizer will recognize. if varname: self.emit('noop') self.emitEndTag(name) if varname: self.emitI18nVariable(varname) if repeat: self.emitRepeat(repeat) if condition: self.emitCondition(condition) if onError: self.emitOnError(name, onError, optTag and optTag[1], isend) if scope: self.emit("endScope") if i18ncontext: self.emit("endI18nContext") assert self.i18nContext.parent is not None self.i18nContext = self.i18nContext.parent if defineSlot: self.emitDefineSlot(defineSlot) if fillSlot: self.emitFillSlot(fillSlot) if useMacro or defineMacro: if useMacro and defineMacro: self.emitExtendMacro(defineMacro, useMacro) elif useMacro: self.emitUseMacro(useMacro) elif defineMacro: self.emitDefineMacro(defineMacro) if useMacro or defineSlot: # generate a source annotation after define-slot or use-macro # because the source file might have changed if self.source_file is not None: if position != (None, None): self.emit("setPosition", position) self.emit("setSourceFile", self.source_file) def _parseI18nAttributes(i18nattrs, position, xml): d = {} # Filter out empty items, eg: # i18n:attributes="value msgid; name msgid2;" # would result in 3 items where the last one is empty attrs = [spec for spec in i18nattrs.split(";") if spec] for spec in attrs: parts = spec.split() if len(parts) == 2: attr, msgid = parts elif len(parts) == 1: attr = parts[0] msgid = None else: raise TALError("illegal i18n:attributes specification: %r" % spec, position) if not xml: attr = 
attr.lower() if attr in d: raise TALError( "attribute may only be specified once in i18n:attributes: %r" % attr, position) d[attr] = msgid return d def test(): t = TALGenerator() t.pushProgram() t.emit("bar") p = t.popProgram() t.emit("foo", p) if __name__ == "__main__": test()
gpl-3.0
tarikgwa/nfd
newfies/apirest/queue_serializers.py
1
3182
# -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from rest_framework import serializers
from callcenter.models import Queue


class QueueSerializer(serializers.HyperlinkedModelSerializer):
    """
    Serializer for call-center ``Queue`` objects exposed at
    ``/rest-api/queue/``.

    **Create**:

        CURL Usage::

            curl -u username:password --dump-header - -H "Content-Type:application/json" -X POST --data '{"name": "queue name"}' http://localhost:8000/rest-api/queue/

        Response::

            HTTP/1.0 201 CREATED
            Date: Fri, 14 Jun 2013 09:52:27 GMT
            Server: WSGIServer/0.1 Python/2.7.3
            Vary: Accept, Accept-Language, Cookie
            Content-Type: application/json; charset=utf-8
            Content-Language: en-us
            Location: http://localhost:8000/rest-api/queue/1/
            Allow: GET, POST, HEAD, OPTIONS

    **Read**:

        CURL Usage::

            curl -u username:password -H 'Accept: application/json' http://localhost:8000/rest-api/queue/

        Response::

            {
                "count": 1,
                "next": null,
                "previous": null,
                "results": [
                    {
                        "manager": "manager",
                        "url": "http://127.0.0.1:8000/rest-api/queue/1/",
                        "name": "Sample queue",
                        "strategy": 5,
                        "moh_sound": "",
                        "record_template": "",
                        "time_base_score": "queue",
                        "tier_rules_apply": false,
                        "tier_rule_wait_second": 300,
                        "tier_rule_wait_multiply_level": true,
                        "tier_rule_no_agent_no_wait": false,
                        "discard_abandoned_after": 14400,
                        "abandoned_resume_allowed": true,
                        "max_wait_time": 0,
                        "max_wait_time_with_no_agent": 120,
                        "max_wait_time_with_no_agent_time_reached": 5,
                        "created_date": "2013-10-23T12:34:20.157Z",
                        "updated_date": "2013-10-23T12:34:20.157Z"
                    }
                ]
            }

    **Update**:

        CURL Usage::

            curl -u username:password --dump-header - -H "Content-Type: application/json" -X PUT --data '{"name": "change name"}' http://localhost:8000/rest-api/queue/%queue-id%/

        Response::

            HTTP/1.0 202 NO CONTENT
            Date: Fri, 23 Sep 2011 06:46:12 GMT
            Server: WSGIServer/0.1 Python/2.7.1+
            Vary: Accept-Language, Cookie
            Content-Length: 0
            Content-Type: text/html; charset=utf-8
            Content-Language: en-us
    """
    # Read-only representation of the queue's manager.  The field name
    # already matches the model attribute, so the original
    # source='manager' kwarg was redundant: DRF >= 3.0 rejects a
    # `source` equal to the field name with an AssertionError, and on
    # earlier versions omitting it is behaviorally identical.
    manager = serializers.Field()

    class Meta:
        model = Queue
mpl-2.0
denisff/python-for-android
python3-alpha/python3-src/Lib/distutils/text_file.py
136
12467
"""text_file

provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""

import sys, os, io  # NOTE(review): 'os' appears unused in this module -- confirm before removing


class TextFile:
    """Provides a file-like object that takes care of all the things you
       commonly want to do when processing a text file that has some
       line-by-line syntax: strip comments (as long as "#" is your
       comment character), skip blank lines, join adjacent lines by
       escaping the newline (ie. backslash at end of line), strip
       leading and/or trailing whitespace.  All of these are optional
       and independently controllable.

       Provides a 'warn()' method so you can generate warning messages that
       report physical line number, even if the logical line in question
       spans multiple physical lines.  Also provides 'unreadline()' for
       implementing line-at-a-time lookahead.

       Constructor is called as:

           TextFile (filename=None, file=None, **options)

       It bombs (RuntimeError) if both 'filename' and 'file' are None;
       'filename' should be a string, and 'file' a file object (or
       something that provides 'readline()' and 'close()' methods).  It is
       recommended that you supply at least 'filename', so that TextFile
       can include it in warning messages.  If 'file' is not supplied,
       TextFile creates its own using 'io.open()'.

       The options are all boolean, and affect the value returned by
       'readline()':
         strip_comments [default: true]
           strip from "#" to end-of-line, as well as any whitespace
           leading up to the "#" -- unless it is escaped by a backslash
         lstrip_ws [default: false]
           strip leading whitespace from each line before returning it
         rstrip_ws [default: true]
           strip trailing whitespace (including line terminator!) from
           each line before returning it
         skip_blanks [default: true]
           skip lines that are empty *after* stripping comments and
           whitespace.  (If both lstrip_ws and rstrip_ws are false,
           then some lines may consist of solely whitespace: these will
           *not* be skipped, even if 'skip_blanks' is true.)
         join_lines [default: false]
           if a backslash is the last non-newline character on a line
           after stripping comments and whitespace, join the following line
           to it to form one "logical line"; if N consecutive lines end
           with a backslash, then N+1 physical lines will be joined to
           form one logical line.
         collapse_join [default: false]
           strip leading whitespace from lines that are joined to their
           predecessor; only matters if (join_lines and not lstrip_ws)
         errors [default: 'strict']
           error handler used to decode the file content

       Note that since 'rstrip_ws' can strip the trailing newline, the
       semantics of 'readline()' must differ from those of the builtin file
       object's 'readline()' method!  In particular, 'readline()' returns
       None for end-of-file: an empty string might just be a blank line
       (or an all-whitespace line), if 'rstrip_ws' is true but
       'skip_blanks' is not."""

    # Fallback value for each recognized option; any keyword not listed
    # here is rejected by __init__.
    default_options = { 'strip_comments': 1,
                        'skip_blanks':    1,
                        'lstrip_ws':      0,
                        'rstrip_ws':      1,
                        'join_lines':     0,
                        'collapse_join':  0,
                        'errors':         'strict',
                      }

    def __init__(self, filename=None, file=None, **options):
        """Construct a new TextFile object.  At least one of 'filename'
           (a string) and 'file' (a file-like object) must be supplied.
           The keyword argument options are described above and affect
           the values returned by 'readline()'.

           Raises RuntimeError if neither 'filename' nor 'file' is given,
           and KeyError for an unrecognized option name."""
        if filename is None and file is None:
            raise RuntimeError("you must supply either or both of 'filename' and 'file'")

        # set values for all options -- either from client option hash
        # or fallback to default_options
        for opt in self.default_options.keys():
            if opt in options:
                setattr(self, opt, options[opt])
            else:
                setattr(self, opt, self.default_options[opt])

        # sanity check client option hash
        for opt in options.keys():
            if opt not in self.default_options:
                raise KeyError("invalid TextFile option '%s'" % opt)

        if file is None:
            self.open(filename)
        else:
            self.filename = filename
            self.file = file
            # current_line is an int most of the time, but readline()
            # temporarily turns it into a [start, end] list while
            # physical lines are being joined into one logical line.
            self.current_line = 0       # assuming that file is at BOF!

        # 'linebuf' is a stack of lines that will be emptied before we
        # actually read from the file; it's only populated by an
        # 'unreadline()' operation
        self.linebuf = []

    def open(self, filename):
        """Open a new file named 'filename'.  This overrides both the
           'filename' and 'file' arguments to the constructor."""
        self.filename = filename
        # decoding errors are handled per the 'errors' option
        self.file = io.open(self.filename, 'r', errors=self.errors)
        self.current_line = 0

    def close(self):
        """Close the current file and forget everything we know about it
           (filename, current line number)."""
        self.file.close()
        self.file = None
        self.filename = None
        self.current_line = None

    def gen_error(self, msg, line=None):
        """Build the "<filename>, line N: <msg>" text shared by error()
           and warn().  'line' may be an int, or a (start, end) pair for
           a logical line spanning several physical lines."""
        outmsg = []
        if line is None:
            line = self.current_line
        outmsg.append(self.filename + ", ")
        if isinstance(line, (list, tuple)):
            outmsg.append("lines %d-%d: " % tuple(line))
        else:
            outmsg.append("line %d: " % line)
        outmsg.append(str(msg))
        return "".join(outmsg)

    def error(self, msg, line=None):
        """Raise ValueError with a message tied to the current (or given)
           line, formatted by gen_error()."""
        raise ValueError("error: " + self.gen_error(msg, line))

    def warn(self, msg, line=None):
        """Print (to stderr) a warning message tied to the current logical
           line in the current file.  If the current logical line in the
           file spans multiple physical lines, the warning refers to the
           whole range, eg. "lines 3-5".  If 'line' supplied, it overrides
           the current line number; it may be a list or tuple to indicate a
           range of physical lines, or an integer for a single physical
           line."""
        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")

    def readline(self):
        """Read and return a single logical line from the current file (or
           from an internal buffer if lines have previously been "unread"
           with 'unreadline()').  If the 'join_lines' option is true, this
           may involve reading multiple physical lines concatenated into a
           single string.  Updates the current line number, so calling
           'warn()' after 'readline()' emits a warning about the physical
           line(s) just read.  Returns None on end-of-file, since the empty
           string can occur if 'rstrip_ws' is true but 'skip_blanks' is
           not."""
        # If any "unread" lines waiting in 'linebuf', return the top
        # one.  (We don't actually buffer read-ahead data -- lines only
        # get put in 'linebuf' if the client explicitly does an
        # 'unreadline()'.
        if self.linebuf:
            line = self.linebuf[-1]
            del self.linebuf[-1]
            return line

        # accumulates backslash-continued physical lines when
        # 'join_lines' is on; empty string means "no join in progress"
        buildup_line = ''

        while True:
            # read the line, make it None if EOF
            line = self.file.readline()
            if line == '':
                line = None

            if self.strip_comments and line:

                # Look for the first "#" in the line.  If none, never
                # mind.  If we find one and it's the first character, or
                # is not preceded by "\", then it starts a comment --
                # strip the comment, strip whitespace before it, and
                # carry on.  Otherwise, it's just an escaped "#", so
                # unescape it (and any other escaped "#"'s that might be
                # lurking in there) and otherwise leave the line alone.

                pos = line.find("#")
                if pos == -1:           # no "#" -- no comments
                    pass

                # It's definitely a comment -- either "#" is the first
                # character, or it's elsewhere and unescaped.
                elif pos == 0 or line[pos-1] != "\\":
                    # Have to preserve the trailing newline, because it's
                    # the job of a later step (rstrip_ws) to remove it --
                    # and if rstrip_ws is false, we'd better preserve it!
                    # (NB. this means that if the final line is all comment
                    # and has no trailing newline, we will think that it's
                    # EOF; I think that's OK.)
                    eol = (line[-1] == '\n') and '\n' or ''
                    line = line[0:pos] + eol

                    # If all that's left is whitespace, then skip line
                    # *now*, before we try to join it to 'buildup_line' --
                    # that way constructs like
                    #   hello \\
                    #   # comment that should be ignored
                    #   there
                    # result in "hello there".
                    if line.strip() == "":
                        continue
                else:                   # it's an escaped "#"
                    line = line.replace("\\#", "#")

            # did previous line end with a backslash? then accumulate
            if self.join_lines and buildup_line:
                # oops: end of file
                if line is None:
                    self.warn("continuation line immediately precedes "
                              "end-of-file")
                    return buildup_line

                if self.collapse_join:
                    line = line.lstrip()
                line = buildup_line + line

                # careful: pay attention to line number when incrementing it
                if isinstance(self.current_line, list):
                    self.current_line[1] = self.current_line[1] + 1
                else:
                    # switch to [start, end] form: this logical line now
                    # spans two physical lines
                    self.current_line = [self.current_line,
                                         self.current_line + 1]
            # just an ordinary line, read it as usual
            else:
                if line is None:        # eof
                    return None

                # still have to be careful about incrementing the line number!
                if isinstance(self.current_line, list):
                    self.current_line = self.current_line[1] + 1
                else:
                    self.current_line = self.current_line + 1

            # strip whitespace however the client wants (leading and
            # trailing, or one or the other, or neither)
            if self.lstrip_ws and self.rstrip_ws:
                line = line.strip()
            elif self.lstrip_ws:
                line = line.lstrip()
            elif self.rstrip_ws:
                line = line.rstrip()

            # blank line (whether we rstrip'ed or not)? skip to next line
            # if appropriate
            if (line == '' or line == '\n') and self.skip_blanks:
                continue

            if self.join_lines:
                if line[-1] == '\\':
                    buildup_line = line[:-1]
                    continue

                if line[-2:] == '\\\n':
                    buildup_line = line[0:-2] + '\n'
                    continue

            # well, I guess there's some actual content there: return it
            return line

    def readlines(self):
        """Read and return the list of all logical lines remaining in the
           current file."""
        lines = []
        while True:
            line = self.readline()
            if line is None:
                return lines
            lines.append(line)

    def unreadline(self, line):
        """Push 'line' (a string) onto an internal buffer that will be
           checked by future 'readline()' calls.  Handy for implementing
           a parser with line-at-a-time lookahead."""
        self.linebuf.append(line)
apache-2.0
kkdd/arangodb
3rdParty/V8-4.3.61/third_party/python_26/Lib/popen2.py
304
8416
"""Spawn a command with pipes to its stdin, stdout, and optionally stderr. The normal os.popen(cmd, mode) call spawns a shell command and provides a file interface to just the input or output of the process depending on whether mode is 'r' or 'w'. This module provides the functions popen2(cmd) and popen3(cmd) which return two or three pipes to the spawned command. """ import os import sys import warnings warnings.warn("The popen2 module is deprecated. Use the subprocess module.", DeprecationWarning, stacklevel=2) __all__ = ["popen2", "popen3", "popen4"] try: MAXFD = os.sysconf('SC_OPEN_MAX') except (AttributeError, ValueError): MAXFD = 256 _active = [] def _cleanup(): for inst in _active[:]: if inst.poll(_deadstate=sys.maxint) >= 0: try: _active.remove(inst) except ValueError: # This can happen if two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. pass class Popen3: """Class representing a child process. Normally, instances are created internally by the functions popen2() and popen3().""" sts = -1 # Child not completed yet def __init__(self, cmd, capturestderr=False, bufsize=-1): """The parameter 'cmd' is the shell command to execute in a sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). The 'capturestderr' flag, if true, specifies that the object should capture standard error output of the child process. The default is false. 
If the 'bufsize' parameter is specified, it specifies the size of the I/O buffers to/from the child process.""" _cleanup() self.cmd = cmd p2cread, p2cwrite = os.pipe() c2pread, c2pwrite = os.pipe() if capturestderr: errout, errin = os.pipe() self.pid = os.fork() if self.pid == 0: # Child os.dup2(p2cread, 0) os.dup2(c2pwrite, 1) if capturestderr: os.dup2(errin, 2) self._run_child(cmd) os.close(p2cread) self.tochild = os.fdopen(p2cwrite, 'w', bufsize) os.close(c2pwrite) self.fromchild = os.fdopen(c2pread, 'r', bufsize) if capturestderr: os.close(errin) self.childerr = os.fdopen(errout, 'r', bufsize) else: self.childerr = None def __del__(self): # In case the child hasn't been waited on, check if it's done. self.poll(_deadstate=sys.maxint) if self.sts < 0: if _active is not None: # Child is still running, keep us alive until we can wait on it. _active.append(self) def _run_child(self, cmd): if isinstance(cmd, basestring): cmd = ['/bin/sh', '-c', cmd] os.closerange(3, MAXFD) try: os.execvp(cmd[0], cmd) finally: os._exit(1) def poll(self, _deadstate=None): """Return the exit status of the child process if it has finished, or -1 if it hasn't finished yet.""" if self.sts < 0: try: pid, sts = os.waitpid(self.pid, os.WNOHANG) # pid will be 0 if self.pid hasn't terminated if pid == self.pid: self.sts = sts except os.error: if _deadstate is not None: self.sts = _deadstate return self.sts def wait(self): """Wait for and return the exit status of the child process.""" if self.sts < 0: pid, sts = os.waitpid(self.pid, 0) # This used to be a test, but it is believed to be # always true, so I changed it to an assertion - mvl assert pid == self.pid self.sts = sts return self.sts class Popen4(Popen3): childerr = None def __init__(self, cmd, bufsize=-1): _cleanup() self.cmd = cmd p2cread, p2cwrite = os.pipe() c2pread, c2pwrite = os.pipe() self.pid = os.fork() if self.pid == 0: # Child os.dup2(p2cread, 0) os.dup2(c2pwrite, 1) os.dup2(c2pwrite, 2) self._run_child(cmd) os.close(p2cread) 
self.tochild = os.fdopen(p2cwrite, 'w', bufsize) os.close(c2pwrite) self.fromchild = os.fdopen(c2pread, 'r', bufsize) if sys.platform[:3] == "win" or sys.platform == "os2emx": # Some things don't make sense on non-Unix platforms. del Popen3, Popen4 def popen2(cmd, bufsize=-1, mode='t'): """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). If 'bufsize' is specified, it sets the buffer size for the I/O pipes. The file objects (child_stdout, child_stdin) are returned.""" w, r = os.popen2(cmd, mode, bufsize) return r, w def popen3(cmd, bufsize=-1, mode='t'): """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). If 'bufsize' is specified, it sets the buffer size for the I/O pipes. The file objects (child_stdout, child_stdin, child_stderr) are returned.""" w, r, e = os.popen3(cmd, mode, bufsize) return r, w, e def popen4(cmd, bufsize=-1, mode='t'): """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). If 'bufsize' is specified, it sets the buffer size for the I/O pipes. The file objects (child_stdout_stderr, child_stdin) are returned.""" w, r = os.popen4(cmd, mode, bufsize) return r, w else: def popen2(cmd, bufsize=-1, mode='t'): """Execute the shell command 'cmd' in a sub-process. 
On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). If 'bufsize' is specified, it sets the buffer size for the I/O pipes. The file objects (child_stdout, child_stdin) are returned.""" inst = Popen3(cmd, False, bufsize) return inst.fromchild, inst.tochild def popen3(cmd, bufsize=-1, mode='t'): """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). If 'bufsize' is specified, it sets the buffer size for the I/O pipes. The file objects (child_stdout, child_stdin, child_stderr) are returned.""" inst = Popen3(cmd, True, bufsize) return inst.fromchild, inst.tochild, inst.childerr def popen4(cmd, bufsize=-1, mode='t'): """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If 'cmd' is a string it will be passed to the shell (as with os.system()). If 'bufsize' is specified, it sets the buffer size for the I/O pipes. The file objects (child_stdout_stderr, child_stdin) are returned.""" inst = Popen4(cmd, bufsize) return inst.fromchild, inst.tochild __all__.extend(["Popen3", "Popen4"])
apache-2.0
hugobowne/playing_with_twitter
sql_test.py
1
3384
# -*- coding: utf-8 -*- """ Created on Fri Nov 6 16:52:06 2015 @author: hugobowne-anderson """ #http://zetcode.com/db/sqlitepythontutorial/ import sqlite3 as lite import sys import json import unicodedata ###########MAKE LIST OF TWEETS:################ ##set deirectory here: tweets_data_path = '/Users/hugobowne-anderson/repos/selenium-auto-posting/listening_to_tweets/some_stream_pol.txt' tweets_data = [] tweets_file = open(tweets_data_path, "r") for line in tweets_file: try: tweet = json.loads(line) tweets_data.append(tweet) except: continue print len(tweets_data) tweets_data.pop(0) tweets_data[0].keys() tweets_data[0]['place'] type(i['place']) for i in tweets_data: try: print i['place']['full_name'] except: print "Shiiiiiiiite: something missing in data" ############################################ ####SQLITE#EG######### con = lite.connect('test.db') ##connect to db cur = con.cursor() #get cursor objet to traverse records from result set cur.execute('SELECT SQLITE_VERSION()') ##execute method of cursor to execute an SQL statement/query data = cur.fetchone() ##fetch data (one record for the time being) print "SQLite version: %s" % data con.close() ############################################ ###add tweets to db con = lite.connect('/Users/hugobowne-anderson/repos/selenium-auto-posting/listening_to_tweets//test.db') ##connect to db cur = con.cursor() cur.execute("DROP TABLE Tweets") cur.execute("CREATE TABLE Tweets(Name TEXT, Tweet TEXT, Language TEXT, Created_at TEXT, Geo TEXT, Place TEXT)") for i in tweets_data: #aaaaaaand do check this out: http://sebastianraschka.com/Articles/2014_sqlite_in_python_tutorial.html #print i['text'] try: user = i['user']['name']; except KeyError: user = None; try: place = i['place']['full_name']; except: place = None; try: cur.execute("INSERT INTO Tweets VALUES(? , ? , ? , ? , ? 
, ?)", (user, i['text'] , i['lang'] , i['timestamp_ms'] , i['geo'] , place)); except: print "Shiiiiiiiite: something missing in data" con.commit() #commit changes to db con.close() #close connection i.user() #####test & select################ #cur.execute("SELECT * FROM Tweets"); con = lite.connect('/Users/hugobowne-anderson/repos/selenium-auto-posting/listening_to_tweets//test.db') ##connect to db cur = con.cursor() #cur.execute("SELECT Language FROM Tweets"); #rows = cur.fetchall() #for row in rows: # print row cur.execute("SELECT place, count(place) from Tweets group by place"); rows = cur.fetchall() for row in rows: print row cur.execute("SELECT Language, count(Language) from Tweets group by Language"); rows = cur.fetchall() type(rows) #plot(rows) x = []; y = []; for row in rows: x.append(row[0]); y.append(row[1]); import numpy as np import matplotlib.pyplot as plt #http://matplotlib.org/api/pyplot_api.html N = len(x) ind = np.arange(N) # the x locations for the groups width = 0.35 # the width of the bars: can also be len(x) sequence plt.bar(ind, y ) plt.xticks(ind + width/2., x , rotation=90) plt.ylabel('Number of tweets') plt.ylabel('Language') ##################################### ########################################################
gpl-2.0
kehao95/Wechat_LearnHelper
src/env/lib/python3.5/site-packages/setuptools/command/install_egg_info.py
423
4001
from distutils import log, dir_util import os from setuptools import Command from setuptools.archive_util import unpack_archive import pkg_resources class install_egg_info(Command): """Install an .egg-info directory for the package""" description = "Install an .egg-info directory for the package" user_options = [ ('install-dir=', 'd', "directory to install to"), ] def initialize_options(self): self.install_dir = None def finalize_options(self): self.set_undefined_options('install_lib', ('install_dir', 'install_dir')) ei_cmd = self.get_finalized_command("egg_info") basename = pkg_resources.Distribution( None, None, ei_cmd.egg_name, ei_cmd.egg_version ).egg_name() + '.egg-info' self.source = ei_cmd.egg_info self.target = os.path.join(self.install_dir, basename) self.outputs = [self.target] def run(self): self.run_command('egg_info') if os.path.isdir(self.target) and not os.path.islink(self.target): dir_util.remove_tree(self.target, dry_run=self.dry_run) elif os.path.exists(self.target): self.execute(os.unlink, (self.target,), "Removing " + self.target) if not self.dry_run: pkg_resources.ensure_directory(self.target) self.execute( self.copytree, (), "Copying %s to %s" % (self.source, self.target) ) self.install_namespaces() def get_outputs(self): return self.outputs def copytree(self): # Copy the .egg-info tree to site-packages def skimmer(src, dst): # filter out source-control directories; note that 'src' is always # a '/'-separated path, regardless of platform. 'dst' is a # platform-specific path. 
for skip in '.svn/', 'CVS/': if src.startswith(skip) or '/' + skip in src: return None self.outputs.append(dst) log.debug("Copying %s to %s", src, dst) return dst unpack_archive(self.source, self.target, skimmer) def install_namespaces(self): nsp = self._get_all_ns_packages() if not nsp: return filename, ext = os.path.splitext(self.target) filename += '-nspkg.pth' self.outputs.append(filename) log.info("Installing %s", filename) lines = map(self._gen_nspkg_line, nsp) if self.dry_run: # always generate the lines, even in dry run list(lines) return with open(filename, 'wt') as f: f.writelines(lines) _nspkg_tmpl = ( "import sys, types, os", "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)", "ie = os.path.exists(os.path.join(p,'__init__.py'))", "m = not ie and " "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))", "mp = (m or []) and m.__dict__.setdefault('__path__',[])", "(p not in mp) and mp.append(p)", ) "lines for the namespace installer" _nspkg_tmpl_multi = ( 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', ) "additional line(s) when a parent package is indicated" @classmethod def _gen_nspkg_line(cls, pkg): # ensure pkg is not a unicode string under Python 2.7 pkg = str(pkg) pth = tuple(pkg.split('.')) tmpl_lines = cls._nspkg_tmpl parent, sep, child = pkg.rpartition('.') if parent: tmpl_lines += cls._nspkg_tmpl_multi return ';'.join(tmpl_lines) % locals() + '\n' def _get_all_ns_packages(self): """Return sorted list of all package namespaces""" nsp = set() for pkg in self.distribution.namespace_packages or []: pkg = pkg.split('.') while pkg: nsp.add('.'.join(pkg)) pkg.pop() return sorted(nsp)
gpl-3.0
Jet-Streaming/framework
deps/googletest/googlemock/scripts/generator/cpp/gmock_class_test.py
395
11356
#!/usr/bin/env python # # Copyright 2009 Neal Norwitz All Rights Reserved. # Portions Copyright 2009 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for gmock.scripts.generator.cpp.gmock_class.""" __author__ = 'nnorwitz@google.com (Neal Norwitz)' import os import sys import unittest # Allow the cpp imports below to work when run as a standalone script. sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from cpp import ast from cpp import gmock_class class TestCase(unittest.TestCase): """Helper class that adds assert methods.""" def StripLeadingWhitespace(self, lines): """Strip leading whitespace in each line in 'lines'.""" return '\n'.join([s.lstrip() for s in lines.split('\n')]) def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines): """Specialized assert that ignores the indent level.""" self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines)) class GenerateMethodsTest(TestCase): def GenerateMethodSource(self, cpp_source): """Convert C++ source to Google Mock output source lines.""" method_source_lines = [] # <test> is a pseudo-filename, it is not read or written. 
builder = ast.BuilderFromSource(cpp_source, '<test>') ast_list = list(builder.Generate()) gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0]) return '\n'.join(method_source_lines) def testSimpleMethod(self): source = """ class Foo { public: virtual int Bar(); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint());', self.GenerateMethodSource(source)) def testSimpleConstructorsAndDestructor(self): source = """ class Foo { public: Foo(); Foo(int x); Foo(const Foo& f); Foo(Foo&& f); ~Foo(); virtual int Bar() = 0; }; """ # The constructors and destructor should be ignored. self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint());', self.GenerateMethodSource(source)) def testVirtualDestructor(self): source = """ class Foo { public: virtual ~Foo(); virtual int Bar() = 0; }; """ # The destructor should be ignored. self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint());', self.GenerateMethodSource(source)) def testExplicitlyDefaultedConstructorsAndDestructor(self): source = """ class Foo { public: Foo() = default; Foo(const Foo& f) = default; Foo(Foo&& f) = default; ~Foo() = default; virtual int Bar() = 0; }; """ # The constructors and destructor should be ignored. self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint());', self.GenerateMethodSource(source)) def testExplicitlyDeletedConstructorsAndDestructor(self): source = """ class Foo { public: Foo() = delete; Foo(const Foo& f) = delete; Foo(Foo&& f) = delete; ~Foo() = delete; virtual int Bar() = 0; }; """ # The constructors and destructor should be ignored. 
self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint());', self.GenerateMethodSource(source)) def testSimpleOverrideMethod(self): source = """ class Foo { public: int Bar() override; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint());', self.GenerateMethodSource(source)) def testSimpleConstMethod(self): source = """ class Foo { public: virtual void Bar(bool flag) const; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));', self.GenerateMethodSource(source)) def testExplicitVoid(self): source = """ class Foo { public: virtual int Bar(void); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0(Bar,\nint(void));', self.GenerateMethodSource(source)) def testStrangeNewlineInParameter(self): source = """ class Foo { public: virtual void Bar(int a) = 0; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD1(Bar,\nvoid(int a));', self.GenerateMethodSource(source)) def testDefaultParameters(self): source = """ class Foo { public: virtual void Bar(int a, char c = 'x') = 0; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD2(Bar,\nvoid(int, char));', self.GenerateMethodSource(source)) def testMultipleDefaultParameters(self): source = """ class Foo { public: virtual void Bar(int a = 42, char c = 'x') = 0; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD2(Bar,\nvoid(int, char));', self.GenerateMethodSource(source)) def testRemovesCommentsWhenDefaultsArePresent(self): source = """ class Foo { public: virtual void Bar(int a = 42 /* a comment */, char /* other comment */ c= 'x') = 0; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD2(Bar,\nvoid(int, char));', self.GenerateMethodSource(source)) def testDoubleSlashCommentsInParameterListAreRemoved(self): source = """ class Foo { public: virtual void Bar(int a, // inline comments should be elided. int b // inline comments should be elided. 
) const = 0; }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));', self.GenerateMethodSource(source)) def testCStyleCommentsInParameterListAreNotRemoved(self): # NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these # comments. Also note that C style comments after the last parameter # are still elided. source = """ class Foo { public: virtual const string& Bar(int /* keeper */, int b); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));', self.GenerateMethodSource(source)) def testArgsOfTemplateTypes(self): source = """ class Foo { public: virtual int Bar(const vector<int>& v, map<int, string>* output); };""" self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD2(Bar,\n' 'int(const vector<int>& v, map<int, string>* output));', self.GenerateMethodSource(source)) def testReturnTypeWithOneTemplateArg(self): source = """ class Foo { public: virtual vector<int>* Bar(int n); };""" self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD1(Bar,\nvector<int>*(int n));', self.GenerateMethodSource(source)) def testReturnTypeWithManyTemplateArgs(self): source = """ class Foo { public: virtual map<int, string> Bar(); };""" # Comparing the comment text is brittle - we'll think of something # better in case this gets annoying, but for now let's keep it simple. self.assertEqualIgnoreLeadingWhitespace( '// The following line won\'t really compile, as the return\n' '// type has multiple template arguments. 
To fix it, use a\n' '// typedef for the return type.\n' 'MOCK_METHOD0(Bar,\nmap<int, string>());', self.GenerateMethodSource(source)) def testSimpleMethodInTemplatedClass(self): source = """ template<class T> class Foo { public: virtual int Bar(); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD0_T(Bar,\nint());', self.GenerateMethodSource(source)) def testPointerArgWithoutNames(self): source = """ class Foo { virtual int Bar(C*); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD1(Bar,\nint(C*));', self.GenerateMethodSource(source)) def testReferenceArgWithoutNames(self): source = """ class Foo { virtual int Bar(C&); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD1(Bar,\nint(C&));', self.GenerateMethodSource(source)) def testArrayArgWithoutNames(self): source = """ class Foo { virtual int Bar(C[]); }; """ self.assertEqualIgnoreLeadingWhitespace( 'MOCK_METHOD1(Bar,\nint(C[]));', self.GenerateMethodSource(source)) class GenerateMocksTest(TestCase): def GenerateMocks(self, cpp_source): """Convert C++ source to complete Google Mock output source.""" # <test> is a pseudo-filename, it is not read or written. 
filename = '<test>' builder = ast.BuilderFromSource(cpp_source, filename) ast_list = list(builder.Generate()) lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None) return '\n'.join(lines) def testNamespaces(self): source = """ namespace Foo { namespace Bar { class Forward; } namespace Baz { class Test { public: virtual void Foo(); }; } // namespace Baz } // namespace Foo """ expected = """\ namespace Foo { namespace Baz { class MockTest : public Test { public: MOCK_METHOD0(Foo, void()); }; } // namespace Baz } // namespace Foo """ self.assertEqualIgnoreLeadingWhitespace( expected, self.GenerateMocks(source)) def testClassWithStorageSpecifierMacro(self): source = """ class STORAGE_SPECIFIER Test { public: virtual void Foo(); }; """ expected = """\ class MockTest : public Test { public: MOCK_METHOD0(Foo, void()); }; """ self.assertEqualIgnoreLeadingWhitespace( expected, self.GenerateMocks(source)) def testTemplatedForwardDeclaration(self): source = """ template <class T> class Forward; // Forward declaration should be ignored. 
class Test { public: virtual void Foo(); }; """ expected = """\ class MockTest : public Test { public: MOCK_METHOD0(Foo, void()); }; """ self.assertEqualIgnoreLeadingWhitespace( expected, self.GenerateMocks(source)) def testTemplatedClass(self): source = """ template <typename S, typename T> class Test { public: virtual void Foo(); }; """ expected = """\ template <typename T0, typename T1> class MockTest : public Test<T0, T1> { public: MOCK_METHOD0_T(Foo, void()); }; """ self.assertEqualIgnoreLeadingWhitespace( expected, self.GenerateMocks(source)) def testTemplateInATemplateTypedef(self): source = """ class Test { public: typedef std::vector<std::list<int>> FooType; virtual void Bar(const FooType& test_arg); }; """ expected = """\ class MockTest : public Test { public: MOCK_METHOD1(Bar, void(const FooType& test_arg)); }; """ self.assertEqualIgnoreLeadingWhitespace( expected, self.GenerateMocks(source)) def testTemplateInATemplateTypedefWithComma(self): source = """ class Test { public: typedef std::function<void( const vector<std::list<int>>&, int> FooType; virtual void Bar(const FooType& test_arg); }; """ expected = """\ class MockTest : public Test { public: MOCK_METHOD1(Bar, void(const FooType& test_arg)); }; """ self.assertEqualIgnoreLeadingWhitespace( expected, self.GenerateMocks(source)) if __name__ == '__main__': unittest.main()
mpl-2.0
cschnei3/forseti-security
google/cloud/security/common/gcp_type/instance_template.py
1
2890
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Compute InstanceTemplate. See: https://cloud.google.com/compute/docs/reference/latest/instanceTemplates """ from google.cloud.security.common.gcp_type import key class InstanceTemplate(object): """Represents InstanceTemplate resource.""" def __init__(self, **kwargs): """InstanceTemplate resource. Args: kwargs: The object's attributes. """ self.creation_timestamp = kwargs.get('creation_timestamp') self.description = kwargs.get('description') self.name = kwargs.get('name') self.properties = kwargs.get('properties') self.resource_id = kwargs.get('id') self.project_id = kwargs.get('project_id') @property def key(self): """Returns a Key identifying the object. Returns: Key: the key """ return Key.from_args(self.project_id, self.name) KEY_OBJECT_KIND = 'InstanceTemplate' class Key(key.Key): """An identifier for a specific instance template.""" @staticmethod def from_args(project_id, name): """Construct a Key from specific values. Args: project_id (str): project_id name (str): name Returns: Key: the key """ return Key(KEY_OBJECT_KIND, { 'project_id': project_id, 'name': name}) @staticmethod def from_url(url): """Construct a Key from a URL. Args: url (str): Object reference URL Returns: Key: the key Raises: ValueError: Required parameters are missing. 
""" obj = Key._from_url(KEY_OBJECT_KIND, {'projects': 'project_id', 'instanceTemplates': 'name'}, url) if obj.project_id is None or obj.name is None: raise ValueError('Missing fields in URL %r' % url) return obj @property def project_id(self): """Object property: project_id Returns: str: project_id """ return self._path_component('project_id') @property def name(self): """Object property: name Returns: str: name """ return self._path_component('name')
apache-2.0
pkainz/pylearn2
pylearn2/sandbox/cuda_convnet/tests/test_weight_acts_strided.py
44
6942
from __future__ import print_function __authors__ = "Heng Luo" from pylearn2.testing.skip import skip_if_no_gpu skip_if_no_gpu() import numpy as np from theano.compat.six.moves import xrange from theano import shared from theano.tensor import grad, constant from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs from theano.sandbox.cuda import gpu_from_host from theano.sandbox.cuda import host_from_gpu from theano.sandbox.rng_mrg import MRG_RandomStreams from theano.tensor.nnet.conv import conv2d from theano.tensor import as_tensor_variable from theano import function from theano import tensor as T import warnings from theano.sandbox import cuda from theano.sandbox.cuda.var import float32_shared_constructor from test_filter_acts_strided import FilterActs_python def WeightActs_python(images, hidacts, filter_rows, filter_cols, stride=1, ): if int(stride) != stride: raise TypeError('stride must be an int', stride) stride = int(stride) channels, rows, cols, batch_size = images.shape num_filters, hidact_rows, hidact_cols, _batch_size = hidacts.shape assert _batch_size == batch_size assert filter_rows == filter_cols f_shape = (channels, filter_rows, filter_cols, num_filters) f = np.zeros(f_shape,dtype='float32') if stride > 1: if (rows - filter_rows)%stride == 0: stride_padding_rows = 0 else: stride_padding_rows = ((rows - filter_rows)/stride + 1)*stride + filter_rows - rows idx_rows = (rows + stride_padding_rows - filter_rows)/stride if (cols - filter_cols)%stride == 0: stride_padding_cols = 0 else: stride_padding_cols = ((cols - filter_cols)/stride + 1)*stride + filter_cols - cols idx_cols = (cols + stride_padding_cols - filter_cols)/stride new_rows = rows + stride_padding_rows new_cols = cols + stride_padding_cols idx_rows = (new_rows - filter_rows)/stride idx_cols = (new_cols - filter_cols)/stride new_images = np.zeros((channels, new_rows, new_cols, batch_size),dtype='float32') 
new_images[:,:rows,:cols,:] = images else: new_images = images n_dim_filter = channels*filter_rows*filter_cols for idx_filters in xrange(num_filters): for idx_h_rows in xrange(hidact_rows): for idx_h_cols in xrange(hidact_cols): rc_images = new_images[:, idx_h_rows*stride:idx_h_rows*stride+filter_rows, idx_h_cols*stride:idx_h_cols*stride+filter_cols, :] rc_filters = np.dot( hidacts[idx_filters,idx_h_rows,idx_h_cols,:].reshape(1,batch_size), rc_images.reshape(n_dim_filter, batch_size).T) f[:,:,:,idx_filters] += rc_filters.reshape(channels, filter_rows, filter_cols) return f def test_weight_acts_strided(): # Tests that WeightActs with all possible strides rng = np.random.RandomState([2012,10,9]) #Each list in shape_list : #[img_shape,filter_shape] #[(channels, rows, cols, batch_size),(channels, filter_rows, filter_cols, num_filters)] shape_list = [[(1, 7, 8, 5), (1, 2, 2, 16)], [(3, 7, 8, 5), (3, 3, 3, 16)], [(16, 11, 11, 4), (16, 4, 4, 16)], [(3, 20, 20, 3), (3, 5, 5, 16)], [(3, 21, 21, 3), (3, 6, 6, 16)], ] for partial_sum in [0, 1, 4]: print("partial_sum: %d"%(partial_sum)) for test_idx in xrange(len(shape_list)): images = rng.uniform(-1., 1., shape_list[test_idx][0]).astype('float32') filters = rng.uniform(-1., 1., shape_list[test_idx][1]).astype('float32') gpu_images = float32_shared_constructor(images,name='images') print("test case %d..."%(test_idx+1)) for ii in xrange(filters.shape[1]): stride = ii + 1 output_python = FilterActs_python(images,filters,stride) _, h_rows, h_cols, _ = output_python.shape if partial_sum == 4: if (h_rows*h_cols)%partial_sum != 0: print("skip test case %d, stride %d when partial_sum is equal to %d"%(test_idx+1,stride,partial_sum)) break hidacts = rng.uniform(-1., 1., output_python.shape).astype('float32') gpu_hidacts = float32_shared_constructor(hidacts,name='hidacts') weights_grad_python = WeightActs_python(images,hidacts,filters.shape[1],filters.shape[2],stride) weights_grad = WeightActs(partial_sum=partial_sum,stride=stride)( 
gpu_images, gpu_hidacts, as_tensor_variable((filters.shape[1], filters.shape[2])) )[0] weights_grad = host_from_gpu(weights_grad) f = function([], weights_grad) weights_grad_val = f() warnings.warn("""test_weight_acts_strided success criterion is not very strict.""") if np.abs(weights_grad_val - weights_grad_python).max() > 3.4e-5: assert type(weights_grad_val) == type(weights_grad_python) assert weights_grad_val.dtype == weights_grad_python.dtype if weights_grad_val.shape != weights_grad_python.shape: print('cuda-convnet shape: ',weights_grad_val.shape) print('python conv shape: ',weights_grad_python.shape) assert False err = np.abs(weights_grad_val - weights_grad_python) print('stride %d'%stride) print('absolute error range: ', (err.min(), err.max())) print('mean absolute error: ', err.mean()) print('cuda-convnet value range: ', (weights_grad_val.min(), weights_grad_val.max())) print('python conv value range: ', (weights_grad_python.min(), weights_grad_python.max())) #assert False #print "stride %d"%stride #print "pass" if __name__ == '__main__': test_weight_acts_strided()
bsd-3-clause
ArneBab/pypyjs
website/demo/home/rfk/repos/pypy/lib-python/2.7/codeop.py
306
5999
r"""Utilities to compile possibly incomplete Python source code. This module provides two interfaces, broadly similar to the builtin function compile(), which take program text, a filename and a 'mode' and: - Return code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). Approach: First, check if the source consists entirely of blank lines and comments; if so, replace it with 'pass', because the built-in parser doesn't always do the right thing for these. Compile three times: as is, with \n, and with \n\n appended. If it compiles as is, it's complete. If it compiles with one \n appended, we expect more. If it doesn't compile either way, we compare the error we get when compiling with \n or \n\n appended. If the errors are the same, the code is broken. But if the errors are different, we expect more. Not intuitive; not even guaranteed to hold in future releases; but this matches the compiler's behavior from Python 1.4 through 2.2, at least. Caveat: It is possible (but not likely) that the parser stops parsing with a successful outcome before reaching the end of the source; in this case, trailing symbols may be ignored instead of causing an error. For example, a backslash followed by two newlines may be followed by arbitrary garbage. This will be fixed once the API for the parser is better. The two interfaces are: compile_command(source, filename, symbol): Compiles a single command in the manner described above. CommandCompiler(): Instances of this class have __call__ methods identical in signature to compile_command; the difference is that if the instance compiles program text containing a __future__ statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force. 
The module also provides another class: Compile(): Instances of this class act like the built-in function compile, but with 'memory' in the sense described above. """ import __future__ _features = [getattr(__future__, fname) for fname in __future__.all_feature_names] __all__ = ["compile_command", "Compile", "CommandCompiler"] PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h def _maybe_compile(compiler, source, filename, symbol): # Check for source consisting of only blank lines and comments for line in source.split("\n"): line = line.strip() if line and line[0] != '#': break # Leave it alone else: if symbol != "eval": source = "pass" # Replace it with a 'pass' statement err = err1 = err2 = None code = code1 = code2 = None try: code = compiler(source, filename, symbol) except SyntaxError, err: pass try: code1 = compiler(source + "\n", filename, symbol) except SyntaxError, err1: pass try: code2 = compiler(source + "\n\n", filename, symbol) except SyntaxError, err2: pass if code: return code if not code1 and repr(err1) == repr(err2): raise SyntaxError, err1 def _compile(source, filename, symbol): return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT) def compile_command(source, filename="<input>", symbol="single"): r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "<input>" symbol -- optional grammar start symbol; "single" (default) or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). 
""" return _maybe_compile(_compile, source, filename, symbol) class Compile: """Instances of this class behave much like the built-in compile function, but if one is used to compile text containing a future statement, it "remembers" and compiles all subsequent program texts with the statement in force.""" def __init__(self): self.flags = PyCF_DONT_IMPLY_DEDENT def __call__(self, source, filename, symbol): codeob = compile(source, filename, symbol, self.flags, 1) for feature in _features: if codeob.co_flags & feature.compiler_flag: self.flags |= feature.compiler_flag return codeob class CommandCompiler: """Instances of this class have __call__ methods identical in signature to compile_command; the difference is that if the instance compiles program text containing a __future__ statement, the instance 'remembers' and compiles all subsequent program texts with the statement in force.""" def __init__(self,): self.compiler = Compile() def __call__(self, source, filename="<input>", symbol="single"): r"""Compile a command and determine whether it is incomplete. Arguments: source -- the source string; may contain \n characters filename -- optional filename from which source was read; default "<input>" symbol -- optional grammar start symbol; "single" (default) or "eval" Return value / exceptions raised: - Return a code object if the command is complete and valid - Return None if the command is incomplete - Raise SyntaxError, ValueError or OverflowError if the command is a syntax error (OverflowError and ValueError can be produced by malformed literals). """ return _maybe_compile(self.compiler, source, filename, symbol)
mit
Distrotech/intellij-community
python/helpers/profiler/thrift/protocol/TBase.py
208
2637
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from thrift.Thrift import * from thrift.protocol import TBinaryProtocol from thrift.transport import TTransport try: from thrift.protocol import fastbinary except: fastbinary = None class TBase(object): __slots__ = [] def __repr__(self): L = ['%s=%r' % (key, getattr(self, key)) for key in self.__slots__] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): if not isinstance(other, self.__class__): return False for attr in self.__slots__: my_val = getattr(self, attr) other_val = getattr(other, attr) if my_val != other_val: return False return True def __ne__(self, other): return not (self == other) def read(self, iprot): if (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None): fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStruct(self, self.thrift_spec) def write(self, oprot): if (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None): oprot.trans.write( fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) 
return oprot.writeStruct(self, self.thrift_spec) class TExceptionBase(Exception): # old style class so python2.4 can raise exceptions derived from this # This can't inherit from TBase because of that limitation. __slots__ = [] __repr__ = TBase.__repr__.im_func __eq__ = TBase.__eq__.im_func __ne__ = TBase.__ne__.im_func read = TBase.read.im_func write = TBase.write.im_func
apache-2.0
jeremiahyan/odoo
addons/hw_drivers/interface.py
2
3092
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import logging from threading import Thread import time from odoo.addons.hw_drivers.main import drivers, interfaces, iot_devices _logger = logging.getLogger(__name__) class InterfaceMetaClass(type): def __new__(cls, clsname, bases, attrs): new_interface = super(InterfaceMetaClass, cls).__new__(cls, clsname, bases, attrs) interfaces[clsname] = new_interface return new_interface class Interface(Thread, metaclass=InterfaceMetaClass): _loop_delay = 3 # Delay (in seconds) between calls to get_devices or 0 if it should be called only once _detected_devices = {} connection_type = '' def __init__(self): super(Interface, self).__init__() self.drivers = sorted([d for d in drivers if d.connection_type == self.connection_type], key=lambda d: d.priority, reverse=True) def run(self): while self.connection_type and self.drivers: self.update_iot_devices(self.get_devices()) if not self._loop_delay: break time.sleep(self._loop_delay) def update_iot_devices(self, devices={}): added = devices.keys() - self._detected_devices removed = self._detected_devices - devices.keys() # keys() returns a dict_keys, and the values of that stay in sync with the # original dictionary if it changes. This means that get_devices needs to return # a newly created dictionary every time. If it doesn't do that and reuses the # same dictionary, this logic won't detect any changes that are made. Could be # avoided by converting the dict_keys into a regular dict. The current logic # also can't detect if a device is replaced by a different one with the same # key. Also, _detected_devices starts out as a class variable but gets turned # into an instance variable here. It would be better if it was an instance # variable from the start to avoid confusion. 
self._detected_devices = devices.keys() for identifier in removed: if identifier in iot_devices: iot_devices[identifier].disconnect() _logger.info('Device %s is now disconnected', identifier) for identifier in added: for driver in self.drivers: if driver.supported(devices[identifier]): _logger.info('Device %s is now connected', identifier) d = driver(identifier, devices[identifier]) d.daemon = True iot_devices[identifier] = d # Start the thread after creating the iot_devices entry so the # thread can assume the iot_devices entry will exist while it's # running, at least until the `disconnect` above gets triggered # when `removed` is not empty. d.start() break def get_devices(self): raise NotImplementedError()
gpl-3.0
markeTIC/OCB
addons/l10n_hr/__openerp__.py
430
2728
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Module: l10n_hr # Author: Goran Kliska # mail: goran.kliska(AT)slobodni-programi.hr # Copyright: Slobodni programi d.o.o., Zagreb # Contributions: # Tomislav Bošnjaković, Storm Computers d.o.o. : # - account types # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { "name": "Croatia - RRIF 2012 COA", "description": """ Croatian localisation. ====================== Author: Goran Kliska, Slobodni programi d.o.o., Zagreb http://www.slobodni-programi.hr Contributions: Tomislav Bošnjaković, Storm Computers: tipovi konta Ivan Vađić, Slobodni programi: tipovi konta Description: Croatian Chart of Accounts (RRIF ver.2012) RRIF-ov računski plan za poduzetnike za 2012. 
Vrste konta Kontni plan prema RRIF-u, dorađen u smislu kraćenja naziva i dodavanja analitika Porezne grupe prema poreznoj prijavi Porezi PDV obrasca Ostali porezi Osnovne fiskalne pozicije Izvori podataka: http://www.rrif.hr/dok/preuzimanje/rrif-rp2011.rar http://www.rrif.hr/dok/preuzimanje/rrif-rp2012.rar """, "version": "12.2", "author": "OpenERP Croatian Community", "category": 'Localization/Account Charts', "website": "https://code.launchpad.net/openobject-croatia", 'depends': [ 'account', 'account_chart', ], 'data': [ 'data/account.account.type.csv', 'data/account.tax.code.template.csv', 'data/account.account.template.csv', 'l10n_hr_chart_template.xml', 'l10n_hr_wizard.xml', 'data/account.tax.template.csv', 'data/fiscal_position_template.xml', ], "demo": [], 'test': [], "active": False, "installable": True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
smutt/dpkt
dpkt/arp.py
3
1143
# $Id: arp.py 23 2006-11-08 15:45:33Z dugsong $ # -*- coding: utf-8 -*- """Address Resolution Protocol.""" from __future__ import absolute_import from . import dpkt # Hardware address format ARP_HRD_ETH = 0x0001 # ethernet hardware ARP_HRD_IEEE802 = 0x0006 # IEEE 802 hardware # Protocol address format ARP_PRO_IP = 0x0800 # IP protocol # ARP operation ARP_OP_REQUEST = 1 # request to resolve ha given pa ARP_OP_REPLY = 2 # response giving hardware address ARP_OP_REVREQUEST = 3 # request to resolve pa given ha ARP_OP_REVREPLY = 4 # response giving protocol address class ARP(dpkt.Packet): """Address Resolution Protocol. See more about the ARP on \ https://en.wikipedia.org/wiki/Address_Resolution_Protocol Attributes: __hdr__: Header fields of ARP. """ __hdr__ = ( ('hrd', 'H', ARP_HRD_ETH), ('pro', 'H', ARP_PRO_IP), ('hln', 'B', 6), # hardware address length ('pln', 'B', 4), # protocol address length ('op', 'H', ARP_OP_REQUEST), ('sha', '6s', ''), ('spa', '4s', ''), ('tha', '6s', ''), ('tpa', '4s', '') )
bsd-3-clause
YangSongzhou/django
django/forms/forms.py
27
19335
""" Form classes """ from __future__ import unicode_literals import copy from collections import OrderedDict from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.forms.fields import Field, FileField # pretty_name is imported for backwards compatibility in Django 1.9 from django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA from django.forms.widgets import Media, MediaDefiningClass from django.utils import six from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.html import conditional_escape, html_safe from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ __all__ = ('BaseForm', 'Form') class DeclarativeFieldsMetaclass(MediaDefiningClass): """ Metaclass that collects Fields declared on the base classes. """ def __new__(mcs, name, bases, attrs): # Collect fields from current class. current_fields = [] for key, value in list(attrs.items()): if isinstance(value, Field): current_fields.append((key, value)) attrs.pop(key) current_fields.sort(key=lambda x: x[1].creation_counter) attrs['declared_fields'] = OrderedDict(current_fields) new_class = (super(DeclarativeFieldsMetaclass, mcs) .__new__(mcs, name, bases, attrs)) # Walk through the MRO. declared_fields = OrderedDict() for base in reversed(new_class.__mro__): # Collect fields from base class. if hasattr(base, 'declared_fields'): declared_fields.update(base.declared_fields) # Field shadowing. for attr, value in base.__dict__.items(): if value is None and attr in declared_fields: declared_fields.pop(attr) new_class.base_fields = declared_fields new_class.declared_fields = declared_fields return new_class @html_safe @python_2_unicode_compatible class BaseForm(object): # This is the main implementation of all the Form logic. Note that this # class is different than Form. See the comments by the Form class for more # information. 
Any improvements to the form API should be made to *this* # class, not to the Form class. field_order = None prefix = None def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, field_order=None): self.is_bound = data is not None or files is not None self.data = data or {} self.files = files or {} self.auto_id = auto_id if prefix is not None: self.prefix = prefix self.initial = initial or {} self.error_class = error_class # Translators: This is the default suffix added to form field labels self.label_suffix = label_suffix if label_suffix is not None else _(':') self.empty_permitted = empty_permitted self._errors = None # Stores the errors after clean() has been called. # The base_fields class attribute is the *class-wide* definition of # fields. Because a particular *instance* of the class might want to # alter self.fields, we create self.fields here by copying base_fields. # Instances should always modify self.fields; they should not modify # self.base_fields. self.fields = copy.deepcopy(self.base_fields) self._bound_fields_cache = {} self.order_fields(self.field_order if field_order is None else field_order) def order_fields(self, field_order): """ Rearranges the fields according to field_order. field_order is a list of field names specifying the order. Fields not included in the list are appended in the default order for backward compatibility with subclasses not overriding field_order. If field_order is None, all fields are kept in the order defined in the class. Unknown fields in field_order are ignored to allow disabling fields in form subclasses without redefining ordering. 
""" if field_order is None: return fields = OrderedDict() for key in field_order: try: fields[key] = self.fields.pop(key) except KeyError: # ignore unknown fields pass fields.update(self.fields) # add remaining fields in original order self.fields = fields def __str__(self): return self.as_table() def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = self.is_bound and not bool(self._errors) return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % { 'cls': self.__class__.__name__, 'bound': self.is_bound, 'valid': is_valid, 'fields': ';'.join(self.fields), } def __iter__(self): for name in self.fields: yield self[name] def __getitem__(self, name): "Returns a BoundField with the given name." try: field = self.fields[name] except KeyError: raise KeyError( "Key %r not found in '%s'" % (name, self.__class__.__name__)) if name not in self._bound_fields_cache: self._bound_fields_cache[name] = field.get_bound_field(self, name) return self._bound_fields_cache[name] @property def errors(self): "Returns an ErrorDict for the data provided for the form" if self._errors is None: self.full_clean() return self._errors def is_valid(self): """ Returns True if the form has no errors. Otherwise, False. If errors are being ignored, returns False. """ return self.is_bound and not self.errors def add_prefix(self, field_name): """ Returns the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override. """ return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name def add_initial_prefix(self, field_name): """ Add a 'initial' prefix for checking dynamic initial values """ return 'initial-%s' % self.add_prefix(field_name) def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row): "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()." top_errors = self.non_field_errors() # Errors that should be displayed above all fields. 
output, hidden_fields = [], [] for name, field in self.fields.items(): html_class_attr = '' bf = self[name] # Escape and cache in local variable. bf_errors = self.error_class([conditional_escape(error) for error in bf.errors]) if bf.is_hidden: if bf_errors: top_errors.extend( [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)} for e in bf_errors]) hidden_fields.append(six.text_type(bf)) else: # Create a 'class="..."' attribute if the row should have any # CSS classes applied. css_classes = bf.css_classes() if css_classes: html_class_attr = ' class="%s"' % css_classes if errors_on_separate_row and bf_errors: output.append(error_row % force_text(bf_errors)) if bf.label: label = conditional_escape(force_text(bf.label)) label = bf.label_tag(label) or '' else: label = '' if field.help_text: help_text = help_text_html % force_text(field.help_text) else: help_text = '' output.append(normal_row % { 'errors': force_text(bf_errors), 'label': force_text(label), 'field': six.text_type(bf), 'help_text': help_text, 'html_class_attr': html_class_attr, 'css_classes': css_classes, 'field_name': bf.html_name, }) if top_errors: output.insert(0, error_row % force_text(top_errors)) if hidden_fields: # Insert any hidden fields in the last row. str_hidden = ''.join(hidden_fields) if output: last_row = output[-1] # Chop off the trailing row_ender (e.g. '</td></tr>') and # insert the hidden fields. if not last_row.endswith(row_ender): # This can happen in the as_p() case (and possibly others # that users write): if there are only top errors, we may # not be able to conscript the last row for our purposes, # so insert a new, empty row. last_row = (normal_row % { 'errors': '', 'label': '', 'field': '', 'help_text': '', 'html_class_attr': html_class_attr, 'css_classes': '', 'field_name': '', }) output.append(last_row) output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender else: # If there aren't any rows in the output, just append the # hidden fields. 
output.append(str_hidden) return mark_safe('\n'.join(output)) def as_table(self): "Returns this form rendered as HTML <tr>s -- excluding the <table></table>." return self._html_output( normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>', error_row='<tr><td colspan="2">%s</td></tr>', row_ender='</td></tr>', help_text_html='<br /><span class="helptext">%s</span>', errors_on_separate_row=False) def as_ul(self): "Returns this form rendered as HTML <li>s -- excluding the <ul></ul>." return self._html_output( normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>', error_row='<li>%s</li>', row_ender='</li>', help_text_html=' <span class="helptext">%s</span>', errors_on_separate_row=False) def as_p(self): "Returns this form rendered as HTML <p>s." return self._html_output( normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>', error_row='%s', row_ender='</p>', help_text_html=' <span class="helptext">%s</span>', errors_on_separate_row=True) def non_field_errors(self): """ Returns an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Returns an empty ErrorList if there are none. """ return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield')) def add_error(self, field, error): """ Update the content of `self._errors`. The `field` argument is the name of the field to which the errors should be added. If its value is None the errors will be treated as NON_FIELD_ERRORS. The `error` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. What we define as an "error" can be either a simple string or an instance of ValidationError with its message attribute set and what we define as list or dictionary can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. 
If `error` is a dictionary, the `field` argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary. """ if not isinstance(error, ValidationError): # Normalize to ValidationError and let its constructor # do the hard work of making sense of the input. error = ValidationError(error) if hasattr(error, 'error_dict'): if field is not None: raise TypeError( "The argument `field` must be `None` when the `error` " "argument contains errors for multiple fields." ) else: error = error.error_dict else: error = {field or NON_FIELD_ERRORS: error.error_list} for field, error_list in error.items(): if field not in self.errors: if field != NON_FIELD_ERRORS and field not in self.fields: raise ValueError( "'%s' has no field named '%s'." % (self.__class__.__name__, field)) if field == NON_FIELD_ERRORS: self._errors[field] = self.error_class(error_class='nonfield') else: self._errors[field] = self.error_class() self._errors[field].extend(error_list) if field in self.cleaned_data: del self.cleaned_data[field] def has_error(self, field, code=None): if code is None: return field in self.errors if field in self.errors: for error in self.errors.as_data()[field]: if error.code == code: return True return False def full_clean(self): """ Cleans all of self.data and populates self._errors and self.cleaned_data. """ self._errors = ErrorDict() if not self.is_bound: # Stop further processing. return self.cleaned_data = {} # If the form is permitted to be empty, and none of the form data has # changed from the initial data, short circuit any validation. if self.empty_permitted and not self.has_changed(): return self._clean_fields() self._clean_form() self._post_clean() def _clean_fields(self): for name, field in self.fields.items(): # value_from_datadict() gets the data from the data dictionaries. # Each widget type knows how to retrieve its own data, because some # widgets split data over several HTML fields. 
if field.disabled: value = self.initial.get(name, field.initial) else: value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name)) try: if isinstance(field, FileField): initial = self.initial.get(name, field.initial) value = field.clean(value, initial) else: value = field.clean(value) self.cleaned_data[name] = value if hasattr(self, 'clean_%s' % name): value = getattr(self, 'clean_%s' % name)() self.cleaned_data[name] = value except ValidationError as e: self.add_error(name, e) def _clean_form(self): try: cleaned_data = self.clean() except ValidationError as e: self.add_error(None, e) else: if cleaned_data is not None: self.cleaned_data = cleaned_data def _post_clean(self): """ An internal hook for performing additional cleaning after form cleaning is complete. Used for model validation in model forms. """ pass def clean(self): """ Hook for doing any extra form-wide cleaning after Field.clean() has been called on every field. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field named '__all__'. """ return self.cleaned_data def has_changed(self): """ Returns True if data differs from initial. """ return bool(self.changed_data) @cached_property def changed_data(self): data = [] for name, field in self.fields.items(): prefixed_name = self.add_prefix(name) data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name) if not field.show_hidden_initial: initial_value = self.initial.get(name, field.initial) if callable(initial_value): initial_value = initial_value() else: initial_prefixed_name = self.add_initial_prefix(name) hidden_widget = field.hidden_widget() try: initial_value = field.to_python(hidden_widget.value_from_datadict( self.data, self.files, initial_prefixed_name)) except ValidationError: # Always assume data has changed if validation fails. 
data.append(name) continue if field.has_changed(initial_value, data_value): data.append(name) return data @property def media(self): """ Provide a description of all media required to render the widgets on this form """ media = Media() for field in self.fields.values(): media = media + field.widget.media return media def is_multipart(self): """ Returns True if the form needs to be multipart-encoded, i.e. it has FileInput. Otherwise, False. """ for field in self.fields.values(): if field.widget.needs_multipart_form: return True return False def hidden_fields(self): """ Returns a list of all the BoundField objects that are hidden fields. Useful for manual form layout in templates. """ return [field for field in self if field.is_hidden] def visible_fields(self): """ Returns a list of BoundField objects that aren't hidden fields. The opposite of the hidden_fields() method. """ return [field for field in self if not field.is_hidden] class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)): "A collection of Fields, plus their associated data." # This is a separate class from BaseForm in order to abstract the way # self.fields is specified. This class (Form) is the one that does the # fancy metaclass stuff purely for the semantic sugar -- it allows one # to define a form using declarative syntax. # BaseForm itself has no way of designating self.fields.
bsd-3-clause
adoosii/edx-platform
lms/djangoapps/instructor/tests/views/test_instructor_dashboard.py
37
11133
""" Unit tests for instructor_dashboard.py. """ import ddt from mock import patch from django.conf import settings from django.core.urlresolvers import reverse from django.test.client import RequestFactory from django.test.utils import override_settings from courseware.tabs import get_course_tab_list from courseware.tests.factories import UserFactory from courseware.tests.helpers import LoginEnrollmentTestCase from common.test.utils import XssTestMixin from student.tests.factories import AdminFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from shoppingcart.models import PaidCourseRegistration, Order, CourseRegCodeItem from course_modes.models import CourseMode from student.roles import CourseFinanceAdminRole from student.models import CourseEnrollment @ddt.ddt class TestInstructorDashboard(ModuleStoreTestCase, LoginEnrollmentTestCase, XssTestMixin): """ Tests for the instructor dashboard (not legacy). """ def setUp(self): """ Set up tests """ super(TestInstructorDashboard, self).setUp() self.course = CourseFactory.create( grading_policy={"GRADE_CUTOFFS": {"A": 0.75, "B": 0.63, "C": 0.57, "D": 0.5}}, display_name='<script>alert("XSS")</script>' ) self.course_mode = CourseMode(course_id=self.course.id, mode_slug="honor", mode_display_name="honor cert", min_price=40) self.course_mode.save() # Create instructor account self.instructor = AdminFactory.create() self.client.login(username=self.instructor.username, password="test") # URL for instructor dash self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}) def get_dashboard_enrollment_message(self): """ Returns expected dashboard enrollment message with link to Insights. 
""" return 'Enrollment data is now available in <a href="http://example.com/courses/{}" ' \ 'target="_blank">Example</a>.'.format(unicode(self.course.id)) def get_dashboard_analytics_message(self): """ Returns expected dashboard demographic message with link to Insights. """ return 'For analytics about your course, go to <a href="http://example.com/courses/{}" ' \ 'target="_blank">Example</a>.'.format(unicode(self.course.id)) def test_instructor_tab(self): """ Verify that the instructor tab appears for staff only. """ def has_instructor_tab(user, course): """Returns true if the "Instructor" tab is shown.""" request = RequestFactory().request() request.user = user tabs = get_course_tab_list(request, course) return len([tab for tab in tabs if tab.name == 'Instructor']) == 1 self.assertTrue(has_instructor_tab(self.instructor, self.course)) student = UserFactory.create() self.assertFalse(has_instructor_tab(student, self.course)) def test_default_currency_in_the_html_response(self): """ Test that checks the default currency_symbol ($) in the response """ CourseFinanceAdminRole(self.course.id).add_users(self.instructor) total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id) response = self.client.get(self.url) self.assertTrue('${amount}'.format(amount=total_amount) in response.content) def test_course_name_xss(self): """Test that the instructor dashboard correctly escapes course names with script tags. 
""" response = self.client.get(self.url) self.assert_xss(response, '<script>alert("XSS")</script>') @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs']) def test_override_currency_settings_in_the_html_response(self): """ Test that checks the default currency_symbol ($) in the response """ CourseFinanceAdminRole(self.course.id).add_users(self.instructor) total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id) response = self.client.get(self.url) self.assertIn('{currency}{amount}'.format(currency='Rs', amount=total_amount), response.content) @patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False}) @override_settings(ANALYTICS_DASHBOARD_URL='') def test_no_enrollments(self): """ Test enrollment section is hidden. """ response = self.client.get(self.url) # no enrollment information should be visible self.assertFalse('<h2>Enrollment Information</h2>' in response.content) @patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True}) @override_settings(ANALYTICS_DASHBOARD_URL='') def test_show_enrollments_data(self): """ Test enrollment data is shown. """ response = self.client.get(self.url) # enrollment information visible self.assertTrue('<h2>Enrollment Information</h2>' in response.content) self.assertTrue('<td>Verified</td>' in response.content) self.assertTrue('<td>Audit</td>' in response.content) self.assertTrue('<td>Honor</td>' in response.content) self.assertTrue('<td>Professional</td>' in response.content) # dashboard link hidden self.assertFalse(self.get_dashboard_enrollment_message() in response.content) @patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': True}) @override_settings(ANALYTICS_DASHBOARD_URL='') def test_show_enrollment_data_for_prof_ed(self): # Create both "professional" (meaning professional + verification) # and "no-id-professional" (meaning professional without verification) # These should be aggregated for display purposes. 
users = [UserFactory() for _ in range(2)] CourseEnrollment.enroll(users[0], self.course.id, mode="professional") CourseEnrollment.enroll(users[1], self.course.id, mode="no-id-professional") response = self.client.get(self.url) # Check that the number of professional enrollments is two self.assertContains(response, "<td>Professional</td><td>2</td>") @patch.dict(settings.FEATURES, {'DISPLAY_ANALYTICS_ENROLLMENTS': False}) @override_settings(ANALYTICS_DASHBOARD_URL='http://example.com') @override_settings(ANALYTICS_DASHBOARD_NAME='Example') def test_show_dashboard_enrollment_message(self): """ Test enrollment dashboard message is shown and data is hidden. """ response = self.client.get(self.url) # enrollment information hidden self.assertFalse('<td>Verified</td>' in response.content) self.assertFalse('<td>Audit</td>' in response.content) self.assertFalse('<td>Honor</td>' in response.content) self.assertFalse('<td>Professional</td>' in response.content) # link to dashboard shown expected_message = self.get_dashboard_enrollment_message() self.assertTrue(expected_message in response.content) @override_settings(ANALYTICS_DASHBOARD_URL='') @override_settings(ANALYTICS_DASHBOARD_NAME='') def test_dashboard_analytics_tab_not_shown(self): """ Test dashboard analytics tab isn't shown if insights isn't configured. 
""" response = self.client.get(self.url) analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>' self.assertFalse(analytics_section in response.content) @override_settings(ANALYTICS_DASHBOARD_URL='http://example.com') @override_settings(ANALYTICS_DASHBOARD_NAME='Example') def test_dashboard_analytics_points_at_insights(self): """ Test analytics dashboard message is shown """ response = self.client.get(self.url) analytics_section = '<li class="nav-item"><a href="" data-section="instructor_analytics">Analytics</a></li>' self.assertTrue(analytics_section in response.content) # link to dashboard shown expected_message = self.get_dashboard_analytics_message() self.assertTrue(expected_message in response.content) def add_course_to_user_cart(self, cart, course_key): """ adding course to user cart """ reg_item = PaidCourseRegistration.add_to_order(cart, course_key) return reg_item @patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True}) def test_total_credit_cart_sales_amount(self): """ Test to check the total amount for all the credit card purchases. 
""" student = UserFactory.create() self.client.login(username=student.username, password="test") student_cart = Order.get_cart_for_user(student) item = self.add_course_to_user_cart(student_cart, self.course.id) resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4}) self.assertEqual(resp.status_code, 200) student_cart.purchase() self.client.login(username=self.instructor.username, password="test") CourseFinanceAdminRole(self.course.id).add_users(self.instructor) single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course.id) bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(self.course.id) total_amount = single_purchase_total + bulk_purchase_total response = self.client.get(self.url) self.assertIn('{currency}{amount}'.format(currency='$', amount=total_amount), response.content) @ddt.data( (True, True, True), (True, False, False), (True, None, False), (False, True, False), (False, False, False), (False, None, False), ) @ddt.unpack def test_ccx_coaches_option_on_admin_list_management_instructor( self, ccx_feature_flag, enable_ccx, expected_result ): """ Test whether the "CCX Coaches" option is visible or hidden depending on the value of course.enable_ccx. """ with patch.dict(settings.FEATURES, {'CUSTOM_COURSES_EDX': ccx_feature_flag}): self.course.enable_ccx = enable_ccx self.store.update_item(self.course, self.instructor.id) response = self.client.get(self.url) self.assertEquals( expected_result, 'CCX Coaches are able to create their own Custom Courses based on this course' in response.content ) def test_grade_cutoffs(self): """ Verify that grade cutoffs are displayed in the correct order. """ response = self.client.get(self.url) self.assertIn('D: 0.5, C: 0.57, B: 0.63, A: 0.75', response.content)
agpl-3.0
consulo/consulo-mercurial
src/test/resources/bin/mercurial/templatekw.py
92
13228
# templatekw.py - common changeset template keywords # # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from node import hex import patch, util, error import hbisect # This helper class allows us to handle both: # "{files}" (legacy command-line-specific list hack) and # "{files % '{file}\n'}" (hgweb-style with inlining and function support) class _hybrid(object): def __init__(self, gen, values, joinfmt=None): self.gen = gen self.values = values if joinfmt: self.joinfmt = joinfmt else: self.joinfmt = lambda x: x.values()[0] def __iter__(self): return self.gen def __call__(self): for x in self.values: yield x def showlist(name, values, plural=None, element=None, **args): if not element: element = name f = _showlist(name, values, plural, **args) return _hybrid(f, [{element: x} for x in values]) def _showlist(name, values, plural=None, **args): '''expand set of values. name is name of key in template map. values is list of strings or dicts. plural is plural of name, if not simply name + 's'. expansion works like this, given name 'foo'. if values is empty, expand 'no_foos'. if 'foo' not in template map, return values as a string, joined by space. expand 'start_foos'. for each value, expand 'foo'. if 'last_foo' in template map, expand it instead of 'foo' for last key. expand 'end_foos'. 
''' templ = args['templ'] if plural: names = plural else: names = name + 's' if not values: noname = 'no_' + names if noname in templ: yield templ(noname, **args) return if name not in templ: if isinstance(values[0], str): yield ' '.join(values) else: for v in values: yield dict(v, **args) return startname = 'start_' + names if startname in templ: yield templ(startname, **args) vargs = args.copy() def one(v, tag=name): try: vargs.update(v) except (AttributeError, ValueError): try: for a, b in v: vargs[a] = b except ValueError: vargs[name] = v return templ(tag, **vargs) lastname = 'last_' + name if lastname in templ: last = values.pop() else: last = None for v in values: yield one(v) if last is not None: yield one(last, tag=lastname) endname = 'end_' + names if endname in templ: yield templ(endname, **args) def getfiles(repo, ctx, revcache): if 'files' not in revcache: revcache['files'] = repo.status(ctx.p1().node(), ctx.node())[:3] return revcache['files'] def getlatesttags(repo, ctx, cache): '''return date, distance and name for the latest tag of rev''' if 'latesttags' not in cache: # Cache mapping from rev to a tuple with tag date, tag # distance and tag name cache['latesttags'] = {-1: (0, 0, 'null')} latesttags = cache['latesttags'] rev = ctx.rev() todo = [rev] while todo: rev = todo.pop() if rev in latesttags: continue ctx = repo[rev] tags = [t for t in ctx.tags() if repo.tagtype(t) == 'global'] if tags: latesttags[rev] = ctx.date()[0], 0, ':'.join(sorted(tags)) continue try: # The tuples are laid out so the right one can be found by # comparison. 
pdate, pdist, ptag = max( latesttags[p.rev()] for p in ctx.parents()) except KeyError: # Cache miss - recurse todo.append(rev) todo.extend(p.rev() for p in ctx.parents()) continue latesttags[rev] = pdate, pdist + 1, ptag return latesttags[rev] def getrenamedfn(repo, endrev=None): rcache = {} if endrev is None: endrev = len(repo) def getrenamed(fn, rev): '''looks up all renames for a file (up to endrev) the first time the file is given. It indexes on the changerev and only parses the manifest if linkrev != changerev. Returns rename info for fn at changerev rev.''' if fn not in rcache: rcache[fn] = {} fl = repo.file(fn) for i in fl: lr = fl.linkrev(i) renamed = fl.renamed(fl.node(i)) rcache[fn][lr] = renamed if lr >= endrev: break if rev in rcache[fn]: return rcache[fn][rev] # If linkrev != rev (i.e. rev not found in rcache) fallback to # filectx logic. try: return repo[rev][fn].renamed() except error.LookupError: return None return getrenamed def showauthor(repo, ctx, templ, **args): """:author: String. The unmodified author of the changeset.""" return ctx.user() def showbisect(repo, ctx, templ, **args): """:bisect: String. The changeset bisection status.""" return hbisect.label(repo, ctx.node()) def showbranch(**args): """:branch: String. The name of the branch on which the changeset was committed. """ return args['ctx'].branch() def showbranches(**args): """:branches: List of strings. The name of the branch on which the changeset was committed. Will be empty if the branch name was default. """ branch = args['ctx'].branch() if branch != 'default': return showlist('branch', [branch], plural='branches', **args) def showbookmarks(**args): """:bookmarks: List of strings. Any bookmarks associated with the changeset. """ bookmarks = args['ctx'].bookmarks() return showlist('bookmark', bookmarks, **args) def showchildren(**args): """:children: List of strings. 
The children of the changeset.""" ctx = args['ctx'] childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()] return showlist('children', childrevs, element='child', **args) def showdate(repo, ctx, templ, **args): """:date: Date information. The date when the changeset was committed.""" return ctx.date() def showdescription(repo, ctx, templ, **args): """:desc: String. The text of the changeset description.""" return ctx.description().strip() def showdiffstat(repo, ctx, templ, **args): """:diffstat: String. Statistics of changes with the following format: "modified files: +added/-removed lines" """ stats = patch.diffstatdata(util.iterlines(ctx.diff())) maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats) return '%s: +%s/-%s' % (len(stats), adds, removes) def showextras(**args): templ = args['templ'] for key, value in sorted(args['ctx'].extra().items()): args = args.copy() args.update(dict(key=key, value=value)) yield templ('extra', **args) def showfileadds(**args): """:file_adds: List of strings. Files added by this changeset.""" repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] return showlist('file_add', getfiles(repo, ctx, revcache)[1], element='file', **args) def showfilecopies(**args): """:file_copies: List of strings. Files copied in this changeset with their sources. """ cache, ctx = args['cache'], args['ctx'] copies = args['revcache'].get('copies') if copies is None: if 'getrenamed' not in cache: cache['getrenamed'] = getrenamedfn(args['repo']) copies = [] getrenamed = cache['getrenamed'] for fn in ctx.files(): rename = getrenamed(fn, ctx.rev()) if rename: copies.append((fn, rename[0])) c = [{'name': x[0], 'source': x[1]} for x in copies] f = _showlist('file_copy', c, plural='file_copies', **args) return _hybrid(f, c, lambda x: '%s (%s)' % (x['name'], x['source'])) # showfilecopiesswitch() displays file copies only if copy records are # provided before calling the templater, usually with a --copies # command line switch. 
def showfilecopiesswitch(**args): """:file_copies_switch: List of strings. Like "file_copies" but displayed only if the --copied switch is set. """ copies = args['revcache'].get('copies') or [] c = [{'name': x[0], 'source': x[1]} for x in copies] f = _showlist('file_copy', c, plural='file_copies', **args) return _hybrid(f, c, lambda x: '%s (%s)' % (x['name'], x['source'])) def showfiledels(**args): """:file_dels: List of strings. Files removed by this changeset.""" repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] return showlist('file_del', getfiles(repo, ctx, revcache)[2], element='file', **args) def showfilemods(**args): """:file_mods: List of strings. Files modified by this changeset.""" repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] return showlist('file_mod', getfiles(repo, ctx, revcache)[0], element='file', **args) def showfiles(**args): """:files: List of strings. All files modified, added, or removed by this changeset. """ return showlist('file', args['ctx'].files(), **args) def showlatesttag(repo, ctx, templ, cache, **args): """:latesttag: String. Most recent global tag in the ancestors of this changeset. """ return getlatesttags(repo, ctx, cache)[2] def showlatesttagdistance(repo, ctx, templ, cache, **args): """:latesttagdistance: Integer. Longest path to the latest tag.""" return getlatesttags(repo, ctx, cache)[1] def showmanifest(**args): repo, ctx, templ = args['repo'], args['ctx'], args['templ'] args = args.copy() args.update(dict(rev=repo.manifest.rev(ctx.changeset()[0]), node=hex(ctx.changeset()[0]))) return templ('manifest', **args) def shownode(repo, ctx, templ, **args): """:node: String. The changeset identification hash, as a 40 hexadecimal digit string. """ return ctx.hex() def showp1rev(repo, ctx, templ, **args): """:p1rev: Integer. 
The repository-local revision number of the changeset's first parent, or -1 if the changeset has no parents.""" return ctx.p1().rev() def showp2rev(repo, ctx, templ, **args): """:p2rev: Integer. The repository-local revision number of the changeset's second parent, or -1 if the changeset has no second parent.""" return ctx.p2().rev() def showp1node(repo, ctx, templ, **args): """:p1node: String. The identification hash of the changeset's first parent, as a 40 digit hexadecimal string. If the changeset has no parents, all digits are 0.""" return ctx.p1().hex() def showp2node(repo, ctx, templ, **args): """:p2node: String. The identification hash of the changeset's second parent, as a 40 digit hexadecimal string. If the changeset has no second parent, all digits are 0.""" return ctx.p2().hex() def showphase(repo, ctx, templ, **args): """:phase: String. The changeset phase name.""" return ctx.phasestr() def showphaseidx(repo, ctx, templ, **args): """:phaseidx: Integer. The changeset phase index.""" return ctx.phase() def showrev(repo, ctx, templ, **args): """:rev: Integer. The repository-local changeset revision number.""" return ctx.rev() def showtags(**args): """:tags: List of strings. 
Any tags associated with the changeset.""" return showlist('tag', args['ctx'].tags(), **args) # keywords are callables like: # fn(repo, ctx, templ, cache, revcache, **args) # with: # repo - current repository instance # ctx - the changectx being displayed # templ - the templater instance # cache - a cache dictionary for the whole templater run # revcache - a cache dictionary for the current revision keywords = { 'author': showauthor, 'bisect': showbisect, 'branch': showbranch, 'branches': showbranches, 'bookmarks': showbookmarks, 'children': showchildren, 'date': showdate, 'desc': showdescription, 'diffstat': showdiffstat, 'extras': showextras, 'file_adds': showfileadds, 'file_copies': showfilecopies, 'file_copies_switch': showfilecopiesswitch, 'file_dels': showfiledels, 'file_mods': showfilemods, 'files': showfiles, 'latesttag': showlatesttag, 'latesttagdistance': showlatesttagdistance, 'manifest': showmanifest, 'node': shownode, 'p1rev': showp1rev, 'p1node': showp1node, 'p2rev': showp2rev, 'p2node': showp2node, 'phase': showphase, 'phaseidx': showphaseidx, 'rev': showrev, 'tags': showtags, } def _showparents(**args): """:parents: List of strings. The parents of the changeset in "rev:node" format. If the changeset has only one "natural" parent (the predecessor revision) nothing is shown.""" pass dockeywords = { 'parents': _showparents, } dockeywords.update(keywords) # tell hggettext to extract docstrings from these functions: i18nfunctions = dockeywords.values()
apache-2.0
ppanczyk/ansible
lib/ansible/plugins/callback/log_plays.py
23
3178
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = ''' callback: log_plays type: notification short_description: write playbook output to log file version_added: historical description: - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory - "TODO: make this configurable" requirements: - Whitelist in configuration - A writeable /var/log/ansible/hosts directory by the user executing Ansbile on the controller ''' import os import time import json from collections import MutableMapping from ansible.module_utils._text import to_bytes from ansible.plugins.callback import CallbackBase # NOTE: in Ansible 1.2 or later general logging is available without # this plugin, just set ANSIBLE_LOG_PATH as an environment variable # or log_path in the DEFAULTS section of your ansible configuration # file. This callback is an example of per hosts logging for those # that want it. 
class CallbackModule(CallbackBase): """ logs playbook results, per host, in /var/log/ansible/hosts """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' CALLBACK_NAME = 'log_plays' CALLBACK_NEEDS_WHITELIST = True TIME_FORMAT = "%b %d %Y %H:%M:%S" MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n" def __init__(self): super(CallbackModule, self).__init__() if not os.path.exists("/var/log/ansible/hosts"): os.makedirs("/var/log/ansible/hosts") def log(self, host, category, data): if isinstance(data, MutableMapping): if '_ansible_verbose_override' in data: # avoid logging extraneous data data = 'omitted' else: data = data.copy() invocation = data.pop('invocation', None) data = json.dumps(data) if invocation is not None: data = json.dumps(invocation) + " => %s " % data path = os.path.join("/var/log/ansible/hosts", host) now = time.strftime(self.TIME_FORMAT, time.localtime()) msg = to_bytes(self.MSG_FORMAT % dict(now=now, category=category, data=data)) with open(path, "ab") as fd: fd.write(msg) def runner_on_failed(self, host, res, ignore_errors=False): self.log(host, 'FAILED', res) def runner_on_ok(self, host, res): self.log(host, 'OK', res) def runner_on_skipped(self, host, item=None): self.log(host, 'SKIPPED', '...') def runner_on_unreachable(self, host, res): self.log(host, 'UNREACHABLE', res) def runner_on_async_failed(self, host, res, jid): self.log(host, 'ASYNC_FAILED', res) def playbook_on_import_for_host(self, host, imported_file): self.log(host, 'IMPORTED', imported_file) def playbook_on_not_import_for_host(self, host, missing_file): self.log(host, 'NOTIMPORTED', missing_file)
gpl-3.0
prantlf/node-gyp
gyp/pylib/gyp/MSVSUtil.py
566
9386
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions shared amongst the Windows generators.""" import copy import os _TARGET_TYPE_EXT = { 'executable': '.exe', 'loadable_module': '.dll', 'shared_library': '.dll', } def _GetLargePdbShimCcPath(): """Returns the path of the large_pdb_shim.cc file.""" this_dir = os.path.abspath(os.path.dirname(__file__)) src_dir = os.path.abspath(os.path.join(this_dir, '..', '..')) win_data_dir = os.path.join(src_dir, 'data', 'win') large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc') return large_pdb_shim_cc def _DeepCopySomeKeys(in_dict, keys): """Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|. Arguments: in_dict: The dictionary to copy. keys: The keys to be copied. If a key is in this list and doesn't exist in |in_dict| this is not an error. Returns: The partially deep-copied dictionary. """ d = {} for key in keys: if key not in in_dict: continue d[key] = copy.deepcopy(in_dict[key]) return d def _SuffixName(name, suffix): """Add a suffix to the end of a target. Arguments: name: name of the target (foo#target) suffix: the suffix to be added Returns: Target name with suffix added (foo_suffix#target) """ parts = name.rsplit('#', 1) parts[0] = '%s_%s' % (parts[0], suffix) return '#'.join(parts) def _ShardName(name, number): """Add a shard number to the end of a target. Arguments: name: name of the target (foo#target) number: shard number Returns: Target name with shard added (foo_1#target) """ return _SuffixName(name, str(number)) def ShardTargets(target_list, target_dicts): """Shard some targets apart to work around the linkers limits. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. Returns: Tuple of the new sharded versions of the inputs. 
""" # Gather the targets to shard, and how many pieces. targets_to_shard = {} for t in target_dicts: shards = int(target_dicts[t].get('msvs_shard', 0)) if shards: targets_to_shard[t] = shards # Shard target_list. new_target_list = [] for t in target_list: if t in targets_to_shard: for i in range(targets_to_shard[t]): new_target_list.append(_ShardName(t, i)) else: new_target_list.append(t) # Shard target_dict. new_target_dicts = {} for t in target_dicts: if t in targets_to_shard: for i in range(targets_to_shard[t]): name = _ShardName(t, i) new_target_dicts[name] = copy.copy(target_dicts[t]) new_target_dicts[name]['target_name'] = _ShardName( new_target_dicts[name]['target_name'], i) sources = new_target_dicts[name].get('sources', []) new_sources = [] for pos in range(i, len(sources), targets_to_shard[t]): new_sources.append(sources[pos]) new_target_dicts[name]['sources'] = new_sources else: new_target_dicts[t] = target_dicts[t] # Shard dependencies. for t in new_target_dicts: dependencies = copy.copy(new_target_dicts[t].get('dependencies', [])) new_dependencies = [] for d in dependencies: if d in targets_to_shard: for i in range(targets_to_shard[d]): new_dependencies.append(_ShardName(d, i)) else: new_dependencies.append(d) new_target_dicts[t]['dependencies'] = new_dependencies return (new_target_list, new_target_dicts) def _GetPdbPath(target_dict, config_name, vars): """Returns the path to the PDB file that will be generated by a given configuration. The lookup proceeds as follows: - Look for an explicit path in the VCLinkerTool configuration block. - Look for an 'msvs_large_pdb_path' variable. - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is specified. - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'. Arguments: target_dict: The target dictionary to be searched. config_name: The name of the configuration of interest. vars: A dictionary of common GYP variables with generator-specific values. 
Returns: The path of the corresponding PDB file. """ config = target_dict['configurations'][config_name] msvs = config.setdefault('msvs_settings', {}) linker = msvs.get('VCLinkerTool', {}) pdb_path = linker.get('ProgramDatabaseFile') if pdb_path: return pdb_path variables = target_dict.get('variables', {}) pdb_path = variables.get('msvs_large_pdb_path', None) if pdb_path: return pdb_path pdb_base = target_dict.get('product_name', target_dict['target_name']) pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']]) pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base return pdb_path def InsertLargePdbShims(target_list, target_dicts, vars): """Insert a shim target that forces the linker to use 4KB pagesize PDBs. This is a workaround for targets with PDBs greater than 1GB in size, the limit for the 1KB pagesize PDBs created by the linker by default. Arguments: target_list: List of target pairs: 'base/base.gyp:base'. target_dicts: Dict of target properties keyed on target pair. vars: A dictionary of common GYP variables with generator-specific values. Returns: Tuple of the shimmed version of the inputs. """ # Determine which targets need shimming. targets_to_shim = [] for t in target_dicts: target_dict = target_dicts[t] # We only want to shim targets that have msvs_large_pdb enabled. if not int(target_dict.get('msvs_large_pdb', 0)): continue # This is intended for executable, shared_library and loadable_module # targets where every configuration is set up to produce a PDB output. # If any of these conditions is not true then the shim logic will fail # below. targets_to_shim.append(t) large_pdb_shim_cc = _GetLargePdbShimCcPath() for t in targets_to_shim: target_dict = target_dicts[t] target_name = target_dict.get('target_name') base_dict = _DeepCopySomeKeys(target_dict, ['configurations', 'default_configuration', 'toolset']) # This is the dict for copying the source file (part of the GYP tree) # to the intermediate directory of the project. 
This is necessary because # we can't always build a relative path to the shim source file (on Windows # GYP and the project may be on different drives), and Ninja hates absolute # paths (it ends up generating the .obj and .obj.d alongside the source # file, polluting GYPs tree). copy_suffix = 'large_pdb_copy' copy_target_name = target_name + '_' + copy_suffix full_copy_target_name = _SuffixName(t, copy_suffix) shim_cc_basename = os.path.basename(large_pdb_shim_cc) shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name shim_cc_path = shim_cc_dir + '/' + shim_cc_basename copy_dict = copy.deepcopy(base_dict) copy_dict['target_name'] = copy_target_name copy_dict['type'] = 'none' copy_dict['sources'] = [ large_pdb_shim_cc ] copy_dict['copies'] = [{ 'destination': shim_cc_dir, 'files': [ large_pdb_shim_cc ] }] # This is the dict for the PDB generating shim target. It depends on the # copy target. shim_suffix = 'large_pdb_shim' shim_target_name = target_name + '_' + shim_suffix full_shim_target_name = _SuffixName(t, shim_suffix) shim_dict = copy.deepcopy(base_dict) shim_dict['target_name'] = shim_target_name shim_dict['type'] = 'static_library' shim_dict['sources'] = [ shim_cc_path ] shim_dict['dependencies'] = [ full_copy_target_name ] # Set up the shim to output its PDB to the same location as the final linker # target. for config_name, config in shim_dict.get('configurations').iteritems(): pdb_path = _GetPdbPath(target_dict, config_name, vars) # A few keys that we don't want to propagate. for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']: config.pop(key, None) msvs = config.setdefault('msvs_settings', {}) # Update the compiler directives in the shim target. compiler = msvs.setdefault('VCCLCompilerTool', {}) compiler['DebugInformationFormat'] = '3' compiler['ProgramDataBaseFileName'] = pdb_path # Set the explicit PDB path in the appropriate configuration of the # original target. 
config = target_dict['configurations'][config_name] msvs = config.setdefault('msvs_settings', {}) linker = msvs.setdefault('VCLinkerTool', {}) linker['GenerateDebugInformation'] = 'true' linker['ProgramDatabaseFile'] = pdb_path # Add the new targets. They must go to the beginning of the list so that # the dependency generation works as expected in ninja. target_list.insert(0, full_copy_target_name) target_list.insert(0, full_shim_target_name) target_dicts[full_copy_target_name] = copy_dict target_dicts[full_shim_target_name] = shim_dict # Update the original target to depend on the shim target. target_dict.setdefault('dependencies', []).append(full_shim_target_name) return (target_list, target_dicts)
mit
varuntiwari27/rally
todo-api/flask/lib/python2.7/site-packages/setuptools/package_index.py
32
39963
"""PyPI and direct package downloading""" import sys import os import re import shutil import socket import base64 import hashlib import itertools from functools import wraps try: from urllib.parse import splituser except ImportError: from urllib2 import splituser from setuptools.extern import six from setuptools.extern.six.moves import urllib, http_client, configparser, map import setuptools from pkg_resources import ( CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, require, Environment, find_distributions, safe_name, safe_version, to_filename, Requirement, DEVELOP_DIST, ) from setuptools import ssl_support from distutils import log from distutils.errors import DistutilsError from fnmatch import translate from setuptools.py26compat import strip_fragment from setuptools.py27compat import get_all_headers EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$') HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) # this is here to fix emacs' cruddy broken syntax highlighting PYPI_MD5 = re.compile( '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)' 'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\\)' ) URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() __all__ = [ 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', 'interpret_distro_name', ] _SOCKET_TIMEOUT = 15 _tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}" user_agent = _tmpl.format(py_major=sys.version[:3], **globals()) def parse_requirement_arg(spec): try: return Requirement.parse(spec) except ValueError: raise DistutilsError( "Not a URL, existing file, or requirement spec: %r" % (spec,) ) def parse_bdist_wininst(name): """Return (base,pyversion) or (None,None) for possible .exe name""" lower = name.lower() base, py_ver, plat = None, None, None if lower.endswith('.exe'): if lower.endswith('.win32.exe'): base = name[:-10] plat = 'win32' elif 
lower.startswith('.win32-py', -16): py_ver = name[-7:-4] base = name[:-16] plat = 'win32' elif lower.endswith('.win-amd64.exe'): base = name[:-14] plat = 'win-amd64' elif lower.startswith('.win-amd64-py', -20): py_ver = name[-7:-4] base = name[:-20] plat = 'win-amd64' return base, py_ver, plat def egg_info_for_url(url): parts = urllib.parse.urlparse(url) scheme, server, path, parameters, query, fragment = parts base = urllib.parse.unquote(path.split('/')[-1]) if server == 'sourceforge.net' and base == 'download': # XXX Yuck base = urllib.parse.unquote(path.split('/')[-2]) if '#' in base: base, fragment = base.split('#', 1) return base, fragment def distros_for_url(url, metadata=None): """Yield egg or source distribution objects that might be found at a URL""" base, fragment = egg_info_for_url(url) for dist in distros_for_location(url, base, metadata): yield dist if fragment: match = EGG_FRAGMENT.match(fragment) if match: for dist in interpret_distro_name( url, match.group(1), metadata, precedence=CHECKOUT_DIST ): yield dist def distros_for_location(location, basename, metadata=None): """Yield egg or source distribution objects based on basename""" if basename.endswith('.egg.zip'): basename = basename[:-4] # strip the .zip if basename.endswith('.egg') and '-' in basename: # only one, unambiguous interpretation return [Distribution.from_location(location, basename, metadata)] if basename.endswith('.exe'): win_base, py_ver, platform = parse_bdist_wininst(basename) if win_base is not None: return interpret_distro_name( location, win_base, metadata, py_ver, BINARY_DIST, platform ) # Try source distro extensions (.zip, .tgz, etc.) 
# for ext in EXTENSIONS: if basename.endswith(ext): basename = basename[:-len(ext)] return interpret_distro_name(location, basename, metadata) return [] # no extension matched def distros_for_filename(filename, metadata=None): """Yield possible egg or source distribution objects based on a filename""" return distros_for_location( normalize_path(filename), os.path.basename(filename), metadata ) def interpret_distro_name( location, basename, metadata, py_version=None, precedence=SOURCE_DIST, platform=None ): """Generate alternative interpretations of a source distro name Note: if `location` is a filesystem filename, you should call ``pkg_resources.normalize_path()`` on it before passing it to this routine! """ # Generate alternative interpretations of a source distro name # Because some packages are ambiguous as to name/versions split # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. # So, we generate each possible interepretation (e.g. "adns, python-1.1.0" # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice, # the spurious interpretations should be ignored, because in the event # there's also an "adns" package, the spurious "python-1.1.0" version will # compare lower than any numeric version number, and is therefore unlikely # to match a request for it. It's still a potential problem, though, and # in the long run PyPI and the distutils should go for "safe" names and # versions in distribution archive names (sdist and bdist). parts = basename.split('-') if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]): # it is a bdist_dumb, not an sdist -- bail out return for p in range(1, len(parts) + 1): yield Distribution( location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), py_version=py_version, precedence=precedence, platform=platform ) # From Python 2.7 docs def unique_everseen(iterable, key=None): "List unique elements, preserving order. Remember all elements ever seen." 
# unique_everseen('AAAABBBCCDAABBB') --> A B C D # unique_everseen('ABBCcAD', str.lower) --> A B C D seen = set() seen_add = seen.add if key is None: for element in six.moves.filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element def unique_values(func): """ Wrap a function returning an iterable such that the resulting iterable only ever yields unique items. """ @wraps(func) def wrapper(*args, **kwargs): return unique_everseen(func(*args, **kwargs)) return wrapper REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) # this line is here to fix emacs' cruddy broken syntax highlighting @unique_values def find_external_links(url, page): """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" for match in REL.finditer(page): tag, rel = match.groups() rels = set(map(str.strip, rel.lower().split(','))) if 'homepage' in rels or 'download' in rels: for match in HREF.finditer(tag): yield urllib.parse.urljoin(url, htmldecode(match.group(1))) for tag in ("<th>Home Page", "<th>Download URL"): pos = page.find(tag) if pos != -1: match = HREF.search(page, pos) if match: yield urllib.parse.urljoin(url, htmldecode(match.group(1))) class ContentChecker(object): """ A null content checker that defines the interface for checking content """ def feed(self, block): """ Feed a block of data to the hash. """ return def is_valid(self): """ Check the hash. Return False if validation fails. """ return True def report(self, reporter, template): """ Call reporter with information about the checker (hash name) substituted into the template. 
""" return class HashChecker(ContentChecker): pattern = re.compile( r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)=' r'(?P<expected>[a-f0-9]+)' ) def __init__(self, hash_name, expected): self.hash_name = hash_name self.hash = hashlib.new(hash_name) self.expected = expected @classmethod def from_url(cls, url): "Construct a (possibly null) ContentChecker from a URL" fragment = urllib.parse.urlparse(url)[-1] if not fragment: return ContentChecker() match = cls.pattern.search(fragment) if not match: return ContentChecker() return cls(**match.groupdict()) def feed(self, block): self.hash.update(block) def is_valid(self): return self.hash.hexdigest() == self.expected def report(self, reporter, template): msg = template % self.hash_name return reporter(msg) class PackageIndex(Environment): """A distribution index that scans web pages for download URLs""" def __init__( self, index_url="https://pypi.python.org/simple", hosts=('*',), ca_bundle=None, verify_ssl=True, *args, **kw ): Environment.__init__(self, *args, **kw) self.index_url = index_url + "/" [:not index_url.endswith('/')] self.scanned_urls = {} self.fetched_urls = {} self.package_pages = {} self.allows = re.compile('|'.join(map(translate, hosts))).match self.to_scan = [] use_ssl = ( verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()) ) if use_ssl: self.opener = ssl_support.opener_for(ca_bundle) else: self.opener = urllib.request.urlopen def process_url(self, url, retrieve=False): """Evaluate a URL as a possible download, and maybe retrieve it""" if url in self.scanned_urls and not retrieve: return self.scanned_urls[url] = True if not URL_SCHEME(url): self.process_filename(url) return else: dists = list(distros_for_url(url)) if dists: if not self.url_ok(url): return self.debug("Found link: %s", url) if dists or not retrieve or url in self.fetched_urls: list(map(self.add, dists)) return # don't need the actual page if not self.url_ok(url): self.fetched_urls[url] = True 
return self.info("Reading %s", url) self.fetched_urls[url] = True # prevent multiple fetch attempts tmpl = "Download error on %s: %%s -- Some packages may not be found!" f = self.open_url(url, tmpl % url) if f is None: return self.fetched_urls[f.url] = True if 'html' not in f.headers.get('content-type', '').lower(): f.close() # not html, we can't process it return base = f.url # handle redirects page = f.read() if not isinstance(page, str): # We are in Python 3 and got bytes. We want str. if isinstance(f, urllib.error.HTTPError): # Errors have no charset, assume latin1: charset = 'latin-1' else: charset = f.headers.get_param('charset') or 'latin-1' page = page.decode(charset, "ignore") f.close() for match in HREF.finditer(page): link = urllib.parse.urljoin(base, htmldecode(match.group(1))) self.process_url(link) if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: page = self.process_index(url, page) def process_filename(self, fn, nested=False): # process filenames or directories if not os.path.exists(fn): self.warn("Not found: %s", fn) return if os.path.isdir(fn) and not nested: path = os.path.realpath(fn) for item in os.listdir(path): self.process_filename(os.path.join(path, item), True) dists = distros_for_filename(fn) if dists: self.debug("Found: %s", fn) list(map(self.add, dists)) def url_ok(self, url, fatal=False): s = URL_SCHEME(url) is_file = s and s.group(1).lower() == 'file' if is_file or self.allows(urllib.parse.urlparse(url)[1]): return True msg = ("\nNote: Bypassing %s (disallowed host; see " "http://bit.ly/1dg9ijs for details).\n") if fatal: raise DistutilsError(msg % url) else: self.warn(msg, url) def scan_egg_links(self, search_path): dirs = filter(os.path.isdir, search_path) egg_links = ( (path, entry) for path in dirs for entry in os.listdir(path) if entry.endswith('.egg-link') ) list(itertools.starmap(self.scan_egg_link, egg_links)) def scan_egg_link(self, path, entry): with open(os.path.join(path, entry)) as raw_lines: # filter 
non-empty lines lines = list(filter(None, map(str.strip, raw_lines))) if len(lines) != 2: # format is not recognized; punt return egg_path, setup_path = lines for dist in find_distributions(os.path.join(path, egg_path)): dist.location = os.path.join(path, *lines) dist.precedence = SOURCE_DIST self.add(dist) def process_index(self, url, page): """Process the contents of a PyPI page""" def scan(link): # Process a URL to see if it's for a package page if link.startswith(self.index_url): parts = list(map( urllib.parse.unquote, link[len(self.index_url):].split('/') )) if len(parts) == 2 and '#' not in parts[1]: # it's a package page, sanitize and index it pkg = safe_name(parts[0]) ver = safe_version(parts[1]) self.package_pages.setdefault(pkg.lower(), {})[link] = True return to_filename(pkg), to_filename(ver) return None, None # process an index page into the package-page index for match in HREF.finditer(page): try: scan(urllib.parse.urljoin(url, htmldecode(match.group(1)))) except ValueError: pass pkg, ver = scan(url) # ensure this page is in the page index if pkg: # process individual package page for new_url in find_external_links(url, page): # Process the found URL base, frag = egg_info_for_url(new_url) if base.endswith('.py') and not frag: if ver: new_url += '#egg=%s-%s' % (pkg, ver) else: self.need_version_info(url) self.scan_url(new_url) return PYPI_MD5.sub( lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page ) else: return "" # no sense double-scanning non-package pages def need_version_info(self, url): self.scan_all( "Page at %s links to .py file(s) without version info; an index " "scan is required.", url ) def scan_all(self, msg=None, *args): if self.index_url not in self.fetched_urls: if msg: self.warn(msg, *args) self.info( "Scanning index of all packages (this may take a while)" ) self.scan_url(self.index_url) def find_packages(self, requirement): self.scan_url(self.index_url + requirement.unsafe_name + '/') if not 
self.package_pages.get(requirement.key): # Fall back to safe version of the name self.scan_url(self.index_url + requirement.project_name + '/') if not self.package_pages.get(requirement.key): # We couldn't find the target package, so search the index page too self.not_found_in_index(requirement) for url in list(self.package_pages.get(requirement.key, ())): # scan each page that might be related to the desired package self.scan_url(url) def obtain(self, requirement, installer=None): self.prescan() self.find_packages(requirement) for dist in self[requirement.key]: if dist in requirement: return dist self.debug("%s does not match %s", requirement, dist) return super(PackageIndex, self).obtain(requirement, installer) def check_hash(self, checker, filename, tfp): """ checker is a ContentChecker """ checker.report(self.debug, "Validating %%s checksum for %s" % filename) if not checker.is_valid(): tfp.close() os.unlink(filename) raise DistutilsError( "%s validation failed for %s; " "possible download problem?" % ( checker.hash.name, os.path.basename(filename)) ) def add_find_links(self, urls): """Add `urls` to the list that will be prescanned for searches""" for url in urls: if ( self.to_scan is None # if we have already "gone online" or not URL_SCHEME(url) # or it's a local file/directory or url.startswith('file:') or list(distros_for_url(url)) # or a direct package link ): # then go ahead and process it now self.scan_url(url) else: # otherwise, defer retrieval till later self.to_scan.append(url) def prescan(self): """Scan urls scheduled for prescanning (e.g. 
--find-links)""" if self.to_scan: list(map(self.scan_url, self.to_scan)) self.to_scan = None # from now on, go ahead and process immediately def not_found_in_index(self, requirement): if self[requirement.key]: # we've seen at least one distro meth, msg = self.info, "Couldn't retrieve index page for %r" else: # no distros seen for this name, might be misspelled meth, msg = (self.warn, "Couldn't find index page for %r (maybe misspelled?)") meth(msg, requirement.unsafe_name) self.scan_all() def download(self, spec, tmpdir): """Locate and/or download `spec` to `tmpdir`, returning a local path `spec` may be a ``Requirement`` object, or a string containing a URL, an existing local filename, or a project/version requirement spec (i.e. the string form of a ``Requirement`` object). If it is the URL of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is automatically created alongside the downloaded file. If `spec` is a ``Requirement`` object or a string containing a project/version requirement spec, this method returns the location of a matching distribution (possibly after downloading it to `tmpdir`). If `spec` is a locally existing file or directory name, it is simply returned unchanged. If `spec` is a URL, it is downloaded to a subpath of `tmpdir`, and the local filename is returned. Various errors may be raised if a problem occurs during downloading. 
""" if not isinstance(spec, Requirement): scheme = URL_SCHEME(spec) if scheme: # It's a url, download it to tmpdir found = self._download_url(scheme.group(1), spec, tmpdir) base, fragment = egg_info_for_url(spec) if base.endswith('.py'): found = self.gen_setup(found, fragment, tmpdir) return found elif os.path.exists(spec): # Existing file or directory, just return it return spec else: spec = parse_requirement_arg(spec) return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) def fetch_distribution( self, requirement, tmpdir, force_scan=False, source=False, develop_ok=False, local_index=None ): """Obtain a distribution suitable for fulfilling `requirement` `requirement` must be a ``pkg_resources.Requirement`` instance. If necessary, or if the `force_scan` flag is set, the requirement is searched for in the (online) package index as well as the locally installed packages. If a distribution matching `requirement` is found, the returned distribution's ``location`` is the value you would have gotten from calling the ``download()`` method with the matching distribution's URL or filename. If no matching distribution is found, ``None`` is returned. If the `source` flag is set, only source distributions and source checkout links will be considered. Unless the `develop_ok` flag is set, development and system eggs (i.e., those using the ``.egg-info`` format) will be ignored. 
""" # process a Requirement self.info("Searching for %s", requirement) skipped = {} dist = None def find(req, env=None): if env is None: env = self # Find a matching distribution; may be called more than once for dist in env[req.key]: if dist.precedence == DEVELOP_DIST and not develop_ok: if dist not in skipped: self.warn("Skipping development or system egg: %s", dist) skipped[dist] = 1 continue if dist in req and (dist.precedence <= SOURCE_DIST or not source): dist.download_location = self.download(dist.location, tmpdir) if os.path.exists(dist.download_location): return dist if force_scan: self.prescan() self.find_packages(requirement) dist = find(requirement) if not dist and local_index is not None: dist = find(requirement, local_index) if dist is None: if self.to_scan is not None: self.prescan() dist = find(requirement) if dist is None and not force_scan: self.find_packages(requirement) dist = find(requirement) if dist is None: self.warn( "No local packages or working download links found for %s%s", (source and "a source distribution of " or ""), requirement, ) else: self.info("Best match: %s", dist) return dist.clone(location=dist.download_location) def fetch(self, requirement, tmpdir, force_scan=False, source=False): """Obtain a file suitable for fulfilling `requirement` DEPRECATED; use the ``fetch_distribution()`` method now instead. For backward compatibility, this routine is identical but returns the ``location`` of the downloaded distribution instead of a distribution object. """ dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) if dist is not None: return dist.location return None def gen_setup(self, filename, fragment, tmpdir): match = EGG_FRAGMENT.match(fragment) dists = match and [ d for d in interpret_distro_name(filename, match.group(1), None) if d.version ] or [] if len(dists) == 1: # unambiguous ``#egg`` fragment basename = os.path.basename(filename) # Make sure the file has been downloaded to the temp dir. 
if os.path.dirname(filename) != tmpdir: dst = os.path.join(tmpdir, basename) from setuptools.command.easy_install import samefile if not samefile(filename, dst): shutil.copy2(filename, dst) filename = dst with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: file.write( "from setuptools import setup\n" "setup(name=%r, version=%r, py_modules=[%r])\n" % ( dists[0].project_name, dists[0].version, os.path.splitext(basename)[0] ) ) return filename elif match: raise DistutilsError( "Can't unambiguously interpret project/version identifier %r; " "any dashes in the name or version should be escaped using " "underscores. %r" % (fragment, dists) ) else: raise DistutilsError( "Can't process plain .py files without an '#egg=name-version'" " suffix to enable automatic setup script generation." ) dl_blocksize = 8192 def _download_to(self, url, filename): self.info("Downloading %s", url) # Download the file fp, info = None, None try: checker = HashChecker.from_url(url) fp = self.open_url(strip_fragment(url)) if isinstance(fp, urllib.error.HTTPError): raise DistutilsError( "Can't download %s: %s %s" % (url, fp.code, fp.msg) ) headers = fp.info() blocknum = 0 bs = self.dl_blocksize size = -1 if "content-length" in headers: # Some servers return multiple Content-Length headers :( sizes = get_all_headers(headers, 'Content-Length') size = max(map(int, sizes)) self.reporthook(url, filename, blocknum, bs, size) with open(filename, 'wb') as tfp: while True: block = fp.read(bs) if block: checker.feed(block) tfp.write(block) blocknum += 1 self.reporthook(url, filename, blocknum, bs, size) else: break self.check_hash(checker, filename, tfp) return headers finally: if fp: fp.close() def reporthook(self, url, filename, blocknum, blksize, size): pass # no-op def open_url(self, url, warning=None): if url.startswith('file:'): return local_open(url) try: return open_with_auth(url, self.opener) except (ValueError, http_client.InvalidURL) as v: msg = ' '.join([str(arg) for arg in v.args]) if 
warning: self.warn(warning, msg) else: raise DistutilsError('%s %s' % (url, msg)) except urllib.error.HTTPError as v: return v except urllib.error.URLError as v: if warning: self.warn(warning, v.reason) else: raise DistutilsError("Download error for %s: %s" % (url, v.reason)) except http_client.BadStatusLine as v: if warning: self.warn(warning, v.line) else: raise DistutilsError( '%s returned a bad status line. The server might be ' 'down, %s' % (url, v.line) ) except (http_client.HTTPException, socket.error) as v: if warning: self.warn(warning, v) else: raise DistutilsError("Download error for %s: %s" % (url, v)) def _download_url(self, scheme, url, tmpdir): # Determine download filename # name, fragment = egg_info_for_url(url) if name: while '..' in name: name = name.replace('..', '.').replace('\\', '_') else: name = "__downloaded__" # default if URL has no path contents if name.endswith('.egg.zip'): name = name[:-4] # strip the extra .zip before download filename = os.path.join(tmpdir, name) # Download the file # if scheme == 'svn' or scheme.startswith('svn+'): return self._download_svn(url, filename) elif scheme == 'git' or scheme.startswith('git+'): return self._download_git(url, filename) elif scheme.startswith('hg+'): return self._download_hg(url, filename) elif scheme == 'file': return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) else: self.url_ok(url, True) # raises error if not allowed return self._attempt_download(url, filename) def scan_url(self, url): self.process_url(url, True) def _attempt_download(self, url, filename): headers = self._download_to(url, filename) if 'html' in headers.get('content-type', '').lower(): return self._download_html(url, headers, filename) else: return filename def _download_html(self, url, headers, filename): file = open(filename) for line in file: if line.strip(): # Check for a subversion index page if re.search(r'<title>([^- ]+ - )?Revision \d+:', line): # it's a subversion index page: file.close() 
os.unlink(filename) return self._download_svn(url, filename) break # not an index page file.close() os.unlink(filename) raise DistutilsError("Unexpected HTML page found at " + url) def _download_svn(self, url, filename): url = url.split('#', 1)[0] # remove any fragment for svn's sake creds = '' if url.lower().startswith('svn:') and '@' in url: scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) if not netloc and path.startswith('//') and '/' in path[2:]: netloc, path = path[2:].split('/', 1) auth, host = splituser(netloc) if auth: if ':' in auth: user, pw = auth.split(':', 1) creds = " --username=%s --password=%s" % (user, pw) else: creds = " --username=" + auth netloc = host parts = scheme, netloc, url, p, q, f url = urllib.parse.urlunparse(parts) self.info("Doing subversion checkout from %s to %s", url, filename) os.system("svn checkout%s -q %s %s" % (creds, url, filename)) return filename @staticmethod def _vcs_split_rev_from_url(url, pop_prefix=False): scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) scheme = scheme.split('+', 1)[-1] # Some fragment identification fails path = path.split('#', 1)[0] rev = None if '@' in path: path, rev = path.rsplit('@', 1) # Also, discard fragment url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) return url, rev def _download_git(self, url, filename): filename = filename.split('#', 1)[0] url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) self.info("Doing git clone from %s to %s", url, filename) os.system("git clone --quiet %s %s" % (url, filename)) if rev is not None: self.info("Checking out %s", rev) os.system("(cd %s && git checkout --quiet %s)" % ( filename, rev, )) return filename def _download_hg(self, url, filename): filename = filename.split('#', 1)[0] url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) self.info("Doing hg clone from %s to %s", url, filename) os.system("hg clone --quiet %s %s" % (url, filename)) if rev is not None: self.info("Updating to %s", rev) 
os.system("(cd %s && hg up -C -r %s >&-)" % ( filename, rev, )) return filename def debug(self, msg, *args): log.debug(msg, *args) def info(self, msg, *args): log.info(msg, *args) def warn(self, msg, *args): log.warn(msg, *args) # This pattern matches a character entity reference (a decimal numeric # references, a hexadecimal numeric reference, or a named reference). entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub def uchr(c): if not isinstance(c, int): return c if c > 255: return six.unichr(c) return chr(c) def decode_entity(match): what = match.group(1) if what.startswith('#x'): what = int(what[2:], 16) elif what.startswith('#'): what = int(what[1:]) else: what = six.moves.html_entities.name2codepoint.get(what, match.group(0)) return uchr(what) def htmldecode(text): """Decode HTML entities in the given text.""" return entity_sub(decode_entity, text) def socket_timeout(timeout=15): def _socket_timeout(func): def _socket_timeout(*args, **kwargs): old_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) try: return func(*args, **kwargs) finally: socket.setdefaulttimeout(old_timeout) return _socket_timeout return _socket_timeout def _encode_auth(auth): """ A function compatible with Python 2.3-3.3 that will encode auth from a URL suitable for an HTTP header. >>> str(_encode_auth('username%3Apassword')) 'dXNlcm5hbWU6cGFzc3dvcmQ=' Long auth strings should not cause a newline to be inserted. >>> long_auth = 'username:' + 'password'*10 >>> chr(10) in str(_encode_auth(long_auth)) False """ auth_s = urllib.parse.unquote(auth) # convert to bytes auth_bytes = auth_s.encode() # use the legacy interface for Python 2.3 support encoded_bytes = base64.encodestring(auth_bytes) # convert back to a string encoded = encoded_bytes.decode() # strip the trailing carriage return return encoded.replace('\n', '') class Credential(object): """ A username/password pair. Use like a namedtuple. 
""" def __init__(self, username, password): self.username = username self.password = password def __iter__(self): yield self.username yield self.password def __str__(self): return '%(username)s:%(password)s' % vars(self) class PyPIConfig(configparser.RawConfigParser): def __init__(self): """ Load from ~/.pypirc """ defaults = dict.fromkeys(['username', 'password', 'repository'], '') configparser.RawConfigParser.__init__(self, defaults) rc = os.path.join(os.path.expanduser('~'), '.pypirc') if os.path.exists(rc): self.read(rc) @property def creds_by_repository(self): sections_with_repositories = [ section for section in self.sections() if self.get(section, 'repository').strip() ] return dict(map(self._get_repo_cred, sections_with_repositories)) def _get_repo_cred(self, section): repo = self.get(section, 'repository').strip() return repo, Credential( self.get(section, 'username').strip(), self.get(section, 'password').strip(), ) def find_credential(self, url): """ If the URL indicated appears to be a repository defined in this config, return the credential for that repository. """ for repository, cred in self.creds_by_repository.items(): if url.startswith(repository): return cred def open_with_auth(url, opener=urllib.request.urlopen): """Open a urllib2 request, handling HTTP authentication""" scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url) # Double scheme does not raise on Mac OS X as revealed by a # failing test. We would expect "nonnumeric port". Refs #20. 
if netloc.endswith(':'): raise http_client.InvalidURL("nonnumeric port: ''") if scheme in ('http', 'https'): auth, host = splituser(netloc) else: auth = None if not auth: cred = PyPIConfig().find_credential(url) if cred: auth = str(cred) info = cred.username, url log.info('Authenticating as %s for %s (from .pypirc)', *info) if auth: auth = "Basic " + _encode_auth(auth) parts = scheme, host, path, params, query, frag new_url = urllib.parse.urlunparse(parts) request = urllib.request.Request(new_url) request.add_header("Authorization", auth) else: request = urllib.request.Request(url) request.add_header('User-Agent', user_agent) fp = opener(request) if auth: # Put authentication info back into request URL if same host, # so that links found on the page will work s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) if s2 == scheme and h2 == host: parts = s2, netloc, path2, param2, query2, frag2 fp.url = urllib.parse.urlunparse(parts) return fp # adding a timeout to avoid freezing package_index open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) def fix_sf_url(url): return url # backward compatibility def local_open(url): """Read a local path, with special support for directories""" scheme, server, path, param, query, frag = urllib.parse.urlparse(url) filename = urllib.request.url2pathname(path) if os.path.isfile(filename): return urllib.request.urlopen(url) elif path.endswith('/') and os.path.isdir(filename): files = [] for f in os.listdir(filename): filepath = os.path.join(filename, f) if f == 'index.html': with open(filepath, 'r') as fp: body = fp.read() break elif os.path.isdir(filepath): f += '/' files.append('<a href="{name}">{name}</a>'.format(name=f)) else: tmpl = ("<html><head><title>{url}</title>" "</head><body>{files}</body></html>") body = tmpl.format(url=url, files='\n'.join(files)) status, message = 200, "OK" else: status, message, body = 404, "Path not found", "Not found" headers = {'content-type': 'text/html'} body_stream = 
six.StringIO(body) return urllib.error.HTTPError(url, status, message, headers, body_stream)
apache-2.0
liberatorqjw/scikit-learn
sklearn/neighbors/regression.py
39
10464
"""Nearest Neighbor Regression""" # Authors: Jake Vanderplas <vanderplas@astro.washington.edu> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl> # Multi-output support by Arnaud Joly <a.joly@ulg.ac.be> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import numpy as np from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin from .base import RadiusNeighborsMixin, SupervisedFloatMixin from ..base import RegressorMixin from ..utils import check_array class KNeighborsRegressor(NeighborsBase, KNeighborsMixin, SupervisedFloatMixin, RegressorMixin): """Regression based on k-nearest neighbors. The target is predicted by local interpolation of the targets associated of the nearest neighbors in the training set. Parameters ---------- n_neighbors : int, optional (default = 5) Number of neighbors to use by default for :meth:`k_neighbors` queries. weights : str or callable weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Uniform weights are used by default. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDtree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. 
leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. metric : string or DistanceMetric object (default='minkowski') the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of the DistanceMetric class for a list of available metrics. p : integer, optional (default = 2) Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional (default = None) additional keyword arguments for the metric function. Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import KNeighborsRegressor >>> neigh = KNeighborsRegressor(n_neighbors=2) >>> neigh.fit(X, y) # doctest: +ELLIPSIS KNeighborsRegressor(...) >>> print(neigh.predict([[1.5]])) [ 0.5] See also -------- NearestNeighbors RadiusNeighborsRegressor KNeighborsClassifier RadiusNeighborsClassifier Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. .. warning:: Regarding the Nearest Neighbors algorithms, if it is found that two neighbors, neighbor `k+1` and `k`, have identical distances but but different labels, the results will depend on the ordering of the training data. 
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, **kwargs): self._init_params(n_neighbors=n_neighbors, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, **kwargs) self.weights = _check_weights(weights) def predict(self, X): """Predict the target for the provided data Parameters ---------- X : array or matrix, shape = [n_samples, n_features] Returns ------- y : array of int, shape = [n_samples] or [n_samples, n_outputs] Target values """ X = check_array(X, accept_sparse='csr') neigh_dist, neigh_ind = self.kneighbors(X) weights = _get_weights(neigh_dist, self.weights) _y = self._y if _y.ndim == 1: _y = _y.reshape((-1, 1)) if weights is None: y_pred = np.mean(_y[neigh_ind], axis=1) else: y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float) denom = np.sum(weights, axis=1) for j in range(_y.shape[1]): num = np.sum(_y[neigh_ind, j] * weights, axis=1) y_pred[:, j] = num / denom if self._y.ndim == 1: y_pred = y_pred.ravel() return y_pred class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin, SupervisedFloatMixin, RegressorMixin): """Regression based on neighbors within a fixed radius. The target is predicted by local interpolation of the targets associated of the nearest neighbors in the training set. Parameters ---------- radius : float, optional (default = 1.0) Range of parameter space to use by default for :meth`radius_neighbors` queries. weights : str or callable weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. 
- [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Uniform weights are used by default. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDtree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. metric : string or DistanceMetric object (default='minkowski') the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of the DistanceMetric class for a list of available metrics. p : integer, optional (default = 2) Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional (default = None) additional keyword arguments for the metric function. Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import RadiusNeighborsRegressor >>> neigh = RadiusNeighborsRegressor(radius=1.0) >>> neigh.fit(X, y) # doctest: +ELLIPSIS RadiusNeighborsRegressor(...) 
>>> print(neigh.predict([[1.5]])) [ 0.5] See also -------- NearestNeighbors KNeighborsRegressor KNeighborsClassifier RadiusNeighborsClassifier Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, radius=1.0, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, **kwargs): self._init_params(radius=radius, algorithm=algorithm, leaf_size=leaf_size, p=p, metric=metric, metric_params=metric_params, **kwargs) self.weights = _check_weights(weights) def predict(self, X): """Predict the target for the provided data Parameters ---------- X : array or matrix, shape = [n_samples, n_features] Returns ------- y : array of int, shape = [n_samples] or [n_samples, n_outputs] Target values """ X = check_array(X, accept_sparse='csr') neigh_dist, neigh_ind = self.radius_neighbors(X) weights = _get_weights(neigh_dist, self.weights) _y = self._y if _y.ndim == 1: _y = _y.reshape((-1, 1)) if weights is None: y_pred = np.array([np.mean(_y[ind, :], axis=0) for ind in neigh_ind]) else: y_pred = np.array([(np.average(_y[ind, :], axis=0, weights=weights[i])) for (i, ind) in enumerate(neigh_ind)]) if self._y.ndim == 1: y_pred = y_pred.ravel() return y_pred
bsd-3-clause
ashray/VTK-EVM
ThirdParty/Twisted/twisted/pair/ethernet.py
67
1703
# -*- test-case-name: twisted.pair.test.test_ethernet -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. # """Support for working directly with ethernet frames""" import struct from twisted.internet import protocol from twisted.pair import raw from zope.interface import implements, Interface class IEthernetProtocol(Interface): """An interface for protocols that handle Ethernet frames""" def addProto(): """Add an IRawPacketProtocol protocol""" def datagramReceived(): """An Ethernet frame has been received""" class EthernetHeader: def __init__(self, data): (self.dest, self.source, self.proto) \ = struct.unpack("!6s6sH", data[:6+6+2]) class EthernetProtocol(protocol.AbstractDatagramProtocol): implements(IEthernetProtocol) def __init__(self): self.etherProtos = {} def addProto(self, num, proto): proto = raw.IRawPacketProtocol(proto) if num < 0: raise TypeError, 'Added protocol must be positive or zero' if num >= 2**16: raise TypeError, 'Added protocol must fit in 16 bits' if num not in self.etherProtos: self.etherProtos[num] = [] self.etherProtos[num].append(proto) def datagramReceived(self, data, partial=0): header = EthernetHeader(data[:14]) for proto in self.etherProtos.get(header.proto, ()): proto.datagramReceived(data=data[14:], partial=partial, dest=header.dest, source=header.source, protocol=header.proto)
bsd-3-clause
jasonpr59/lisp
datatypes.py
1
3115
from collections import namedtuple
import fractions


class DataType(object):
    """ABC for Lisp Datatypes.

    We do not guarantee that all Lisp data inherits from DataType.
    """


class Fraction(DataType, fractions.Fraction):
    """An exact rational Lisp number."""

    def __repr__(self):
        # Integral values print without the redundant "/1" denominator.
        # (Fix: removed a leftover debug `print` statement that made
        # __repr__ have a side effect and broke Python 3 parsing.)
        if self.denominator == 1:
            return '%s' % self.numerator
        else:
            return '%s/%s' % (self.numerator, self.denominator)


def assert_int(value):
    """Assert that `value` is integral and return it as a Python int.

    Accepts plain ints and rationals whose denominator is 1.
    """
    assert isinstance(value, int) or value.denominator == 1
    return int(value)


class Symbol(DataType):
    """A Scheme symbol equivalent."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return str(self.value)


class LispFunction(DataType):
    """A function defined in Lisp.

    Holds the defining environment (for lexical scoping), the formal
    argument names, and the body expressions.
    """

    def __init__(self, env, arg_names, exprs):
        self.env = env
        self.arg_names = arg_names
        self.exprs = exprs

    def __repr__(self):
        return "LispFunction[%s -> %s]" % (self.arg_names, self.exprs)


class Pair(namedtuple('Pair', ['car', 'cdr'])):
    """A Lisp pair, with a car and a cdr."""

    def __repr__(self):
        # Classic dotted-pair notation; `self` supplies (car, cdr).
        return '(%s . %s)' % self


class Vector(DataType):
    """A collection with O(1) access of any element."""

    def __init__(self, elements):
        self._elements = list(elements)

    def get(self, num):
        """Return the element at integral index `num`."""
        return self._elements[assert_int(num)]

    def set(self, num, value):
        """Replace the element at integral index `num` with `value`."""
        # TODO(jasonpr): Determine when to allow vector mutation.
        self._elements[assert_int(num)] = value

    @classmethod
    def make(cls, length, fill_value=0):
        """Alternate constructor: a vector of `length` copies of `fill_value`."""
        backing_list = [fill_value] * assert_int(length)
        return cls(backing_list)

    def length(self):
        """Return the number of elements, as a Lisp Fraction."""
        return Fraction(len(self._elements))

    def as_list(self):
        # TODO(jasonpr): Implement once we've reconciled Python lists
        # with Lisp lists.
        raise NotImplementedError

    @classmethod
    def from_list(cls, source_list):
        # TODO(jasonpr): Implement once we've reconciled Python lists
        # with Lisp lists.
        raise NotImplementedError

    def fill(self, fill_value):
        """Overwrite every slot with `fill_value`, preserving the length."""
        new_backing_list = [fill_value] * len(self._elements)
        self._elements = new_backing_list

    def __repr__(self):
        return '#(' + ' '.join(str(elt) for elt in self._elements) + ')'


class _Boolean(DataType):
    """A boolean, with a Scheme-like #t/#f representation."""

    def __init__(self, value):
        self._value = value

    def __repr__(self):
        return '#t' if self._value else '#f'


# The two canonical boolean instances; use lisp_bool() to obtain them.
_lisp_true = _Boolean(True)
_lisp_false = _Boolean(False)


def lisp_bool(value):
    """Gets the _Boolean representation of True and False."""
    return _lisp_true if value else _lisp_false


class _Null(DataType):
    """The empty list, which is NOT a pair!

    Only one null object shall ever be created.  It is datatypes.null,
    and it is created below.
    """

    def __repr__(self):
        return '()'


# The singleton empty list.
null = _Null()


def is_null(data):
    """Returns whether an object is the (singleton) null object."""
    return data is null
gpl-2.0
kajgan/stbgui
lib/python/Screens/SessionGlobals.py
29
2428
from Screens.Screen import Screen
from Components.Sources.CurrentService import CurrentService
from Components.Sources.EventInfo import EventInfo
from Components.Sources.FrontendStatus import FrontendStatus
from Components.Sources.FrontendInfo import FrontendInfo
from Components.Sources.Source import Source
from Components.Sources.TunerInfo import TunerInfo
from Components.Sources.Boolean import Boolean
from Components.Sources.RecordState import RecordState
from Components.Sources.HddState import HddState
from Components.Converter.Combine import Combine
from Components.Renderer.FrontpanelLed import FrontpanelLed


class SessionGlobals(Screen):
    """Session-wide data sources shared by all screens, plus wiring of
    the standby/record state to the front-panel LEDs."""

    def __init__(self, session):
        Screen.__init__(self, session)
        # Publish session-wide sources under well-known names so skins
        # and converters can subscribe to them.
        self["CurrentService"] = CurrentService(session.nav)
        self["Event_Now"] = EventInfo(session.nav, EventInfo.NOW)
        self["Event_Next"] = EventInfo(session.nav, EventInfo.NEXT)
        self["FrontendStatus"] = FrontendStatus(service_source = session.nav.getCurrentService)
        self["FrontendInfo"] = FrontendInfo(navcore = session.nav)
        self["VideoPicture"] = Source()
        self["TunerInfo"] = TunerInfo()
        self["RecordState"] = RecordState(session)
        self["Standby"] = Boolean(fixed = False)
        self["HddSleepingState"] = HddState(session)

        from Components.SystemInfo import SystemInfo

        # Combine standby + record state into a single value 0..3 used to
        # select an LED pattern below.
        # NOTE(review): three sources are connected, but the lambda reads
        # only s[0] (Standby) and s[1] (RecordState); HddSleepingState does
        # not influence the combined value -- confirm that is intended.
        combine = Combine(func = lambda s: {(False, False): 0, (False, True): 1, (True, False): 2, (True, True): 3}[(s[0].boolean, s[1].boolean)])
        combine.connect(self["Standby"])
        combine.connect(self["RecordState"])
        combine.connect(self["HddSleepingState"])

        #                      | two leds  | single led |
        # recordstate  standby   red green
        # false        false     off on     off
        # true         false     blnk on    blnk
        # false        true      on  off    off
        # true         true      blnk off   blnk

        # Patterns are (speed, bitmask_a, bitmask_b) consumed by
        # FrontpanelLed; the bitmasks encode on/off phases over time.
        PATTERN_ON = (20, 0xffffffff, 0xffffffff)
        PATTERN_OFF = (20, 0, 0)
        PATTERN_BLINK = (20, 0x55555555, 0xa7fccf7a)

        nr_leds = SystemInfo.get("NumFrontpanelLEDs", 0)

        if nr_leds == 1:
            # Single LED: blink while recording, regardless of standby.
            FrontpanelLed(which = 0, boolean = False, patterns = [PATTERN_OFF, PATTERN_BLINK, PATTERN_OFF, PATTERN_BLINK]).connect(combine)
        elif nr_leds == 2:
            # Two LEDs: red (0) signals record/standby, green (1) is lit
            # only when not in standby (see the table above).
            FrontpanelLed(which = 0, boolean = False, patterns = [PATTERN_OFF, PATTERN_BLINK, PATTERN_ON, PATTERN_BLINK]).connect(combine)
            FrontpanelLed(which = 1, boolean = False, patterns = [PATTERN_ON, PATTERN_ON, PATTERN_OFF, PATTERN_OFF]).connect(combine)
gpl-2.0
gawrysz/piernik
bin/qa.py
3
15772
#!/usr/bin/env python import re import sys import hashlib import subprocess as sp import numpy as np debug = False typ1 = np.dtype([('name', 'a50'), ('beg', 'i'), ('end', 'i'), ('type', 'a4')]) # starts with spaces or spaces and one of { 'end', 'pure', ... } # if function it can have a type next goes subroutine or function or type test_for_routines = re.compile(''' ^\s{0,12}(|end|pure|elemental|recursive|((type|real|logical|integer)(|\([^(]*\))))(|\s) (|pure|elemental|recursive|((type|real|logical|integer)(|\([^(]*\))))(|\s) (subroutine|function|type(,|\s)) ''', re.VERBOSE) # starts with spaces or spaces and one of { 'end', 'pure', ... } # next goes subroutine or function or type test_for_interfaces = re.compile(''' ^\s{0,12}(|end|abstract)\s interface ''', re.VERBOSE) # test_for_routines = re.compile(''' # ^(?!\s{0,9}!).*(subroutine|function|type(,|\s::)) # ''',re.VERBOSE) module_body = re.compile( '''^(module|contains|program)''', re.VERBOSE) just_end = re.compile('''^\s{0,9}end''', re.IGNORECASE) have_implicit = re.compile('''implicit\snone''', re.IGNORECASE) have_privpub = re.compile('''^\s{0,9}(public|private)''', re.VERBOSE) have_pub = re.compile('''^\s{0,9}public''', re.VERBOSE) have_priv = re.compile('''^\s{0,9}private\s::''', re.VERBOSE) remove_warn = re.compile('''(?!.*QA_WARN .+)''', re.VERBOSE) have_global_public = re.compile('''^\s{0,9}public(?!.*::)''', re.VERBOSE) depr_syntax_1 = re.compile('''^\s{1,12}(?:real(?:\s|,)|integer(?:\s|,)|logical(?:\s|,|\()|character(?:\s|,))(?!.*::)''', re.IGNORECASE) depr_syntax_2 = re.compile('''^\s{1,12}use[\s](?!.*only)''', re.IGNORECASE) depr_syntax_3 = re.compile('''^\s{1,12}character(?![(])''', re.IGNORECASE) is_function = re.compile('''(?i)\sfunction\s''', re.IGNORECASE) not_function = re.compile('''(?!.*function)''', re.IGNORECASE) tab_char = re.compile('\t') has_use = re.compile("^\s{1,12}use\s", re.IGNORECASE) have_label = re.compile('^[0-9]', re.VERBOSE) crude_write = re.compile("write *\( *\*", 
re.IGNORECASE) magic_integer = re.compile("\(len=[1-9]", re.IGNORECASE) continuation = re.compile('&$', re.VERBOSE) implicit_save = re.compile('''(?:real(?:\s|,)|integer(?:\s|,)|logical(?:\s|,|\()|character(?:\s|,)).*::.*=(|\s|)[0-9]''', re.IGNORECASE) not_param_nor_save = re.compile("(?!.*(parameter|save))", re.IGNORECASE) nasty_spaces = [ re.compile("^([\s0-9]*)end\s{1,}do", re.IGNORECASE), r"\1enddo", re.compile("^([\s0-9]*)end\s{1,}if", re.IGNORECASE), r"\1endif", re.compile("^([\s0-9]*)end\s{1,}while", re.IGNORECASE), r"\1endwhile", re.compile("^([\s0-9]*)end\s{1,}where", re.IGNORECASE), r"\1endwhere", re.compile("only\s{1,}:", re.IGNORECASE), "only:", re.compile("\sif(|\s{2,})\(", re.IGNORECASE), " if (", re.compile("\swhere(|\s{2,})\(", re.IGNORECASE), " where (", re.compile("\swhile(|\s{2,})\(", re.IGNORECASE), " while (", re.compile("\sforall(|\s{2,})\(", re.IGNORECASE), " forall (", re.compile("\scase(|\s{2,})\(", re.IGNORECASE), " case (" ] class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' def disable(self): self.HEADER = '' self.OKBLUE = '' self.OKGREEN = '' self.WARNING = '' self.FAIL = '' self.ENDC = '' b = bcolors() def remove_binaries(files): list = [] for file in files: checkFile = sp.Popen('file -bi ' + file, stdout=sp.PIPE, shell=True, executable="/bin/bash") if not checkFile.communicate()[0].startswith(b'text'): print(b.WARNING + "QA: " + b.ENDC + file + " is not a text file. 
I will not test it.") else: list.append(file) return list def select_sources(files): test = re.compile("F90$", re.IGNORECASE) return list(filter(test.search, files)) def wtf(lines, line, rname, fname): if(isinstance(lines, np.ndarray)): linenum = line_num(lines, line) else: linenum = lines line = line.split("!")[0] # Strip comments if(rname == ''): return " [%s]@L%i => %s" % (fname, linenum, line.strip()) else: return " [%s:%s]@L%i => %s" % (fname, rname, linenum, line.strip()) def line_num(lines, line): return np.where(lines == line)[0][0] def give_warn(s): return b.WARNING + "Warning: " + s + b.ENDC def give_err(s): return b.FAIL + "Error: " + s + b.ENDC def parse_f90file(lines, fname, store): if (debug): print("[parse_f90file] fname = ", fname) subs_array = np.zeros((0,), dtype=typ1) subs = list(filter(test_for_routines.search, lines)) subs_names = [] subs_types = [] for f in subs: if (just_end.match(f)): word = f.strip().split(' ') subs_types.insert(0, word[1]) if (len(word) >= 3): subs_names.append(word[2]) else: store.append(give_warn("QA: ") + '[%s] "%s" without %s name' % (fname, f.strip(), word[1] if (len(word) > 1) else "any")) for f in subs_names: cur_sub = list(filter(re.compile(f).search, subs)) if (len(cur_sub) > 2): if (debug): print("[parse_f90file] f, cur_sub = ", f, cur_sub) for index in range(0, len(cur_sub)): if just_end.match(cur_sub[index]): if cur_sub[index].split()[1] == subs_types[-1] and \ cur_sub[index][-len(f):] == f: break else: index = 1 obj = (f, line_num(lines, cur_sub[index - 1]), line_num( lines, cur_sub[index]), subs_types.pop()) subs_array = np.append(subs_array, np.array([obj], dtype=typ1)) if (debug): print("[parse_f90file] subs = ", subs) print("[parse_f90file] subs_names = ", subs_names) mod = list(filter(module_body.match, lines)) if (len(mod) <= 0): store.append( give_warn("QA: ") + "[%s] => module body not found!" 
% fname) else: if (len(mod) > 1): endline = line_num(lines, mod[1]) else: endline = len(lines) obj = (mod[0].strip().split(" ")[1], line_num(lines, mod[0]), endline, mod[0].strip().split(" ")[0][0:3] ) subs_array = np.append(subs_array, np.array([obj], dtype=typ1)) return subs_array def qa_checks(files, options): print(b.OKBLUE + '"I am the purifier, the light that clears all shadows."' + ' - seal of cleansing inscription' + b.ENDC) runpath = sys.argv[0].split("qa.py")[0] files = remove_binaries(files) # ToDo: check files other than F90 f90files = select_sources(files) warns = [] errors = [] for f in f90files: pfile = [] lines = open(f, 'r').readlines() for line in lines: # things done in "in-place" line = line.rstrip() # that removes trailing spaces for i in range(0, len(nasty_spaces), 2): line = re.sub(nasty_spaces[i], nasty_spaces[i + 1], line) # remove nasty spaces pfile.append(line) if lines != [line + '\n' for line in pfile]: diff_cnt = 1 if (len(lines) != len(pfile)) else 0 if diff_cnt: print(give_warn("Line count changed") + " in file '%s'" % f) for i in range(min(len(lines), len(pfile))): if (lines[i] != pfile[i] + '\n'): diff_cnt += 1 if diff_cnt: print(give_warn("QA: ") + "Whitespace changes found in file '%s' (%d lines changed)" % (f, diff_cnt)) fp = open(f, 'w') for line in pfile: fp.write(line + '\n') fp.close() # f = f.split('/')[-1] # checks for f90 file as whole qa_nonconforming_tabs(np.array(pfile), '', errors, f) qa_labels(np.array(pfile), '', errors, f) qa_crude_write(np.array(pfile), '', warns, f) qa_magic_integers(np.array(pfile), '', warns, f) # checks that require parsing f90 files clean_ind = [] pfile = np.array(pfile) # remove interfaces as we currently don't handle them well interfaces = [line_num( pfile, i) for i in filter(test_for_interfaces.search, pfile)] while len(interfaces) > 0: if (debug): print("Removed interface") pfile = np.delete(pfile, np.s_[interfaces[0]:interfaces[1] + 1], 0) interfaces = [line_num( pfile, i) for i in 
filter(test_for_interfaces.search, pfile)] for obj in parse_f90file(pfile, f, warns): if (debug): print('[qa_checks] obj =', obj) part = pfile[obj['beg']:obj['end']] # if (debug): # for f in part: print f # False refs need to be done before removal of types in module body qa_false_refs(part, obj['name'], warns, f) if(obj['type'] == b'mod'): # check whether already checked lines are accounted to module lines range ci = np.array(clean_ind) eitc = np.where(np.logical_or(ci < obj['beg'], ci > obj['end'])) ind_tbr = np.delete(ci, eitc) # module body is always last, remove lines that've been already checked if (ind_tbr.size > 0): part = np.delete(part, ind_tbr - obj['beg']) qa_have_priv_pub(part, obj['name'], warns, f) else: clean_ind += range(obj['beg'], obj['end'] + 1) qa_depreciated_syntax(part, obj['name'], warns, f) if(obj['type'] != b'type'): qa_have_implicit(part, obj['name'], errors, f) qa_implicit_saves(part, obj['name'], errors, f) if (len(warns)): print(b.WARNING + "%i warning(s) detected. " % len(warns) + b.ENDC) for warning in warns: print(warning) if (len(errors)): print(b.FAIL + "%i error(s) detected! " % len(errors) + b.ENDC) for error in errors: print(error) else: print(b.OKGREEN + "Yay! No errors!!! " + b.ENDC) if (len(errors) == 0 and len(warns) == 0): print(b.OKGREEN + "No warnings detected. " + b.ENDC + "If everyone were like you, I'd be out of business!") def qa_have_priv_pub(lines, name, warns, fname): if(len(list(filter(have_privpub.search, lines))) < 1): warns.append(give_warn("QA: ") + "module [%s:%s] lacks public/private keywords." % (fname, name)) else: if (list(filter(remove_warn.match, filter(have_priv.search, lines)))): warns.append(give_warn("QA: ") + "module [%s:%s] have selective private." % (fname, name)) if (list(filter(remove_warn.match, filter(have_global_public.search, lines)))): warns.append(give_warn("QA: ") + "module [%s:%s] is completely public." 
% (fname, name)) def qa_crude_write(lines, rname, store, fname): warning = 0 for f in filter(remove_warn.match, filter(crude_write.search, lines)): store.append(give_warn("crude write ") + wtf(lines, f, rname, fname)) def qa_magic_integers(lines, rname, store, fname): for f in filter(remove_warn.match, filter(magic_integer.search, lines)): hits = np.where(lines == f)[0] if(len(hits) > 1): for i in hits: warn = give_warn("magic integer") + wtf(i, f, rname, fname) if(warn not in store): store.append(warn) else: warn = give_warn("magic integer") + wtf(lines, f, rname, fname) if(warn not in store): store.append(warn) def qa_nonconforming_tabs(lines, rname, store, fname): for f in filter(tab_char.search, lines): store.append(give_err("non conforming tab detected ") + wtf(lines, f, rname, fname)) def qa_labels(lines, rname, store, fname): for f in filter(have_label.search, lines): store.append(give_err("label detected ") + wtf(lines, f, rname, fname)) def qa_depreciated_syntax(lines, rname, store, fname): # print b.OKGREEN + "QA: " + b.ENDC + "Checking for depreciated syntax" for f in filter(not_function.match, filter(depr_syntax_1.search, lines)): store.append( give_warn("lacking :: ") + wtf(lines, f, rname, fname)) for f in filter(remove_warn.match, filter(depr_syntax_2.search, lines)): store.append( give_warn("greedy use ") + wtf(lines, f, rname, fname)) for f in filter(depr_syntax_3.search, lines): store.append( give_warn("wrong syntax ") + wtf(lines, f, rname, fname)) def qa_have_implicit(lines, name, store, fname): if(len(list(filter(have_implicit.search, lines))) < 1): store.append(give_err("missing 'implicit none' ") + "[%s:%s]" % (fname, name)) def remove_amp(lines, strip): buf = '' temp = [] for line in lines: if(len(buf)): line = buf + line.lstrip() buf = '' if(continuation.search(line)): buf = re.sub('&', '', line.split("!")[0]) else: if(strip): temp.append(line.split("!")[0]) # kills QA_WARN else: temp.append(line) return temp def qa_false_refs(lines, name, 
store, fname): temp = remove_amp(filter(remove_warn.match, lines), True) uses = list(filter(has_use.search, temp)) for item in uses: try: to_check = [f.strip() for f in item.split("only:")[1].split(',')] except IndexError: to_check = [] store.append(give_warn("QA: ") + "'" + item + "' without ONLY clause in [%s:%s]" % (fname, name)) to_check = [re.sub('&', '', f).lstrip() for f in to_check] # additional sanitization # remove operator keyword from import for ino, item in enumerate(to_check): try: new_item = re.search('operator\((.+?)\)', item).group(1) except AttributeError: new_item = item to_check[ino] = new_item for func in to_check: pattern = re.compile(func, re.IGNORECASE) # stupid but seems to work if(len(list(filter(pattern.search, temp))) < 2): store.append(give_warn("QA: ") + "'" + func + "' grabbed but not used in [%s:%s]" % (fname, name)) def qa_implicit_saves(lines, name, store, fname): # print b.OKGREEN + "QA: " + b.ENDC + "Checking for implicit saves" impl = list(filter(not_param_nor_save.match, filter(implicit_save.search, remove_amp(filter(remove_warn.match, lines), True)))) if(len(impl)): store.append(give_err("implicit saves detected in ") + "[%s:%s]" % (fname, name)) for line in impl: store.append(line.strip()) if __name__ == "__main__": from optparse import OptionParser usage = "usage: %prog [options] FILES" parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="store_true", dest="debug", default=False, help="make lots of noise [default]") (options, args) = parser.parse_args() debug = options.debug if len(args) < 1: parser.error("incorrect number of arguments") qa_checks(args, options)
gpl-3.0
SimVascular/VTK
Examples/Annotation/Python/annotatePick.py
9
2637
#!/usr/bin/env python

# This example demonstrates cell picking using vtkCellPicker.  It
# displays the results of picking using a vtkTextMapper.

from __future__ import print_function
import vtk

# create a sphere source, mapper, and actor
sphere = vtk.vtkSphereSource()
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereMapper.GlobalImmediateModeRenderingOn()
# LODActor automatically drops to a cheaper representation to keep the
# interaction frame rate up.
sphereActor = vtk.vtkLODActor()
sphereActor.SetMapper(sphereMapper)

# create the spikes by glyphing the sphere with a cone.  Create the
# mapper and actor for the glyphs.
cone = vtk.vtkConeSource()
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(sphere.GetOutputPort())
glyph.SetSourceConnection(cone.GetOutputPort())
# Orient and scale each cone along the sphere's surface normals.
glyph.SetVectorModeToUseNormal()
glyph.SetScaleModeToScaleByVector()
glyph.SetScaleFactor(0.25)
spikeMapper = vtk.vtkPolyDataMapper()
spikeMapper.SetInputConnection(glyph.GetOutputPort())
spikeActor = vtk.vtkLODActor()
spikeActor.SetMapper(spikeMapper)

# Create a text mapper and actor to display the results of picking.
textMapper = vtk.vtkTextMapper()
tprop = textMapper.GetTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
tprop.BoldOn()
tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
textActor = vtk.vtkActor2D()
# Hidden until the first successful pick.
textActor.VisibilityOff()
textActor.SetMapper(textMapper)

# Create a cell picker.
picker = vtk.vtkCellPicker()

# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotatePick(object, event):
    # Observer callback for the picker's EndPickEvent; shows the world
    # coordinates of the picked cell at the screen pick location.
    print("pick")
    global picker, textActor, textMapper
    if picker.GetCellId() < 0:
        # Nothing was hit; hide the annotation.
        textActor.VisibilityOff()
    else:
        selPt = picker.GetSelectionPoint()
        pickPos = picker.GetPickPosition()
        textMapper.SetInput("(%.6f, %.6f, %.6f)"%pickPos)
        # Place the label at the 2-D screen position of the pick.
        textActor.SetPosition(selPt[:2])
        textActor.VisibilityOn()

# Now at the end of the pick event call the above function.
picker.AddObserver("EndPickEvent", annotatePick)

# Create the Renderer, RenderWindow, etc. and set the Picker.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetPicker(picker)

# Add the actors to the renderer, set the background and size
ren.AddActor2D(textActor)
ren.AddActor(sphereActor)
ren.AddActor(spikeActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)

# Get the camera and zoom in closer to the image.
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Zoom(1.4)

iren.Initialize()
# Initially pick the cell at this location.
picker.Pick(85, 126, 0, ren)
renWin.Render()
iren.Start()
bsd-3-clause
larroy/mxnet
tests/python/unittest/test_io.py
2
22312
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: skip-file import mxnet as mx import mxnet.ndarray as nd from mxnet.test_utils import * from mxnet.base import MXNetError import numpy as np import os import gzip import pickle as pickle import time try: import h5py except ImportError: h5py = None import sys from common import assertRaises import unittest try: from itertools import izip_longest as zip_longest except: from itertools import zip_longest def test_MNISTIter(): # prepare data get_mnist_ubyte() batch_size = 100 train_dataiter = mx.io.MNISTIter( image="data/train-images-idx3-ubyte", label="data/train-labels-idx1-ubyte", data_shape=(784,), batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10) # test_loop nbatch = 60000 / batch_size batch_count = 0 for batch in train_dataiter: batch_count += 1 assert(nbatch == batch_count) # test_reset train_dataiter.reset() train_dataiter.iter_next() label_0 = train_dataiter.getlabel().asnumpy().flatten() train_dataiter.iter_next() train_dataiter.iter_next() train_dataiter.iter_next() train_dataiter.iter_next() train_dataiter.reset() train_dataiter.iter_next() label_1 = train_dataiter.getlabel().asnumpy().flatten() assert(sum(label_0 - label_1) == 0) def test_Cifar10Rec(): get_cifar10() 
dataiter = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", rand_crop=False, and_mirror=False, shuffle=False, data_shape=(3, 28, 28), batch_size=100, preprocess_threads=4, prefetch_buffer=1) labelcount = [0 for i in range(10)] batchcount = 0 for batch in dataiter: npdata = batch.data[0].asnumpy().flatten().sum() sys.stdout.flush() batchcount += 1 nplabel = batch.label[0].asnumpy() for i in range(nplabel.shape[0]): labelcount[int(nplabel[i])] += 1 for i in range(10): assert(labelcount[i] == 5000) def test_inter_methods_in_augmenter(): def test_Cifar10Rec(): get_cifar10() for inter_method in [0,1,2,3,4,9,10]: dataiter = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", max_rotate_angle=45, inter_method=inter_method) for batch in dataiter: pass def test_image_iter_exception(): def check_cifar10_exception(): get_cifar10() dataiter = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", rand_crop=False, and_mirror=False, shuffle=False, data_shape=(5, 28, 28), batch_size=100, preprocess_threads=4, prefetch_buffer=1) labelcount = [0 for i in range(10)] batchcount = 0 for batch in dataiter: pass assertRaises(MXNetError, check_cifar10_exception) def _init_NDArrayIter_data(data_type, is_image=False): if is_image: data = nd.random.uniform(0, 255, shape=(5000, 1, 28, 28)) labels = nd.ones((5000, 1)) return data, labels if data_type == 'NDArray': data = nd.ones((1000, 2, 2)) labels = nd.ones((1000, 1)) else: data = np.ones((1000, 2, 2)) labels = np.ones((1000, 1)) for i in range(1000): data[i] = i / 100 labels[i] = i / 100 return data, labels def _test_last_batch_handle(data, labels=None, is_image=False): # Test the three parameters 'pad', 'discard', 'roll_over' last_batch_handle_list = ['pad', 'discard', 'roll_over'] if labels is not None and not is_image and len(labels) != 0: labelcount_list = [(124, 100), (100, 96), (100, 96)] if 
is_image: batch_count_list = [40, 39, 39] else: batch_count_list = [8, 7, 7] for idx in range(len(last_batch_handle_list)): dataiter = mx.io.NDArrayIter( data, labels, 128, False, last_batch_handle=last_batch_handle_list[idx]) batch_count = 0 if labels is not None and len(labels) != 0 and not is_image: labelcount = [0 for i in range(10)] for batch in dataiter: if len(data) == 2: assert len(batch.data) == 2 if labels is not None and len(labels) != 0: if not is_image: label = batch.label[0].asnumpy().flatten() # check data if it matches corresponding labels assert((batch.data[0].asnumpy()[:, 0, 0] == label).all()) for i in range(label.shape[0]): labelcount[int(label[i])] += 1 else: assert not batch.label, 'label is not empty list' # keep the last batch of 'pad' to be used later # to test first batch of roll_over in second iteration batch_count += 1 if last_batch_handle_list[idx] == 'pad' and \ batch_count == batch_count_list[0]: cache = batch.data[0].asnumpy() # check if batchifying functionality work properly if labels is not None and len(labels) != 0 and not is_image: assert labelcount[0] == labelcount_list[idx][0], last_batch_handle_list[idx] assert labelcount[8] == labelcount_list[idx][1], last_batch_handle_list[idx] assert batch_count == batch_count_list[idx] # roll_over option dataiter.reset() assert np.array_equal(dataiter.next().data[0].asnumpy(), cache) def _test_shuffle(data, labels=None): dataiter = mx.io.NDArrayIter(data, labels, 1, False) batch_list = [] for batch in dataiter: # cache the original data batch_list.append(batch.data[0].asnumpy()) dataiter = mx.io.NDArrayIter(data, labels, 1, True) idx_list = dataiter.idx i = 0 for batch in dataiter: # check if each data point have been shuffled to corresponding positions assert np.array_equal(batch.data[0].asnumpy(), batch_list[idx_list[i]]) i += 1 def _test_corner_case(): data = np.arange(10) data_iter = mx.io.NDArrayIter(data=data, batch_size=205, shuffle=False, last_batch_handle='pad') expect = 
np.concatenate((np.tile(data, 20), np.arange(5))) assert np.array_equal(data_iter.next().data[0].asnumpy(), expect) def test_NDArrayIter(): dtype_list = ['NDArray', 'ndarray'] tested_data_type = [False, True] for dtype in dtype_list: for is_image in tested_data_type: data, labels = _init_NDArrayIter_data(dtype, is_image) _test_last_batch_handle(data, labels, is_image) _test_last_batch_handle([data, data], labels, is_image) _test_last_batch_handle(data=[data, data], is_image=is_image) _test_last_batch_handle( {'data1': data, 'data2': data}, labels, is_image) _test_last_batch_handle(data={'data1': data, 'data2': data}, is_image=is_image) _test_last_batch_handle(data, [], is_image) _test_last_batch_handle(data=data, is_image=is_image) _test_shuffle(data, labels) _test_shuffle([data, data], labels) _test_shuffle([data, data]) _test_shuffle({'data1': data, 'data2': data}, labels) _test_shuffle({'data1': data, 'data2': data}) _test_shuffle(data, []) _test_shuffle(data) _test_corner_case() def test_NDArrayIter_h5py(): if not h5py: return data, labels = _init_NDArrayIter_data('ndarray') try: os.remove('ndarraytest.h5') except OSError: pass with h5py.File('ndarraytest.h5') as f: f.create_dataset('data', data=data) f.create_dataset('label', data=labels) _test_last_batch_handle(f['data'], f['label']) _test_last_batch_handle(f['data'], []) _test_last_batch_handle(f['data']) try: os.remove("ndarraytest.h5") except OSError: pass def _test_NDArrayIter_csr(csr_iter, csr_iter_empty_list, csr_iter_None, num_rows, batch_size): num_batch = 0 for _, batch_empty_list, batch_empty_None in zip(csr_iter, csr_iter_empty_list, csr_iter_None): assert not batch_empty_list.label, 'label is not empty list' assert not batch_empty_None.label, 'label is not empty list' num_batch += 1 assert(num_batch == num_rows // batch_size) assertRaises(StopIteration, csr_iter.next) assertRaises(StopIteration, csr_iter_empty_list.next) assertRaises(StopIteration, csr_iter_None.next) def test_NDArrayIter_csr(): # 
creating toy data num_rows = rnd.randint(5, 15) num_cols = rnd.randint(1, 20) batch_size = rnd.randint(1, num_rows) shape = (num_rows, num_cols) csr, _ = rand_sparse_ndarray(shape, 'csr') dns = csr.asnumpy() # CSRNDArray or scipy.sparse.csr_matrix with last_batch_handle not equal to 'discard' will throw NotImplementedError assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': csr}, dns, batch_size) try: import scipy.sparse as spsp train_data = spsp.csr_matrix(dns) assertRaises(NotImplementedError, mx.io.NDArrayIter, {'data': train_data}, dns, batch_size) except ImportError: pass # scipy.sparse.csr_matrix with shuffle csr_iter = iter(mx.io.NDArrayIter({'data': train_data}, dns, batch_size, shuffle=True, last_batch_handle='discard')) csr_iter_empty_list = iter(mx.io.NDArrayIter({'data': train_data}, [], batch_size, shuffle=True, last_batch_handle='discard')) csr_iter_None = iter(mx.io.NDArrayIter({'data': train_data}, None, batch_size, shuffle=True, last_batch_handle='discard')) _test_NDArrayIter_csr(csr_iter, csr_iter_empty_list, csr_iter_None, num_rows, batch_size) # CSRNDArray with shuffle csr_iter = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, dns, batch_size, shuffle=True, last_batch_handle='discard')) csr_iter_empty_list = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, [], batch_size, shuffle=True, last_batch_handle='discard')) csr_iter_None = iter(mx.io.NDArrayIter({'csr_data': csr, 'dns_data': dns}, None, batch_size, shuffle=True, last_batch_handle='discard')) _test_NDArrayIter_csr(csr_iter, csr_iter_empty_list, csr_iter_None, num_rows, batch_size) # make iterators csr_iter = iter(mx.io.NDArrayIter( csr, csr, batch_size, last_batch_handle='discard')) begin = 0 for batch in csr_iter: expected = np.zeros((batch_size, num_cols)) end = begin + batch_size expected[:num_rows - begin] = dns[begin:end] if end > num_rows: expected[num_rows - begin:] = dns[0:end - num_rows] assert_almost_equal(batch.data[0].asnumpy(), expected) begin 
+= batch_size def test_LibSVMIter(): def check_libSVMIter_synthetic(): cwd = os.getcwd() data_path = os.path.join(cwd, 'data.t') label_path = os.path.join(cwd, 'label.t') with open(data_path, 'w') as fout: fout.write('1.0 0:0.5 2:1.2\n') fout.write('-2.0\n') fout.write('-3.0 0:0.6 1:2.4 2:1.2\n') fout.write('4 2:-1.2\n') with open(label_path, 'w') as fout: fout.write('1.0\n') fout.write('-2.0 0:0.125\n') fout.write('-3.0 2:1.2\n') fout.write('4 1:1.0 2:-1.2\n') data_dir = os.path.join(cwd, 'data') data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path, data_shape=(3, ), label_shape=(3, ), batch_size=3) first = mx.nd.array([[0.5, 0., 1.2], [0., 0., 0.], [0.6, 2.4, 1.2]]) second = mx.nd.array([[0., 0., -1.2], [0.5, 0., 1.2], [0., 0., 0.]]) i = 0 for batch in iter(data_train): expected = first.asnumpy() if i == 0 else second.asnumpy() data = data_train.getdata() data.check_format(True) assert_almost_equal(data.asnumpy(), expected) i += 1 def check_libSVMIter_news_data(): news_metadata = { 'name': 'news20.t', 'origin_name': 'news20.t.bz2', 'url': "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/news20.t.bz2", 'feature_dim': 62060 + 1, 'num_classes': 20, 'num_examples': 3993, } batch_size = 33 num_examples = news_metadata['num_examples'] data_dir = os.path.join(os.getcwd(), 'data') get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'], news_metadata['origin_name']) path = os.path.join(data_dir, news_metadata['name']) data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],), batch_size=batch_size) for epoch in range(2): num_batches = 0 for batch in data_train: # check the range of labels data = batch.data[0] label = batch.label[0] data.check_format(True) assert(np.sum(label.asnumpy() > 20) == 0) assert(np.sum(label.asnumpy() <= 0) == 0) num_batches += 1 expected_num_batches = num_examples / batch_size assert(num_batches == int(expected_num_batches)), num_batches 
data_train.reset() def check_libSVMIter_exception(): cwd = os.getcwd() data_path = os.path.join(cwd, 'data.t') label_path = os.path.join(cwd, 'label.t') with open(data_path, 'w') as fout: fout.write('1.0 0:0.5 2:1.2\n') fout.write('-2.0\n') # Below line has a neg indice. Should throw an exception fout.write('-3.0 -1:0.6 1:2.4 2:1.2\n') fout.write('4 2:-1.2\n') with open(label_path, 'w') as fout: fout.write('1.0\n') fout.write('-2.0 0:0.125\n') fout.write('-3.0 2:1.2\n') fout.write('4 1:1.0 2:-1.2\n') data_dir = os.path.join(cwd, 'data') data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path, data_shape=(3, ), label_shape=(3, ), batch_size=3) for batch in iter(data_train): data_train.get_data().asnumpy() check_libSVMIter_synthetic() check_libSVMIter_news_data() assertRaises(MXNetError, check_libSVMIter_exception) def test_DataBatch(): from nose.tools import ok_ from mxnet.io import DataBatch import re batch = DataBatch(data=[mx.nd.ones((2, 3))]) ok_(re.match( 'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch))) batch = DataBatch(data=[mx.nd.ones((2, 3)), mx.nd.ones( (7, 8))], label=[mx.nd.ones((4, 5))]) ok_(re.match( 'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch))) def test_CSVIter(): def check_CSVIter_synthetic(dtype='float32'): cwd = os.getcwd() data_path = os.path.join(cwd, 'data.t') label_path = os.path.join(cwd, 'label.t') entry_str = '1' if dtype is 'int32': entry_str = '200000001' if dtype is 'int64': entry_str = '2147483648' with open(data_path, 'w') as fout: for i in range(1000): fout.write(','.join([entry_str for _ in range(8*8)]) + '\n') with open(label_path, 'w') as fout: for i in range(1000): fout.write('0\n') data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8, 8), label_csv=label_path, batch_size=100, dtype=dtype) expected = mx.nd.ones((100, 8, 8), dtype=dtype) * int(entry_str) for batch in iter(data_train): data_batch = data_train.getdata() 
assert_almost_equal(data_batch.asnumpy(), expected.asnumpy()) assert data_batch.asnumpy().dtype == expected.asnumpy().dtype for dtype in ['int32', 'int64', 'float32']: check_CSVIter_synthetic(dtype=dtype) def test_ImageRecordIter_seed_augmentation(): get_cifar10() seed_aug = 3 def assert_dataiter_items_equals(dataiter1, dataiter2): """ Asserts that two data iterators have the same numbner of batches, that the batches have the same number of items, and that the items are the equal. """ for batch1, batch2 in zip_longest(dataiter1, dataiter2): # ensure iterators contain the same number of batches # zip_longest will return None if on of the iterators have run out of batches assert batch1 and batch2, 'The iterators do not contain the same number of batches' # ensure batches are of same length assert len(batch1.data) == len(batch2.data), 'The returned batches are not of the same length' # ensure batch data is the same for i in range(0, len(batch1.data)): data1 = batch1.data[i].asnumpy().astype(np.uint8) data2 = batch2.data[i].asnumpy().astype(np.uint8) assert(np.array_equal(data1, data2)) def assert_dataiter_items_not_equals(dataiter1, dataiter2): """ Asserts that two data iterators have the same numbner of batches, that the batches have the same number of items, and that the items are the _not_ equal. 
""" for batch1, batch2 in zip_longest(dataiter1, dataiter2): # ensure iterators are of same length # zip_longest will return None if on of the iterators have run out of batches assert batch1 and batch2, 'The iterators do not contain the same number of batches' # ensure batches are of same length assert len(batch1.data) == len(batch2.data), 'The returned batches are not of the same length' # ensure batch data is the same for i in range(0, len(batch1.data)): data1 = batch1.data[i].asnumpy().astype(np.uint8) data2 = batch2.data[i].asnumpy().astype(np.uint8) if not np.array_equal(data1, data2): return assert False, 'Expected data iterators to be different, but they are the same' # check whether to get constant images after fixing seed_aug dataiter1 = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", shuffle=False, data_shape=(3, 28, 28), batch_size=3, rand_crop=True, rand_mirror=True, max_random_scale=1.3, max_random_illumination=3, max_rotate_angle=10, random_l=50, random_s=40, random_h=10, max_shear_ratio=2, seed_aug=seed_aug) dataiter2 = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", shuffle=False, data_shape=(3, 28, 28), batch_size=3, rand_crop=True, rand_mirror=True, max_random_scale=1.3, max_random_illumination=3, max_rotate_angle=10, random_l=50, random_s=40, random_h=10, max_shear_ratio=2, seed_aug=seed_aug) assert_dataiter_items_equals(dataiter1, dataiter2) # check whether to get different images after change seed_aug dataiter1.reset() dataiter2 = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", shuffle=False, data_shape=(3, 28, 28), batch_size=3, rand_crop=True, rand_mirror=True, max_random_scale=1.3, max_random_illumination=3, max_rotate_angle=10, random_l=50, random_s=40, random_h=10, max_shear_ratio=2, seed_aug=seed_aug+1) assert_dataiter_items_not_equals(dataiter1, dataiter2) # check whether seed_aug 
changes the iterator behavior dataiter1 = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", shuffle=False, data_shape=(3, 28, 28), batch_size=3, seed_aug=seed_aug) dataiter2 = mx.io.ImageRecordIter( path_imgrec="data/cifar/train.rec", mean_img="data/cifar/cifar10_mean.bin", shuffle=False, data_shape=(3, 28, 28), batch_size=3, seed_aug=seed_aug) assert_dataiter_items_equals(dataiter1, dataiter2) if __name__ == "__main__": test_NDArrayIter() if h5py: test_NDArrayIter_h5py() test_MNISTIter() test_Cifar10Rec() test_LibSVMIter() test_NDArrayIter_csr() test_CSVIter() test_ImageRecordIter_seed_augmentation() test_image_iter_exception()
apache-2.0
yize/grunt-tps
tasks/lib/python/Lib/python2.7/collections.py
35
25883
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict'] # For bootstrapping reasons, the collection ABCs are defined in _abcoll.py. # They should however be considered an integral part of collections.py. from _abcoll import * import _abcoll __all__ += _abcoll.__all__ from _collections import deque, defaultdict from operator import itemgetter as _itemgetter, eq as _eq from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq from itertools import repeat as _repeat, chain as _chain, starmap as _starmap from itertools import imap as _imap try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident ################################################################################ ### OrderedDict ################################################################################ class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. 
''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] return dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, _ = self.__map.pop(key) link_prev[1] = link_next # update link_prev[NEXT] link_next[0] = link_prev # update link_next[PREV] def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root[1] # start at the first node while curr is not root: yield curr[2] # yield the curr[KEY] curr = curr[1] # move to next node def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root[0] # start at the last node while curr is not root: yield curr[2] # yield the curr[KEY] curr = curr[0] # move to previous node def clear(self): 'od.clear() -> None. Remove all items from od.' 
root = self.__root root[:] = [root, root, None] self.__map.clear() dict.clear(self) # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) pairs in od' for k in self: yield (k, self[k]) update = MutableMapping.update __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') key = next(reversed(self) if last else iter(self)) value = self.pop(key) return key, value def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return dict.__eq__(self, other) and all(_imap(_eq, self, other)) return dict.__eq__(self, other) def __ne__(self, other): 'od.__ne__(y) <==> od!=y' return not self == other # -- the following methods support python 3.x style dictionary views -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self) ################################################################################ ### namedtuple ################################################################################ _class_template = '''\ class {typename}(tuple): '{typename}({arg_list})' __slots__ = () _fields = {field_names!r} def __new__(_cls, {arg_list}): 'Create new instance of {typename}({arg_list})' return _tuple.__new__(_cls, ({arg_list})) @classmethod def _make(cls, iterable, 
new=tuple.__new__, len=len): 'Make a new {typename} object from a sequence or iterable' result = new(cls, iterable) if len(result) != {num_fields:d}: raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) return result def __repr__(self): 'Return a nicely formatted representation string' return '{typename}({repr_fmt})' % self def _asdict(self): 'Return a new OrderedDict which maps field names to their values' return OrderedDict(zip(self._fields, self)) def _replace(_self, **kwds): 'Return a new {typename} object replacing specified fields with new values' result = _self._make(map(kwds.pop, {field_names!r}, _self)) if kwds: raise ValueError('Got unexpected field names: %r' % kwds.keys()) return result def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) {field_defs} ''' _repr_template = '{name}=%r' _field_template = '''\ {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') ''' def namedtuple(typename, field_names, verbose=False, rename=False): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessable by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. 
if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) if rename: seen = set() for index, name in enumerate(field_names): if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name) or not name or name[0].isdigit() or name.startswith('_') or name in seen): field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' 'keyword: %r' % name) if name[0].isdigit(): raise ValueError('Type names and field names cannot start with ' 'a number: %r' % name) seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' '%r' % name) if name in seen: raise ValueError('Encountered duplicate field name: %r' % name) seen.add(name) # Fill-in the class template class_definition = _class_template.format( typename = typename, field_names = tuple(field_names), num_fields = len(field_names), arg_list = repr(tuple(field_names)).replace("'", "")[1:-1], repr_fmt = ', '.join(_repr_template.format(name=name) for name in field_names), field_defs = '\n'.join(_field_template.format(index=index, name=name) for index, name in enumerate(field_names)) ) if verbose: print class_definition # Execute the template string in a temporary namespace and support # tracing utilities by setting a value for frame.f_globals['__name__'] namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, OrderedDict=OrderedDict, _property=property, _tuple=tuple) try: exec class_definition in namespace except SyntaxError as e: raise SyntaxError(e.message + ':\n' + class_definition) result = namespace[typename] # For pickling to work, the __module__ variable needs to be set to 
the frame # where the named tuple is created. Bypass this step in enviroments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython). try: result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass return result ######################################################################## ### Counter ######################################################################## class Counter(dict): '''Dict subclass for counting hashable items. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts 15 >>> c['a'] # count of letter 'a' 5 >>> for elem in 'shazam': # update counts from an iterable ... 
c[elem] += 1 # by adding 1 to each element's count >>> c['a'] # now there are seven 'a' 7 >>> del c['b'] # remove all 'b' >>> c['b'] # now there are zero 'b' 0 >>> d = Counter('simsalabim') # make another counter >>> c.update(d) # add in the second counter >>> c['a'] # now there are nine 'a' 9 >>> c.clear() # empty the counter >>> c Counter() Note: If a count is set to zero or reduced to zero, it will remain in the counter until the entry is deleted or the counter is cleared: >>> c = Counter('aaabbc') >>> c['b'] -= 2 # reduce the count of 'b' by two >>> c.most_common() # 'b' is still in, but its count is zero [('a', 3), ('c', 1), ('b', 0)] ''' # References: # http://en.wikipedia.org/wiki/Multiset # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm # http://code.activestate.com/recipes/259174/ # Knuth, TAOCP Vol. II section 4.6.3 def __init__(self, iterable=None, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. Or, initialize the count from another mapping of elements to their counts. >>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' super(Counter, self).__init__() self.update(iterable, **kwds) def __missing__(self, key): 'The count of elements not in the Counter is zero.' # Needed so that self[missing_item] does not raise KeyError return 0 def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. 
>>> Counter('abcdeabcdabcaba').most_common(3) [('a', 5), ('b', 4), ('c', 3)] ''' # Emulate Bag.sortedByCount from Smalltalk if n is None: return sorted(self.iteritems(), key=_itemgetter(1), reverse=True) return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) >>> product = 1 >>> for factor in prime_factors.elements(): # loop over factors ... product *= factor # and multiply them >>> product 1836 Note, if an element's count has been set to zero or is a negative number, elements() will ignore it. ''' # Emulate Bag.do from Smalltalk and Multiset.begin from C++. return _chain.from_iterable(_starmap(_repeat, self.iteritems())) # Override dict methods where necessary @classmethod def fromkeys(cls, iterable, v=None): # There is no equivalent method for counters because setting v=1 # means that no element can have a count greater than one. raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(self, iterable=None, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. 
Both the inputs # and outputs are allowed to contain zero and negative counts. if iterable is not None: if isinstance(iterable, Mapping): if self: self_get = self.get for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: super(Counter, self).update(iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: self[elem] = self_get(elem, 0) + 1 if kwds: self.update(kwds) def subtract(self, iterable=None, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if iterable is not None: self_get = self.get if isinstance(iterable, Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds) def copy(self): 'Return a shallow copy.' return self.__class__(self) def __reduce__(self): return self.__class__, (dict(self),) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: super(Counter, self).__delitem__(elem) def __repr__(self): if not self: return '%s()' % self.__class__.__name__ items = ', '.join(map('%r: %r'.__mod__, self.most_common())) return '%s({%s})' % (self.__class__.__name__, items) # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. 
# # To strip negative and zero counts, add-in an empty counter: # c += Counter() def __add__(self, other): '''Add counts from two counters. >>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count + other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count - other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count < 0: result[elem] = 0 - count return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = other_count if count < other_count else count if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. 
>>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = count if count < other_count else other_count if newcount > 0: result[elem] = newcount return result if __name__ == '__main__': # verify that instances can be pickled from cPickle import loads, dumps Point = namedtuple('Point', 'x, y', True) p = Point(x=10, y=20) assert p == loads(dumps(p)) # test and demonstrate ability to override methods class Point(namedtuple('Point', 'x y')): __slots__ = () @property def hypot(self): return (self.x ** 2 + self.y ** 2) ** 0.5 def __str__(self): return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot) for p in Point(3, 4), Point(14, 5/7.): print p class Point(namedtuple('Point', 'x y')): 'Point class with optimized _make() and _replace() without error-checking' __slots__ = () _make = classmethod(tuple.__new__) def _replace(self, _map=map, **kwds): return self._make(_map(kwds.get, ('x', 'y'), self)) print Point(11, 22)._replace(x=100) Point3D = namedtuple('Point3D', Point._fields + ('z',)) print Point3D.__doc__ import doctest TestResults = namedtuple('TestResults', 'failed attempted') print TestResults(*doctest.testmod())
mit
xupei0610/ComputerGraphics-HW
hw4/lib/assimp/contrib/gtest/test/gtest_filter_unittest.py
364
21325
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re try: from sets import Set as set # For Python 2.3 compatibility except ImportError: pass import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. 
FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. 
# Private copy of the environment, passed to every child test process.
# Mutated via SetEnvVar()/InvokeWithModifiedEnv() instead of os.environ so the
# parent process environment is never disturbed.
environ = os.environ.copy()


def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is not None:
    environ[env_var] = value
  elif env_var in environ:
    del environ[env_var]


def RunAndReturnOutput(args = None):
  """Runs the test program and returns its output."""

  return gtest_test_utils.Subprocess([COMMAND] + (args or []),
                                     env=environ).output


def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""

  p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  test_case = ''
  test = ''
  for line in p.output.split('\n'):
    match = TEST_CASE_REGEX.match(line)
    if match is not None:
      # A new test case banner; remember it for the following RUN lines.
      test_case = match.group(1)
    else:
      match = TEST_REGEX.match(line)
      if match is not None:
        test = match.group(1)
        tests_run.append(test_case + '.' + test)
  return (tests_run, p.exit_code)


def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment.

  The module-level 'environ' dict is temporarily updated with extra_env and
  restored afterwards, even if 'function' raises.
  """

  try:
    original_env = environ.copy()
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(original_env)


def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""

  extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
               TOTAL_SHARDS_ENV_VAR: str(total_shards)}
  return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)

# The unit test.


class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.
def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(set(set_var), set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(set(tests_to_run) - set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. 
Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. 
Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) 
self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' 
in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. 
self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
mit
CACTUS-Mission/TRAPSat
TRAPSat_cFS/cfs/cfe/tools/cFS-GroundSystem/MainWindow.py
1
7489
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created: Wed Jun 24 09:56:47 2015
#      by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
# (This module is machine-generated; edit 'MainWindow.ui' and re-run pyuic4
#  instead of changing this file by hand.)

from PyQt4 import QtCore, QtGui

# Compatibility shims for PyQt4 API v1 vs v2 string handling.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_MainWindow(object):
    # Builds the cFS Ground System main window: a title label, an IP-address
    # selector row, two launcher buttons, a help label and a Close button.
    def setupUi(self, MainWindow):
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.setEnabled(True)
        MainWindow.resize(420, 217)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(24)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Title label.
        self.labelHomeTittle = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(22)
        font.setBold(False)
        font.setWeight(50)
        self.labelHomeTittle.setFont(font)
        self.labelHomeTittle.setAlignment(QtCore.Qt.AlignCenter)
        self.labelHomeTittle.setObjectName(_fromUtf8("labelHomeTittle"))
        self.verticalLayout.addWidget(self.labelHomeTittle)
        self.line_2 = QtGui.QFrame(self.centralwidget)
        self.line_2.setFrameShape(QtGui.QFrame.HLine)
        self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
        self.line_2.setObjectName(_fromUtf8("line_2"))
        self.verticalLayout.addWidget(self.line_2)
        # IP address selection row (label + combo box, centered by spacers).
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.label_3 = QtGui.QLabel(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
        self.label_3.setSizePolicy(sizePolicy)
        self.label_3.setMinimumSize(QtCore.QSize(141, 0))
        self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout.addWidget(self.label_3)
        self.comboBoxIpAddresses = QtGui.QComboBox(self.centralwidget)
        self.comboBoxIpAddresses.setMinimumSize(QtCore.QSize(132, 0))
        self.comboBoxIpAddresses.setMaximumSize(QtCore.QSize(132, 16777215))
        self.comboBoxIpAddresses.setObjectName(_fromUtf8("comboBoxIpAddresses"))
        self.comboBoxIpAddresses.addItem(_fromUtf8(""))
        self.horizontalLayout.addWidget(self.comboBoxIpAddresses)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Launcher buttons row (telemetry / command systems).
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setSpacing(32)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.pushButtonStartTlm = QtGui.QPushButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButtonStartTlm.sizePolicy().hasHeightForWidth())
        self.pushButtonStartTlm.setSizePolicy(sizePolicy)
        self.pushButtonStartTlm.setObjectName(_fromUtf8("pushButtonStartTlm"))
        self.horizontalLayout_2.addWidget(self.pushButtonStartTlm)
        self.pushButtonStartCmd = QtGui.QPushButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButtonStartCmd.sizePolicy().hasHeightForWidth())
        self.pushButtonStartCmd.setSizePolicy(sizePolicy)
        self.pushButtonStartCmd.setObjectName(_fromUtf8("pushButtonStartCmd"))
        self.horizontalLayout_2.addWidget(self.pushButtonStartCmd)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.line = QtGui.QFrame(self.centralwidget)
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.verticalLayout.addWidget(self.line)
        # Bottom row: help hint label + Close button.
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.label_5 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(11)
        self.label_5.setFont(font)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.horizontalLayout_4.addWidget(self.label_5)
        self.pushButton = QtGui.QPushButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
        self.pushButton.setSizePolicy(sizePolicy)
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.horizontalLayout_4.addWidget(self.pushButton)
        self.verticalLayout.addLayout(self.horizontalLayout_4)
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        # Old-style signal/slot connection: the Close button closes the window.
        QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.close)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Sets all user-visible strings (kept separate by pyuic4 so they can
        # be re-applied on language change).
        MainWindow.setWindowTitle(_translate("MainWindow", "Main Window", None))
        self.labelHomeTittle.setText(_translate("MainWindow", "cFS Ground System", None))
        self.label_3.setText(_translate("MainWindow", "Selected IP Address: ", None))
        self.comboBoxIpAddresses.setItemText(0, _translate("MainWindow", "All", None))
        self.pushButtonStartTlm.setText(_translate("MainWindow", "Start Telemetry System", None))
        self.pushButtonStartCmd.setText(_translate("MainWindow", "Start Command System", None))
        self.label_5.setText(_translate("MainWindow", "*Read Guide-GroundSystem.txt for help", None))
        self.pushButton.setText(_translate("MainWindow", "Close", None))


if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
mit
plum-umd/pasket
pasket/rewrite/proxy.py
1
3168
import copy as cp import logging import lib.const as C import lib.visit as v from .. import util from ..meta import class_lookup from ..meta.template import Template from ..meta.clazz import Clazz from ..meta.method import Method from ..meta.field import Field from ..meta.statement import Statement, to_statements from ..meta.expression import Expression class Proxy(object): @classmethod def find_proxy(cls): return lambda anno: anno.by_name(C.A.PROXY) def __init__(self, smpls): self._smpls = smpls @v.on("node") def visit(self, node): """ This is the generic method to initialize the dynamic dispatcher """ @v.when(Template) def visit(self, node): pass ## @Proxy(P) ## class C { } ## => ## class C { ## P _proxy; ## C(P p) { _proxy = p; } ## m$i$(...) { _proxy.m$i$(...); } ## } @v.when(Clazz) def visit(self, node): if not util.exists(Proxy.find_proxy(), node.annos): return _anno = util.find(Proxy.find_proxy(), node.annos) if hasattr(_anno, "cid"): proxy = _anno.cid elif len(node.sups) == 1: proxy = node.sups[0] else: raise Exception("ambiguous proxy", _anno) cname = node.name logging.debug("reducing: @{}({}) class {}".format(C.A.PROXY, proxy, cname)) cls_p = class_lookup(proxy) setattr(node, "proxy", cls_p) # introduce a field to hold the proxy instance: P _proxy fld = Field(clazz=node, typ=proxy, name=u"_proxy") node.add_flds([fld]) node.init_fld(fld) # if purely empty proxy # add methods that delegate desired operations to the proxy if not node.mtds: for mtd_p in cls_p.mtds: mname = mtd.name mtd_cp = cp.deepcopy(mtd_p) mtd_cp.clazz = node if not mtd_p.is_init: args = ", ".join(map(lambda (_, nm): nm, mtd_p.params)) body = u"_proxy.{mname}({args});".format(**locals()) if mtd_p.typ != C.J.v: body = u"return " + body else: # cls' own <init>: cname(P p) { _proxy = p; } mtd_cp.name = cname mtd_cp.typ = cname mtd_cp.params = [ (proxy, u"x") ] body = u"_proxy = x;" mtd_cp.body = to_statements(mtd_p, body) logging.debug("{} ~> {}".format(mtd_cp.signature, mtd_p.signature)) 
node.add_mtds([mtd_cp]) @v.when(Field) def visit(self, node): pass @v.when(Method) def visit(self, node): if not hasattr(node.clazz, "proxy"): return cls_p = getattr(node.clazz, "proxy") mtd_p = cls_p.mtd_by_sig(node.name, node.param_typs) if mtd_p: # method delegation logging.debug("{} ~> {}".format(node.signature, mtd_p.signature)) mname = node.name args = ", ".join(map(lambda (_, nm): nm, node.params)) body = u"_proxy.{mname}({args});".format(**locals()) if node.typ != C.J.v: body = u"return " + body node.body = to_statements(node, body) mname = node.name if mname.startswith("get") and mname.endswith(cls_p.name): body = u"return _proxy;" node.body = to_statements(node, body) @v.when(Statement) def visit(self, node): return [node] @v.when(Expression) def visit(self, node): return node
mit
undoware/neutron-drive
google_appengine/lib/PyAMF/pyamf/tests/test_imports.py
26
2326
# Copyright (c) The PyAMF Project. # See LICENSE.txt for details. """ Tests pyamf.util.imports @since: 0.3.1 """ import unittest import sys import os.path from pyamf.util import imports class InstalledTestCase(unittest.TestCase): """ Tests to ensure that L{imports.finder} is installed in L{sys.meta_path} """ def test_installed(self): f = imports.finder self.assertTrue(f in sys.meta_path) self.assertIdentical(sys.meta_path[0], f) class ImportsTestCase(unittest.TestCase): def setUp(self): self.finder = imports.finder self._state = self.finder.__getstate__() path = os.path.join(os.path.dirname(__file__), 'imports') sys.path.insert(0, path) def tearDown(self): self.finder.__setstate__(self._state) del sys.path[0] self._clearModules('spam') def _clearModules(self, *args): for mod in args: for k, v in sys.modules.copy().iteritems(): if k.startswith(mod) or k == 'pyamf.tests.%s' % (mod,): del sys.modules[k] class WhenImportedTestCase(ImportsTestCase): """ Tests for L{imports.when_imported} """ def setUp(self): ImportsTestCase.setUp(self) self.executed = False def _hook(self, module): self.executed = True def _check_module(self, mod): name = mod.__name__ self.assertTrue(name in sys.modules) self.assertIdentical(sys.modules[name], mod) def test_import(self): imports.when_imported('spam', self._hook) self.assertFalse(self.executed) import spam self._check_module(spam) self.assertTrue(self.executed) def test_already_imported(self): import spam self.assertFalse(self.executed) imports.when_imported('spam', self._hook) self._check_module(spam) self.assertTrue(self.executed) def test_failed_hook(self): def h(mod): raise RuntimeError imports.when_imported('spam', h) try: import spam except Exception, e: pass else: self.fail('expected exception') self.assertFalse('spam' in self.finder.loaded_modules) self.assertEqual(e.__class__, RuntimeError)
bsd-3-clause
replicatorg/ReplicatorG
skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/analyze_plugins/analyze_utilities/tableau.py
6
34046
""" Tableau has a couple of base classes for analyze viewers. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.hidden_scrollbar import HiddenScrollbar from fabmetheus_utilities import archive from fabmetheus_utilities import euclidean from fabmetheus_utilities import gcodec from fabmetheus_utilities import settings from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import zoom_in from skeinforge_application.skeinforge_plugins.analyze_plugins.analyze_utilities import zoom_out import math import os __author__ = 'Enrique Perez (perez_enrique@yahoo.com)' __date__ = '$Date: 2008/21/04 $' __license__ = 'GPL 3.0' def getGeometricDifference( first, second ): "Get the geometric difference of the two numbers." return max( first, second ) / min( first, second ) def getGridHorizontalFrame( gridPosition ): "Get the grid horizontal object with a frame from the grid position." gridHorizontal = settings.GridHorizontal( 0, 0 ) gridHorizontal.master = settings.Tkinter.Frame( gridPosition.master, borderwidth = 1, padx = 3, relief = 'raised') gridHorizontal.master.grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.E ) return gridHorizontal def getLengthMinusOneMinimumOne( elementList ): "Get the length of the length minus one, minimum one." return max( 1, len( elementList ) - 1 ) def getScrollbarCanvasPortion( scrollbar ): "Get the canvas portion of the scrollbar." scrollbarBeginEnd = scrollbar.get() return scrollbarBeginEnd[1] - scrollbarBeginEnd[0] def setStateNormalDisabled( active, widget ): "Set the state of the widget to normal if active and disabled if inactive." 
if active: widget.config( state = settings.Tkinter.NORMAL ) else: widget.config( state = settings.Tkinter.DISABLED ) def startMainLoopFromWindow( tableauWindow ): "Display the tableau window and start the main loop." if tableauWindow == None: print('Warning, tableauWindow in startMainLoopFromWindow in tableau is none, so the window will not be displayed.') else: tableauWindow.root.mainloop() class ColoredLine: "A colored index line." def __init__( self, begin, colorName, displayString, end, tagString ): "Set the color name and corners." self.begin = begin self.colorName = colorName self.displayString = displayString self.end = end self.tagString = tagString def __repr__(self): "Get the string representation of this colored index line." return '%s, %s, %s, %s' % ( self.colorName, self.begin, self.end, self.tagString ) class ExportCanvasDialog: "A class to display the export canvas repository dialog." def addPluginToMenu( self, canvas, fileName, menu, name, suffix ): "Add the display command to the menu." self.canvas = canvas self.fileName = fileName self.name = name self.suffix = suffix menu.add_command( label = settings.getEachWordCapitalized( self.name ), command = self.display ) def display(self): "Display the export canvas repository dialog." 
for repositoryDialog in settings.globalRepositoryDialogListTable: if repositoryDialog.repository.lowerName == self.name: repositoryDialog.setCanvasFileNameSuffix( self.canvas, self.skein.fileName, self.suffix ) settings.liftRepositoryDialogs( settings.globalRepositoryDialogListTable[ repositoryDialog ] ) return exportCanvasPluginsFolderPath = archive.getAbsoluteFolderPath( os.path.dirname( __file__ ), 'export_canvas_plugins') pluginModule = archive.getModuleWithDirectoryPath( exportCanvasPluginsFolderPath, self.name ) if pluginModule == None: return None pluginRepository = pluginModule.getNewRepository() pluginRepository.setCanvasFileNameSuffix( self.canvas, self.fileName, self.suffix ) settings.getDisplayedDialogFromConstructor( pluginRepository ) class TableauRepository: "The viewer base repository class." def addAnimation(self): "Add the animation settings." self.frameList = settings.FrameList().getFromValue('Frame List', self, [] ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Animation -', self ) self.animationLineQuickening = settings.FloatSpinUpdate().getFromValue( 0.5, 'Animation Line Quickening (ratio):', self, 4.5, 1.0 ) self.animationSlideShowRate = settings.FloatSpinUpdate().getFromValue( 1.0, 'Animation Slide Show Rate (layers/second):', self, 5.0, 2.0 ) settings.LabelSeparator().getFromRepository(self) def addScaleScreenSlide(self): "Add the scale, screen and slide show settings." 
self.scale = settings.FloatSpinNotOnMenu().getFromValue( 10.0, 'Scale (pixels per millimeter):', self, 50.0, 15.0 ) settings.LabelSeparator().getFromRepository(self) settings.LabelDisplay().getFromName('- Screen Inset -', self ) self.screenHorizontalInset = settings.IntSpin().getFromValue( 80, 'Screen Horizontal Inset (pixels):', self, 1000, 100 ) self.screenVerticalInset = settings.IntSpin().getFromValue( 120, 'Screen Vertical Inset (pixels):', self, 1000, 220 ) settings.LabelSeparator().getFromRepository(self) def setToDisplaySave(self, event=None): "Set the setting values to the display, save the new values." for menuEntity in self.menuEntities: if menuEntity in self.preferences: menuEntity.setToDisplay() settings.writeSettings(self) class TableauWindow: def activateMouseModeTool(self): "Activate the mouse mode tool." self.repository.setToDisplaySave() self.canvas.focus_set() self.createMouseModeTool() self.mouseTool.update() def addCanvasMenuRootScrollSkein( self, repository, skein, suffix, title ): "Add the canvas, menu bar, scroll bar, skein panes, tableau repository, root and skein." 
self.imagesDirectoryPath = archive.getFabmetheusUtilitiesPath('images') self.movementTextID = None self.mouseInstantButtons = [] self.photoImages = {} self.repository = repository self.root = settings.Tkinter.Tk() self.gridPosition = settings.GridVertical( 0, 1 ) self.gridPosition.master = self.root self.root.title( os.path.basename( skein.fileName ) + ' - ' + title ) self.rulingExtent = 24 self.rulingTargetSeparation = 150.0 self.screenSize = skein.screenSize self.skein = skein self.skeinPanes = skein.skeinPanes self.suffix = suffix self.timerID = None repository.animationSlideShowRate.value = max( repository.animationSlideShowRate.value, 0.01 ) repository.animationSlideShowRate.value = min( repository.animationSlideShowRate.value, 85.0 ) repository.drawArrows.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) repository.goAroundExtruderOffTravel.setUpdateFunction( self.setWindowToDisplaySavePhoenixUpdate ) repository.layerExtraSpan.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) repository.widthOfSelectionThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) repository.widthOfTravelThread.setUpdateFunction( self.setWindowToDisplaySaveUpdate ) repository.window = self for menuRadio in repository.mouseMode.menuRadios: fileName = menuRadio.name.lower() fileName = fileName.replace(' ', '_') + '.ppm' menuRadio.mouseButton = self.getPhotoButtonGridIncrement( menuRadio.invoke, fileName, self.gridPosition ) self.gridPosition = settings.GridHorizontal( 1, 99 ) self.gridPosition.master = self.root self.xScrollbar = HiddenScrollbar( self.root, orient = settings.Tkinter.HORIZONTAL ) self.xScrollbar.grid( row = 98, column = 2, columnspan = 96, sticky = settings.Tkinter.E + settings.Tkinter.W ) self.yScrollbar = HiddenScrollbar( self.root ) self.yScrollbar.grid( row = 1, rowspan = 97, column = 99, sticky = settings.Tkinter.N + settings.Tkinter.S ) self.canvasHeight = min( int( skein.screenSize.imag ), self.root.winfo_screenheight() - 
repository.screenVerticalInset.value ) self.canvasWidth = min( int( skein.screenSize.real ), self.root.winfo_screenwidth() - repository.screenHorizontalInset.value ) scrollRegionBoundingBox = ( 0, 0, int( skein.screenSize.real ), int( skein.screenSize.imag ) ) self.canvas = settings.Tkinter.Canvas( self.root, highlightthickness = 3, width = self.canvasWidth, height = self.canvasHeight, scrollregion = scrollRegionBoundingBox ) self.canvas.grid( row = 1, rowspan = 97, column = 2, columnspan = 96, sticky = settings.Tkinter.E + settings.Tkinter.W + settings.Tkinter.N + settings.Tkinter.S ) self.fileHelpMenuBar = settings.FileHelpMenuBar( self.root ) self.exportMenu = settings.Tkinter.Menu( self.fileHelpMenuBar.fileMenu, tearoff = 0 ) self.fileHelpMenuBar.fileMenu.add_cascade( label = "Export", menu = self.exportMenu, underline = 0 ) exportCanvasPluginsFolderPath = archive.getAbsoluteFolderPath( os.path.dirname( __file__ ), 'export_canvas_plugins') exportCanvasPluginFileNames = archive.getPluginFileNamesFromDirectoryPath( exportCanvasPluginsFolderPath ) for exportCanvasPluginFileName in exportCanvasPluginFileNames: ExportCanvasDialog().addPluginToMenu( self.canvas, skein.fileName, self.exportMenu, exportCanvasPluginFileName, suffix ) self.fileHelpMenuBar.fileMenu.add_separator() self.fileHelpMenuBar.completeMenu( self.close, repository, self.save, self ) def addLayer( self, gridPosition ): "Add the layer frame items." 
self.diveButton = self.getPhotoButtonGridIncrement( self.dive, 'dive.ppm', gridPosition ) self.soarButton = self.getPhotoButtonGridIncrement( self.soar, 'soar.ppm', gridPosition ) gridPosition.increment() settings.Tkinter.Label( gridPosition.master, text = 'Layer:').grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) gridPosition.increment() self.limitIndex() self.layerEntry = settings.Tkinter.Spinbox( gridPosition.master, command = self.layerEntryReturnPressed, from_ = 0, increment = 1, to = getLengthMinusOneMinimumOne( self.skeinPanes ) ) self.layerEntry.bind('<Return>', self.layerEntryReturnPressed ) self.layerEntry.grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) def addLine( self, gridPosition ): "Add the line frame items." self.lineDiveButton = self.getPhotoButtonGridIncrement( self.lineDive, 'dive.ppm', gridPosition ) self.lineSoarButton = self.getPhotoButtonGridIncrement( self.lineSoar, 'soar.ppm', gridPosition ) gridPosition.increment() settings.Tkinter.Label( gridPosition.master, text = 'Line:').grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) gridPosition.increment() self.lineEntry = settings.Tkinter.Spinbox( gridPosition.master, command = self.lineEntryReturnPressed, from_ = 0, increment = 1, to = getLengthMinusOneMinimumOne( self.getColoredLines() ) ) self.lineEntry.bind('<Return>', self.lineEntryReturnPressed ) self.lineEntry.grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) def addMouseInstantTool( self, fileName, gridPosition, mouseInstantTool ): "Add the mouse instant tool and derived photo button." 
mouseInstantTool.getReset(self) photoButton = self.getPhotoButtonGridIncrement( mouseInstantTool.click, fileName, gridPosition ) mouseInstantTool.mouseButton = photoButton self.mouseInstantButtons.append( photoButton ) def addMouseToolsBind(self): "Add the mouse tool and bind button one clicked, button one released and motion." self.xScrollbar.config( command = self.relayXview ) self.yScrollbar.config( command = self.relayYview ) self.canvas['xscrollcommand'] = self.xScrollbar.set self.canvas['yscrollcommand'] = self.yScrollbar.set settings.CloseListener( self, self.destroyAllDialogWindows ).listenToWidget( self.canvas ) self.canvasScreenCenter = 0.5 * complex( float( self.canvasWidth ) / float( self.screenSize.real ), float( self.canvasHeight ) / float( self.screenSize.imag ) ) self.addPhotoImage('stop.ppm', self.gridPosition ) self.gridPosition.increment() self.addLayer( getGridHorizontalFrame( self.gridPosition ) ) self.gridPosition.increment() self.addLine( getGridHorizontalFrame( self.gridPosition ) ) self.gridPosition.increment() self.addScale( getGridHorizontalFrame( self.gridPosition ) ) self.gridPosition = settings.GridVertical( self.gridPosition.columnStart + 1, self.gridPosition.row ) self.gridPosition.master = self.root for name in self.repository.frameList.value: entity = self.getEntityFromName( name ) if entity != None: self.gridPosition.incrementGivenNumberOfColumns( 3 ) entity.addToDialog( getGridHorizontalFrame( self.gridPosition ) ) for menuRadio in self.repository.mouseMode.menuRadios: menuRadio.mouseTool = menuRadio.getNewMouseToolFunction().getReset(self) self.mouseTool = menuRadio.mouseTool self.createMouseModeTool() self.canvas.bind('<Button-1>', self.button1 ) self.canvas.bind('<ButtonRelease-1>', self.buttonRelease1 ) self.canvas.bind('<KeyPress-Down>', self.keyPressDown ) self.canvas.bind('<KeyPress-Left>', self.keyPressLeft ) self.canvas.bind('<KeyPress-Right>', self.keyPressRight ) self.canvas.bind('<KeyPress-Up>', self.keyPressUp ) 
self.canvas.bind('<Motion>', self.motion ) self.canvas.bind('<Return>', self.keyPressReturn ) self.canvas.bind('<Shift-ButtonRelease-1>', self.shiftButtonRelease1 ) self.canvas.bind('<Shift-Motion>', self.shiftMotion ) self.layerEntry.bind('<Destroy>', self.cancelTimer ) self.root.grid_columnconfigure( 44, weight = 1 ) self.root.grid_rowconfigure( 44, weight = 1 ) self.resetPeriodicButtonsText() self.repository.animationLineQuickening.setUpdateFunction( self.repository.setToDisplaySave ) self.repository.animationSlideShowRate.setUpdateFunction( self.repository.setToDisplaySave ) self.repository.screenHorizontalInset.setUpdateFunction( self.redisplayWindowUpdate ) self.repository.screenVerticalInset.setUpdateFunction( self.redisplayWindowUpdate ) rankZeroSeperation = self.getRulingSeparationWidthPixels( 0 ) zoom = self.rulingTargetSeparation / rankZeroSeperation self.rank = euclidean.getRank( zoom ) rankTop = self.rank + 1 seperationBottom = self.getRulingSeparationWidthPixels( self.rank ) seperationTop = self.getRulingSeparationWidthPixels( rankTop ) bottomDifference = getGeometricDifference( self.rulingTargetSeparation, seperationBottom ) topDifference = getGeometricDifference( self.rulingTargetSeparation, seperationTop ) if topDifference < bottomDifference: self.rank = rankTop self.rulingSeparationWidthMillimeters = euclidean.getIncrementFromRank( self.rank ) self.canvas.focus_set() def addPhotoImage( self, fileName, gridPosition ): "Get a PhotoImage button, grid the button and increment the grid position." photoImage = None try: photoImage = settings.Tkinter.PhotoImage( file = os.path.join( self.imagesDirectoryPath, fileName ), master = gridPosition.master ) except: print('Image %s was not found in the images directory, so a text button will be substituted.' % fileName ) untilDotFileName = archive.getUntilDot(fileName) self.photoImages[ untilDotFileName ] = photoImage return untilDotFileName def addScale( self, gridPosition ): "Add the line frame items." 
self.addMouseInstantTool('zoom_out.ppm', gridPosition, zoom_out.getNewMouseTool() ) self.addMouseInstantTool('zoom_in.ppm', gridPosition, zoom_in.getNewMouseTool() ) gridPosition.increment() settings.Tkinter.Label( gridPosition.master, text = 'Scale:').grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) gridPosition.increment() self.scaleEntry = settings.Tkinter.Spinbox( gridPosition.master, command = self.scaleEntryReturnPressed, from_ = 10.0, increment = 5.0, to = 100.0 ) self.scaleEntry.bind('<Return>', self.scaleEntryReturnPressed ) self.scaleEntry.grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) def addSettingsMenuSetWindowGeometry( self, center ): "Add the settings menu, center the scroll region, update, and set the window geometry." self.settingsMenu = settings.Tkinter.Menu( self.fileHelpMenuBar.menuBar, tearoff = 0 ) self.fileHelpMenuBar.addMenuToMenuBar( "Settings", self.settingsMenu ) settings.addMenuEntitiesToMenuFrameable( self.settingsMenu, self.repository.menuEntities ) self.relayXview( settings.Tkinter.MOVETO, center.real - self.canvasScreenCenter.real ) self.relayYview( settings.Tkinter.MOVETO, center.imag - self.canvasScreenCenter.imag ) self.root.withdraw() self.root.update_idletasks() movedGeometryString = '%sx%s+%s' % ( self.root.winfo_reqwidth(), self.root.winfo_reqheight(), '0+0') self.root.geometry( movedGeometryString ) def button1(self, event): "The button was clicked." self.mouseTool.button1(event) def buttonRelease1(self, event): "The button was released." self.mouseTool.buttonRelease1(event) def cancel(self, event=None): "Set all entities to their saved state." settings.cancelRepository(self.repository) def cancelTimer(self, event=None): "Cancel the timer and set it to none." if self.timerID != None: self.canvas.after_cancel(self.timerID) self.timerID = None def cancelTimerResetButtons(self): "Cancel the timer and set it to none." 
self.cancelTimer() self.resetPeriodicButtonsText() def close(self, event=None): "The dialog was closed." try: self.root.after( 1, self.root.destroy ) # to get around 'Font Helvetica -12 still in cache.' segmentation bug, instead of simply calling self.root.destroy() except: pass def createMouseModeTool(self): "Create the mouse mode tool." self.destroyMouseToolRaiseMouseButtons() for menuRadio in self.repository.mouseMode.menuRadios: if menuRadio.value: self.mouseTool = menuRadio.mouseTool menuRadio.mouseButton['relief'] = settings.Tkinter.SUNKEN def destroyAllDialogWindows(self): "Destroy all the dialog windows." settings.writeSettings(self.repository) return for menuEntity in self.repository.menuEntities: lowerName = menuEntity.name.lower() if lowerName in settings.globalRepositoryDialogListTable: globalRepositoryDialogValues = settings.globalRepositoryDialogListTable[ lowerName ] for globalRepositoryDialogValue in globalRepositoryDialogValues: settings.quitWindow( globalRepositoryDialogValue.root ) def destroyMouseToolRaiseMouseButtons(self): "Destroy the mouse tool and raise the mouse buttons." self.mouseTool.destroyEverything() for menuRadio in self.repository.mouseMode.menuRadios: menuRadio.mouseButton['relief'] = settings.Tkinter.RAISED for mouseInstantButton in self.mouseInstantButtons: mouseInstantButton['relief'] = settings.Tkinter.RAISED def dive(self): "Dive, go down periodically." oldDiveButtonText = self.diveButton['text'] self.cancelTimerResetButtons() if oldDiveButtonText == 'stop': return self.diveCycle() def diveCycle(self): "Start the dive cycle." self.cancelTimer() self.repository.layer.value -= 1 self.update() if self.repository.layer.value < 1: self.resetPeriodicButtonsText() return self.setButtonImageText( self.diveButton, 'stop') self.timerID = self.canvas.after( self.getSlideShowDelay(), self.diveCycle ) def getAnimationLineDelay( self, coloredLine ): "Get the animation line delay in milliseconds." 
# maybe later, add animation along line # nextLayerIndex = self.repository.layer.value # nextLineIndex = self.repository.line.value + 1 # coloredLinesLength = len( self.getColoredLines() ) # self.skein.feedRateMinute # if nextLineIndex >= coloredLinesLength: # if nextLayerIndex + 1 < len( self.skeinPanes ): # nextLayerIndex += 1 # nextLineIndex = 0 # else: # nextLineIndex = self.repository.line.value splitLine = gcodec.getSplitLineBeforeBracketSemicolon( coloredLine.displayString ) self.skein.feedRateMinute = gcodec.getFeedRateMinute( self.skein.feedRateMinute, splitLine ) feedRateSecond = self.skein.feedRateMinute / 60.0 coloredLineLength = abs( coloredLine.end - coloredLine.begin ) / self.repository.scale.value duration = coloredLineLength / feedRateSecond animationLineDelay = int( round( 1000.0 * duration / self.repository.animationLineQuickening.value ) ) return max( animationLineDelay, 1 ) def getDrawnLineText( self, location, tags, text ): "Get the line text drawn on the canvas." anchorTowardCenter = settings.Tkinter.N if location.imag > float( self.canvasHeight ) * 0.1: anchorTowardCenter = settings.Tkinter.S if location.real > float( self.canvasWidth ) * 0.7: anchorTowardCenter += settings.Tkinter.E else: anchorTowardCenter += settings.Tkinter.W return self.canvas.create_text( int( location.real ), int( location.imag ), anchor = anchorTowardCenter, tags = tags, text = text ) def getEntityFromName(self, name): "Get the entity of the given name." for entity in self.repository.displayEntities: if entity.name == name: return entity return None def getPhotoButtonGridIncrement( self, commandFunction, fileName, gridPosition ): "Get a PhotoImage button, grid the button and increment the grid position." 
gridPosition.increment() untilDotFileName = self.addPhotoImage( fileName, gridPosition ) photoImage = self.photoImages[ untilDotFileName ] photoButton = settings.Tkinter.Button( gridPosition.master, activebackground = 'black', activeforeground = 'white', command = commandFunction, text = untilDotFileName ) if photoImage != None: photoButton['image'] = photoImage photoButton.grid( row = gridPosition.row, column = gridPosition.column, sticky = settings.Tkinter.W ) return photoButton def getRoundedRulingText( self, extraDecimalPlaces, number ): "Get the rounded ruling text." rulingText = euclidean.getRoundedToPlacesString( extraDecimalPlaces - math.floor( math.log10( self.rulingSeparationWidthMillimeters ) ), number ) if self.rulingSeparationWidthMillimeters < .99: return rulingText if rulingText[ - len('.0') : ] == '.0': return rulingText[ : - len('.0') ] return rulingText def getRulingSeparationWidthPixels( self, rank ): "Get the separation width in pixels." return euclidean.getIncrementFromRank( rank ) * self.skein.scale def getScrollPaneCenter(self): "Get the center of the scroll pane." return self.getScrollPaneFraction() + self.canvasScreenCenter def getScrollPaneFraction(self): "Get the center of the scroll pane." return complex( self.xScrollbar.get()[0], self.yScrollbar.get()[0] ) def getSlideShowDelay(self): "Get the slide show delay in milliseconds." slideShowDelay = int( round( 1000.0 / self.repository.animationSlideShowRate.value ) ) return max( slideShowDelay, 1 ) def getUpdateSkeinPanes(self): "Get the update skein panes." layerPlusExtraSpan = self.repository.layer.value + self.repository.layerExtraSpan.value layersFrom = max( 0, min( self.repository.layer.value, layerPlusExtraSpan ) ) layersTo = min( len( self.skeinPanes ), max( self.repository.layer.value, layerPlusExtraSpan ) + 1 ) return self.skeinPanes[ layersFrom : layersTo ] def isLineBelowZeroSetLayer(self): "Determine if the line index is below zero, and if so set the layer index." 
if self.repository.line.value >= 0: return False self.repository.line.value = 0 if self.repository.layer.value > 0: self.setLayerIndex( self.repository.layer.value - 1 ) return True return False def isLineBeyondListSetLayer(self): "Determine if the line index is beyond the end of the list, and if so set the layer index." coloredLinesLength = len( self.getColoredLines() ) if self.repository.line.value < coloredLinesLength: return False self.repository.line.value = coloredLinesLength - 1 if self.repository.layer.value < len( self.skeinPanes ) - 1: self.setLayerIndex( self.repository.layer.value + 1 ) return True return False def keyPressDown(self, event): "The down arrow was pressed." self.mouseTool.keyPressDown(event) def keyPressLeft(self, event): "The left arrow was pressed." self.mouseTool.keyPressLeft(event) def keyPressReturn(self, event): "The return key was pressed." self.mouseTool.keyPressReturn(event) def keyPressRight(self, event): "The right arrow was pressed." self.mouseTool.keyPressRight(event) def keyPressUp(self, event): "The up arrow was pressed." self.mouseTool.keyPressUp(event) def layerEntryReturnPressed(self, event=None): "The layer index entry return was pressed." self.setLayerIndex( int( self.layerEntry.get() ) ) def limitIndex(self): "Limit the index so it is not below zero or above the top." self.repository.layer.value = max( 0, self.repository.layer.value ) self.repository.layer.value = min( len( self.skeinPanes ) - 1, self.repository.layer.value ) def limitIndexSetArrowMouseDeleteCanvas(self): "Limit the index, set the arrow type, and delete all the canvas items." self.limitIndex() self.arrowType = None if self.repository.drawArrows.value: self.arrowType = 'last' self.canvas.delete( settings.Tkinter.ALL ) def lineEntryReturnPressed(self, event=None): "The line index entry return was pressed." 
self.repository.line.value = int( self.lineEntry.get() ) if self.isLineBelowZeroSetLayer(): return if self.isLineBeyondListSetLayer(): return self.cancelTimerResetButtons() self.updateMouseToolIfSelection() self.setLineButtonsState() def lineDive(self): "Line dive, go down periodically." oldLineDiveButtonText = self.lineDiveButton['text'] self.cancelTimerResetButtons() if oldLineDiveButtonText == 'stop': return self.lineDiveCycle() def lineDiveCycle(self): "Start the line dive cycle." self.cancelTimer() self.repository.line.value -= 1 if self.repository.line.value < 0: self.repository.line.value = 0 if self.repository.layer.value == 0: self.resetPeriodicButtonsText() self.setLineButtonsState() return self.setLayerIndex( self.repository.layer.value - 1 ) else: self.updateMouseToolIfSelection() self.setLineButtonsState() self.setButtonImageText( self.lineDiveButton, 'stop') coloredLine = self.getColoredLines()[ self.repository.line.value ] self.timerID = self.canvas.after( self.getAnimationLineDelay( coloredLine ), self.lineDiveCycle ) def lineSoar(self): "Line soar, go up periodically." oldLineSoarButtonText = self.lineSoarButton['text'] self.cancelTimerResetButtons() if oldLineSoarButtonText == 'stop': return self.lineSoarCycle() def lineSoarCycle(self): "Start the line soar cycle." 
self.cancelTimer() self.repository.line.value += 1 coloredLinesLength = len( self.getColoredLines() ) if self.repository.line.value >= coloredLinesLength: self.repository.line.value = coloredLinesLength - 1 if self.repository.layer.value > len( self.skeinPanes ) - 2: self.resetPeriodicButtonsText() self.setLineButtonsState() return self.setLayerIndex( self.repository.layer.value + 1 ) else: self.updateMouseToolIfSelection() self.setLineButtonsState() self.setButtonImageText( self.lineSoarButton, 'stop') coloredLine = self.getColoredLines()[ self.repository.line.value ] self.timerID = self.canvas.after( self.getAnimationLineDelay( coloredLine ), self.lineSoarCycle ) def motion(self, event): "The mouse moved." self.mouseTool.motion(event) def phoenixUpdate(self): "Update the skein, and deiconify a new window and destroy the old." self.updateNewDestroyOld( self.getScrollPaneCenter() ) def relayXview( self, *args ): "Relay xview changes." self.canvas.xview( *args ) def relayYview( self, *args ): "Relay yview changes." self.canvas.yview( *args ) def resetPeriodicButtonsText(self): "Reset the text of the periodic buttons." self.setButtonImageText( self.diveButton, 'dive') self.setButtonImageText( self.soarButton, 'soar') self.setButtonImageText( self.lineDiveButton, 'dive') self.setButtonImageText( self.lineSoarButton, 'soar') def redisplayWindowUpdate(self, event=None): "Deiconify a new window and destroy the old." self.repository.setToDisplaySave() self.getCopy().updateDeiconify( self.getScrollPaneCenter() ) self.root.after( 1, self.root.destroy ) # to get around 'Font Helvetica -12 still in cache.' segmentation bug, instead of simply calling self.root.destroy() def save(self): "Set the setting values to the display, save the new values." 
for menuEntity in self.repository.menuEntities: if menuEntity in self.repository.preferences: menuEntity.setToDisplay() self.setInsetToDisplay() settings.writeSettings(self.repository) def scaleEntryReturnPressed(self, event=None): "The scale entry return was pressed." self.repository.scale.value = float( self.scaleEntry.get() ) self.phoenixUpdate() def setButtonImageText( self, button, text ): "Set the text of the e periodic buttons." photoImage = self.photoImages[ text ] if photoImage != None: button['image'] = photoImage button['text'] = text def setDisplayLayerIndex(self): "Set the display of the layer index entry field and buttons." coloredLines = self.getColoredLines() isAboveFloor = self.repository.layer.value > 0 isBelowCeiling = self.repository.layer.value < len( self.skeinPanes ) - 1 setStateNormalDisabled( isAboveFloor, self.diveButton ) setStateNormalDisabled( isBelowCeiling, self.soarButton ) self.setLineButtonsState() settings.setEntryText( self.layerEntry, self.repository.layer.value ) settings.setEntryText( self.lineEntry, self.repository.line.value ) settings.setEntryText( self.scaleEntry, self.repository.scale.value ) self.mouseTool.update() self.setInsetToDisplay() def setInsetToDisplay(self): "Set the repository to the display." 
if self.root.state() != 'normal': return excessExtent = int( self.xScrollbar['width'] ) * 21 / 15 screenSize = self.skein.screenSize xScrollbarCanvasPortion = getScrollbarCanvasPortion( self.xScrollbar ) newScreenHorizontalInset = int( self.root.winfo_screenwidth() - round( xScrollbarCanvasPortion * screenSize.real ) + excessExtent ) if xScrollbarCanvasPortion < .99: self.repository.screenHorizontalInset.value = newScreenHorizontalInset else: self.repository.screenHorizontalInset.value = min( self.repository.screenHorizontalInset.value, newScreenHorizontalInset ) yScrollbarCanvasPortion = getScrollbarCanvasPortion( self.yScrollbar ) newScreenVerticalInset = int( self.root.winfo_screenheight() - round( yScrollbarCanvasPortion * screenSize.imag ) + excessExtent ) if yScrollbarCanvasPortion < .99: self.repository.screenVerticalInset.value = newScreenVerticalInset else: self.repository.screenVerticalInset.value = min( self.repository.screenVerticalInset.value, newScreenVerticalInset ) def setLayerIndex( self, layerIndex ): "Set the layer index." self.cancelTimerResetButtons() oldLayerIndex = self.repository.layer.value self.repository.layer.value = layerIndex self.limitIndex() coloredLines = self.getColoredLines() if self.repository.layer.value < oldLayerIndex: self.repository.line.value = len( coloredLines ) - 1 self.lineEntry['to'] = getLengthMinusOneMinimumOne( coloredLines ) if self.repository.layer.value > oldLayerIndex: self.repository.line.value = 0 self.lineEntry['to'] = getLengthMinusOneMinimumOne( coloredLines ) self.update() def setLineButtonsState(self): "Set the state of the line buttons." 
coloredLines = self.getColoredLines() isAboveFloor = self.repository.layer.value > 0 isBelowCeiling = self.repository.layer.value < len( self.skeinPanes ) - 1 setStateNormalDisabled( isAboveFloor or self.repository.line.value > 0, self.lineDiveButton ) setStateNormalDisabled( isBelowCeiling or self.repository.line.value < len( coloredLines ) - 1, self.lineSoarButton ) def setWindowNewMouseTool( self, getNewMouseToolFunction, mouseTool ): "Set the getNewMouseTool function and the update function." mouseTool.getNewMouseToolFunction = getNewMouseToolFunction mouseTool.setUpdateFunction( self.activateMouseModeTool ) def setWindowToDisplaySavePhoenixUpdate(self, event=None): "Set the setting values to the display, save the new values, then call the update function." self.repository.setToDisplaySave() self.phoenixUpdate() def setWindowToDisplaySaveUpdate(self, event=None): "Set the setting values to the display, save the new values, then call the update function." self.repository.setToDisplaySave() self.update() def shiftButtonRelease1(self, event): "The button was released while the shift key was pressed." self.mouseTool.buttonRelease1( event, True ) def shiftMotion(self, event): "The mouse moved." self.mouseTool.motion( event, True ) def soar(self): "Soar, go up periodically." oldSoarButtonText = self.soarButton['text'] self.cancelTimerResetButtons() if oldSoarButtonText == 'stop': return self.soarCycle() def soarCycle(self): "Start the soar cycle." self.cancelTimer() self.repository.layer.value += 1 self.update() if self.repository.layer.value > len( self.skeinPanes ) - 2: self.resetPeriodicButtonsText() return self.setButtonImageText( self.soarButton, 'stop') self.timerID = self.canvas.after( self.getSlideShowDelay(), self.soarCycle ) def updateDeiconify( self, center = complex( 0.5, 0.5 ) ): "Update and deiconify the window." 
self.addSettingsMenuSetWindowGeometry( center ) self.update() self.root.deiconify() def updateMouseToolIfSelection(self): "Update the mouse tool if it is a selection tool." if self.mouseTool == None: return if self.mouseTool.isSelectionTool(): self.mouseTool.update() def updateNewDestroyOld( self, scrollPaneCenter ): "Update and deiconify a window and destroy the old." self.getCopyWithNewSkein().updateDeiconify( scrollPaneCenter ) self.root.after( 1, self.root.destroy ) # to get around 'Font Helvetica -12 still in cache.' segmentation bug, instead of simply calling self.root.destroy()
gpl-2.0
apache/couchdb-mango
test/04-key-tests.py
5
4947
# -*- coding: latin-1 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import mango import unittest TEST_DOCS = [ { "type": "complex_key", "title": "normal key" }, { "type": "complex_key", "title": "key with dot", "dot.key": "dot's value", "none": { "dot": "none dot's value" }, "name.first" : "Kvothe" }, { "type": "complex_key", "title": "key with peso", "$key": "peso", "deep": { "$key": "deep peso" }, "name": {"first" : "Master Elodin"} }, { "type": "complex_key", "title": "unicode key", "": "apple" }, { "title": "internal_fields_format", "utf8-1[]:string" : "string", "utf8-2[]:boolean[]" : True, "utf8-3[]:number" : 9, "utf8-3[]:null" : None } ] @unittest.skipUnless(mango.has_text_service(), "requires text service") class KeyTests(mango.DbPerClass): @classmethod def setUpClass(klass): super(KeyTests, klass).setUpClass() klass.db.save_docs(TEST_DOCS, w=3) klass.db.create_index(["type"], ddoc="view") if mango.has_text_service(): klass.db.create_text_index(ddoc="text") def run_check(self, query, check, fields=None, indexes=None): if indexes is None: indexes = ["view", "text"] for idx in indexes: docs = self.db.find(query, fields=fields, use_index=idx) check(docs) def test_dot_key(self): query = {"type": "complex_key"} fields = ["title", "dot\\.key", "none.dot"] def check(docs): assert len(docs) == 4 assert docs[1].has_key("dot.key") assert docs[1]["dot.key"] == "dot's value" assert docs[1].has_key("none") assert docs[1]["none"]["dot"] == "none dot's value" self.run_check(query, 
check, fields=fields) def test_peso_key(self): query = {"type": "complex_key"} fields = ["title", "$key", "deep.$key"] def check(docs): assert len(docs) == 4 assert docs[2].has_key("$key") assert docs[2]["$key"] == "peso" assert docs[2].has_key("deep") assert docs[2]["deep"]["$key"] == "deep peso" self.run_check(query, check, fields=fields) def test_unicode_in_fieldname(self): query = {"type": "complex_key"} fields = ["title", ""] def check(docs): assert len(docs) == 4 # note:  == \uf8ff assert docs[3].has_key(u'\uf8ff') assert docs[3][u'\uf8ff'] == "apple" self.run_check(query, check, fields=fields) # The rest of these tests are only run against the text # indexes because view indexes don't have to worry about # field *name* escaping in the index. def test_unicode_in_selector_field(self): query = {"" : "apple"} def check(docs): assert len(docs) == 1 assert docs[0][u"\uf8ff"] == "apple" self.run_check(query, check, indexes=["text"]) def test_internal_field_tests(self): queries = [ {"utf8-1[]:string" : "string"}, {"utf8-2[]:boolean[]" : True}, {"utf8-3[]:number" : 9}, {"utf8-3[]:null" : None} ] def check(docs): assert len(docs) == 1 assert docs[0]["title"] == "internal_fields_format" for query in queries: self.run_check(query, check, indexes=["text"]) def test_escape_period(self): query = {"name\\.first" : "Kvothe"} def check(docs): assert len(docs) == 1 assert docs[0]["name.first"] == "Kvothe" self.run_check(query, check, indexes=["text"]) query = {"name.first" : "Kvothe"} def check_empty(docs): assert len(docs) == 0 self.run_check(query, check_empty, indexes=["text"]) def test_object_period(self): query = {"name.first" : "Master Elodin"} def check(docs): assert len(docs) == 1 assert docs[0]["title"] == "key with peso" self.run_check(query, check, indexes=["text"]) query = {"name\\.first" : "Master Elodin"} def check_empty(docs): assert len(docs) == 0 self.run_check(query, check_empty, indexes=["text"])
apache-2.0
CiscoSystems/nova
nova/tests/db/test_sqlite.py
16
1931
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test cases for sqlite-specific logic""" from nova import test from sqlalchemy import create_engine from sqlalchemy import Column, BigInteger, String import sqlalchemy.engine.reflection from sqlalchemy.ext.declarative import declarative_base class TestSqlite(test.NoDBTestCase): """Tests for sqlite-specific logic.""" def test_big_int_mapping(self): base_class = declarative_base() class User(base_class): """Dummy class with a BigInteger column for testing.""" __tablename__ = "users" id = Column(BigInteger, primary_key=True) name = Column(String) engine = create_engine('sqlite://') base_class.metadata.create_all(engine) insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine) id_type = None for column in insp.get_columns('users'): if column['name'] == 'id': id_type = column['type'].compile() # NOTE(russellb) We have a hook in nova.db.sqlalchemy that makes it so # BigInteger() is compiled to INTEGER for sqlite instead of BIGINT. self.assertEqual('INTEGER', id_type)
apache-2.0
ricard33/dojango
dojango/decorators.py
6
5526
from django import VERSION as django_version if django_version >= (1, 5, 0): import json else: from django.utils import simplejson as json from django.http import HttpResponseNotAllowed, HttpResponseServerError from util import to_json_response from util import to_dojo_data try: from functools import wraps except ImportError: from django.utils.functional import wraps # Python 2.3, 2.4 fallback. def expect_post_request(func): """Allow only POST requests to come in, throw an exception otherwise. This relieves from checking every time that the request is really a POST request, which it should be when using this decorator. """ def _ret(*args, **kwargs): ret = func(*args, **kwargs) request = args[0] if not request.method=='POST': return HttpResponseNotAllowed(['POST']) return ret return _ret def add_request_getdict(func): """Add the method getdict() to the request object. This works just like getlist() only that it decodes any nested JSON encoded object structure. Since sending deep nested structures is not possible via GET/POST by default, this enables it. Of course you need to make sure that on the JavaScript side you are also sending the data properly, which dojango.send() automatically does. Example: this is being sent: one:1 two:{"three":3, "four":4} using request.POST.getdict('two') returns a dict containing the values sent by the JavaScript. """ def _ret(*args, **kwargs): args[0].POST.__class__.getdict = __getdict ret = func(*args, **kwargs) return ret return _ret def __getdict(self, key): ret = self.get(key) try: ret = json.loads(ret) except ValueError: # The value was not JSON encoded :-) raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(ret))) return ret def json_response(func): """ A simple json response decorator. 
Use it on views, where a python data object should be converted to a json response: @json_response def my_view(request): my_data = {'foo': 'bar'} return my_data """ def inner(request, *args, **kwargs): ret = func(request, *args, **kwargs) return __prepare_json_ret(request, ret) return wraps(func)(inner) def jsonp_response_custom(callback_param_name): """ A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName. It acts like the json_response decorator but with the difference, that it wraps the returned json string into a client-specified function name (that is the Padding). You can add this decorator to a function like that: @jsonp_response_custom("my_callback_param") def my_view(request): my_data = {'foo': 'bar'} return my_data Your now can access this view from a foreign URL using JSONP. An example with Dojo looks like that: dojo.io.script.get({ url:"http://example.com/my_url/", callbackParamName:"my_callback_param", load: function(response){ console.log(response); } }); Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same. """ def decorator(func): def inner(request, *args, **kwargs): ret = func(request, *args, **kwargs) return __prepare_json_ret(request, ret, callback_param_name=callback_param_name) return wraps(func)(inner) return decorator jsonp_response = jsonp_response_custom("jsonp_callback") jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsoncallback' as a fixed callback_param_name." def json_iframe_response(func): """ A simple json response decorator but wrapping the json response into a html page. It helps when doing a json request using an iframe (e.g. 
file up-/download): @json_iframe def my_view(request): my_data = {'foo': 'bar'} return my_data """ def inner(request, *args, **kwargs): ret = func(request, *args, **kwargs) return __prepare_json_ret(request, ret, use_iframe=True) return wraps(func)(inner) def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False): if ret==False: ret = {'success':False} elif ret==None: # Sometimes there is no return. ret = {} # Add the 'ret'=True, since it was obviously no set yet and we got valid data, no exception. func_name = None if callback_param_name: func_name = request.GET.get(callback_param_name, "callbackParamName") try: if not ret.has_key('success'): ret['success'] = True except AttributeError, e: raise Exception("The returned data of your function must be a dictionary!") json_ret = "" try: # Sometimes the serialization fails, i.e. when there are too deeply nested objects or even classes inside json_ret = to_json_response(ret, func_name, use_iframe) except Exception, e: print '\n\n===============Exception=============\n\n'+str(e)+'\n\n' print ret print '\n\n' return HttpResponseServerError(content=str(e)) return json_ret
bsd-3-clause
dimid/ansible-modules-extras
cloud/centurylink/clc_server.py
40
57449
#!/usr/bin/python # # Copyright (c) 2015 CenturyLink # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/> # DOCUMENTATION = ''' module: clc_server short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud. description: - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. version_added: "2.0" options: additional_disks: description: - The list of additional disks for the server required: False default: [] add_public_ip: description: - Whether to add a public ip to the server required: False default: False choices: [False, True] alias: description: - The account alias to provision the servers under. required: False default: None anti_affinity_policy_id: description: - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. required: False default: None anti_affinity_policy_name: description: - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. required: False default: None alert_policy_id: description: - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. required: False default: None alert_policy_name: description: - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'. 
required: False default: None count: description: - The number of servers to build (mutually exclusive with exact_count) required: False default: 1 count_group: description: - Required when exact_count is specified. The Server Group use to determine how many severs to deploy. required: False default: None cpu: description: - How many CPUs to provision on the server default: 1 required: False cpu_autoscale_policy_id: description: - The autoscale policy to assign to the server. default: None required: False custom_fields: description: - The list of custom fields to set on the server. default: [] required: False description: description: - The description to set for the server. default: None required: False exact_count: description: - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, creating and deleting them to reach that count. Requires count_group to be set. default: None required: False group: description: - The Server Group to create servers under. default: 'Default Group' required: False ip_address: description: - The IP Address for the server. One is assigned if not provided. default: None required: False location: description: - The Datacenter to create servers in. default: None required: False managed_os: description: - Whether to create the server as 'Managed' or not. default: False required: False choices: [True, False] memory: description: - Memory in GB. default: 1 required: False name: description: - A 1 to 6 character identifier to use for the server. This is required when state is 'present' default: None required: False network_id: description: - The network UUID on which to create servers. default: None required: False packages: description: - The list of blue print packages to run on the server after its created. default: [] required: False password: description: - Password for the administrator / root user default: None required: False primary_dns: description: - Primary DNS used by the server. 
default: None required: False public_ip_protocol: description: - The protocol to use for the public ip if add_public_ip is set to True. default: 'TCP' choices: ['TCP', 'UDP', 'ICMP'] required: False public_ip_ports: description: - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True. default: [] required: False secondary_dns: description: - Secondary DNS used by the server. default: None required: False server_ids: description: - Required for started, stopped, and absent states. A list of server Ids to insure are started, stopped, or absent. default: [] required: False source_server_password: description: - The password for the source server if a clone is specified. default: None required: False state: description: - The state to insure that the provided resources are in. default: 'present' required: False choices: ['present', 'absent', 'started', 'stopped'] storage_type: description: - The type of storage to attach to the server. default: 'standard' required: False choices: ['standard', 'hyperscale'] template: description: - The template to use for server creation. Will search for a template if a partial string is provided. This is required when state is 'present' default: None required: False ttl: description: - The time to live for the server in seconds. The server will be deleted when this time expires. default: None required: False type: description: - The type of server to create. default: 'standard' required: False choices: ['standard', 'hyperscale', 'bareMetal'] configuration_id: description: - Only required for bare metal servers. Specifies the identifier for the specific configuration type of bare metal server to deploy. default: None required: False os_type: description: - Only required for bare metal servers. Specifies the OS to provision with the bare metal server. 
default: None required: False choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] wait: description: - Whether to wait for the provisioning tasks to finish before returning. default: True required: False choices: [True, False] requirements: - python = 2.7 - requests >= 2.5.0 - clc-sdk author: "CLC Runner (@clc-runner)" notes: - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
''' EXAMPLES = ''' # Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - name: Provision a single Ubuntu Server clc_server: name: test template: ubuntu-14-64 count: 1 group: 'Default Group' state: present - name: Ensure 'Default Group' has exactly 5 servers clc_server: name: test template: ubuntu-14-64 exact_count: 5 count_group: 'Default Group' group: 'Default Group' - name: Stop a Server clc_server: server_ids: ['UC1ACCT-TEST01'] state: stopped - name: Start a Server clc_server: server_ids: ['UC1ACCT-TEST01'] state: started - name: Delete a Server clc_server: server_ids: ['UC1ACCT-TEST01'] state: absent ''' RETURN = ''' server_ids: description: The list of server ids that are created returned: success type: list sample: [ "UC1TEST-SVR01", "UC1TEST-SVR02" ] partially_created_server_ids: description: The list of server ids that are partially created returned: success type: list sample: [ "UC1TEST-SVR01", "UC1TEST-SVR02" ] servers: description: The list of server objects returned from CLC returned: success type: list sample: [ { "changeInfo":{ "createdBy":"service.wfad", "createdDate":1438196820, "modifiedBy":"service.wfad", "modifiedDate":1438196820 }, "description":"test-server", "details":{ "alertPolicies":[ ], "cpu":1, "customFields":[ ], "diskCount":3, "disks":[ { "id":"0:0", "partitionPaths":[ ], "sizeGB":1 }, { "id":"0:1", "partitionPaths":[ ], "sizeGB":2 }, { "id":"0:2", "partitionPaths":[ ], "sizeGB":14 } ], "hostName":"", "inMaintenanceMode":false, "ipAddresses":[ { "internal":"10.1.1.1" } ], "memoryGB":1, "memoryMB":1024, "partitions":[ ], "powerState":"started", "snapshots":[ ], "storageGB":17 }, "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", "id":"test-server", "ipaddress":"10.120.45.23", "isTemplate":false, "links":[ { "href":"/v2/servers/wfad/test-server", "id":"test-server", "rel":"self", "verbs":[ "GET", "PATCH", "DELETE" ] }, { 
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", "id":"086ac1dfe0b6411989e8d1b77c4065f0", "rel":"group" }, { "href":"/v2/accounts/wfad", "id":"wfad", "rel":"account" }, { "href":"/v2/billing/wfad/serverPricing/test-server", "rel":"billing" }, { "href":"/v2/servers/wfad/test-server/publicIPAddresses", "rel":"publicIPAddresses", "verbs":[ "POST" ] }, { "href":"/v2/servers/wfad/test-server/credentials", "rel":"credentials" }, { "href":"/v2/servers/wfad/test-server/statistics", "rel":"statistics" }, { "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", "rel":"upcomingScheduledActivities" }, { "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", "rel":"scheduledActivities", "verbs":[ "GET", "POST" ] }, { "href":"/v2/servers/wfad/test-server/capabilities", "rel":"capabilities" }, { "href":"/v2/servers/wfad/test-server/alertPolicies", "rel":"alertPolicyMappings", "verbs":[ "POST" ] }, { "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", "rel":"antiAffinityPolicyMapping", "verbs":[ "PUT", "DELETE" ] }, { "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", "rel":"cpuAutoscalePolicyMapping", "verbs":[ "PUT", "DELETE" ] } ], "locationId":"UC1", "name":"test-server", "os":"ubuntu14_64Bit", "osType":"Ubuntu 14 64-bit", "status":"active", "storageType":"standard", "type":"standard" } ] ''' __version__ = '${version}' from time import sleep from distutils.version import LooseVersion try: import requests except ImportError: REQUESTS_FOUND = False else: REQUESTS_FOUND = True # # Requires the clc-python-sdk. 
# sudo pip install clc-sdk # try: import clc as clc_sdk from clc import CLCException from clc import APIFailedResponse except ImportError: CLC_FOUND = False clc_sdk = None else: CLC_FOUND = True class ClcServer: clc = clc_sdk def __init__(self, module): """ Construct module """ self.clc = clc_sdk self.module = module self.group_dict = {} if not CLC_FOUND: self.module.fail_json( msg='clc-python-sdk required for this module') if not REQUESTS_FOUND: self.module.fail_json( msg='requests library is required for this module') if requests.__version__ and LooseVersion( requests.__version__) < LooseVersion('2.5.0'): self.module.fail_json( msg='requests library version should be >= 2.5.0') self._set_user_agent(self.clc) def process_request(self): """ Process the request - Main Code Path :return: Returns with either an exit_json or fail_json """ changed = False new_server_ids = [] server_dict_array = [] self._set_clc_credentials_from_env() self.module.params = self._validate_module_params( self.clc, self.module) p = self.module.params state = p.get('state') # # Handle each state # partial_servers_ids = [] if state == 'absent': server_ids = p['server_ids'] if not isinstance(server_ids, list): return self.module.fail_json( msg='server_ids needs to be a list of instances to delete: %s' % server_ids) (changed, server_dict_array, new_server_ids) = self._delete_servers(module=self.module, clc=self.clc, server_ids=server_ids) elif state in ('started', 'stopped'): server_ids = p.get('server_ids') if not isinstance(server_ids, list): return self.module.fail_json( msg='server_ids needs to be a list of servers to run: %s' % server_ids) (changed, server_dict_array, new_server_ids) = self._start_stop_servers(self.module, self.clc, server_ids) elif state == 'present': # Changed is always set to true when provisioning new instances if not p.get('template') and p.get('type') != 'bareMetal': return self.module.fail_json( msg='template parameter is required for new instance') if 
p.get('exact_count') is None: (server_dict_array, new_server_ids, partial_servers_ids, changed) = self._create_servers(self.module, self.clc) else: (server_dict_array, new_server_ids, partial_servers_ids, changed) = self._enforce_count(self.module, self.clc) self.module.exit_json( changed=changed, server_ids=new_server_ids, partially_created_server_ids=partial_servers_ids, servers=server_dict_array) @staticmethod def _define_module_argument_spec(): """ Define the argument spec for the ansible module :return: argument spec dictionary """ argument_spec = dict( name=dict(), template=dict(), group=dict(default='Default Group'), network_id=dict(), location=dict(default=None), cpu=dict(default=1), memory=dict(default=1), alias=dict(default=None), password=dict(default=None, no_log=True), ip_address=dict(default=None), storage_type=dict( default='standard', choices=[ 'standard', 'hyperscale']), type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), primary_dns=dict(default=None), secondary_dns=dict(default=None), additional_disks=dict(type='list', default=[]), custom_fields=dict(type='list', default=[]), ttl=dict(default=None), managed_os=dict(type='bool', default=False), description=dict(default=None), source_server_password=dict(default=None), cpu_autoscale_policy_id=dict(default=None), anti_affinity_policy_id=dict(default=None), anti_affinity_policy_name=dict(default=None), alert_policy_id=dict(default=None), alert_policy_name=dict(default=None), packages=dict(type='list', default=[]), state=dict( default='present', choices=[ 'present', 'absent', 'started', 'stopped']), count=dict(type='int', default=1), exact_count=dict(type='int', default=None), count_group=dict(), server_ids=dict(type='list', default=[]), add_public_ip=dict(type='bool', default=False), public_ip_protocol=dict( default='TCP', choices=[ 'TCP', 'UDP', 'ICMP']), public_ip_ports=dict(type='list', default=[]), configuration_id=dict(default=None), os_type=dict(default=None, 
choices=[ 'redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit' ]), wait=dict(type='bool', default=True)) mutually_exclusive = [ ['exact_count', 'count'], ['exact_count', 'state'], ['anti_affinity_policy_id', 'anti_affinity_policy_name'], ['alert_policy_id', 'alert_policy_name'], ] return {"argument_spec": argument_spec, "mutually_exclusive": mutually_exclusive} def _set_clc_credentials_from_env(self): """ Set the CLC Credentials on the sdk by reading environment variables :return: none """ env = os.environ v2_api_token = env.get('CLC_V2_API_TOKEN', False) v2_api_username = env.get('CLC_V2_API_USERNAME', False) v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) clc_alias = env.get('CLC_ACCT_ALIAS', False) api_url = env.get('CLC_V2_API_URL', False) if api_url: self.clc.defaults.ENDPOINT_URL_V2 = api_url if v2_api_token and clc_alias: self.clc._LOGIN_TOKEN_V2 = v2_api_token self.clc._V2_ENABLED = True self.clc.ALIAS = clc_alias elif v2_api_username and v2_api_passwd: self.clc.v2.SetCredentials( api_username=v2_api_username, api_passwd=v2_api_passwd) else: return self.module.fail_json( msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " "environment variables") @staticmethod def _validate_module_params(clc, module): """ Validate the module params, and lookup default values. 
:param clc: clc-sdk instance to use :param module: module to validate :return: dictionary of validated params """ params = module.params datacenter = ClcServer._find_datacenter(clc, module) ClcServer._validate_types(module) ClcServer._validate_name(module) params['alias'] = ClcServer._find_alias(clc, module) params['cpu'] = ClcServer._find_cpu(clc, module) params['memory'] = ClcServer._find_memory(clc, module) params['description'] = ClcServer._find_description(module) params['ttl'] = ClcServer._find_ttl(clc, module) params['template'] = ClcServer._find_template_id(module, datacenter) params['group'] = ClcServer._find_group(module, datacenter).id params['network_id'] = ClcServer._find_network_id(module, datacenter) params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( clc, module) params['alert_policy_id'] = ClcServer._find_alert_policy_id( clc, module) return params @staticmethod def _find_datacenter(clc, module): """ Find the datacenter by calling the CLC API. :param clc: clc-sdk instance to use :param module: module to validate :return: clc-sdk.Datacenter instance """ location = module.params.get('location') try: if not location: account = clc.v2.Account() location = account.data.get('primaryDataCenter') data_center = clc.v2.Datacenter(location) return data_center except CLCException as ex: module.fail_json( msg=str( "Unable to find location: {0}".format(location))) @staticmethod def _find_alias(clc, module): """ Find or Validate the Account Alias by calling the CLC API :param clc: clc-sdk instance to use :param module: module to validate :return: clc-sdk.Account instance """ alias = module.params.get('alias') if not alias: try: alias = clc.v2.Account.GetAlias() except CLCException as ex: module.fail_json(msg='Unable to find account alias. 
{0}'.format( ex.message )) return alias @staticmethod def _find_cpu(clc, module): """ Find or validate the CPU value by calling the CLC API :param clc: clc-sdk instance to use :param module: module to validate :return: Int value for CPU """ cpu = module.params.get('cpu') group_id = module.params.get('group_id') alias = module.params.get('alias') state = module.params.get('state') if not cpu and state == 'present': group = clc.v2.Group(id=group_id, alias=alias) if group.Defaults("cpu"): cpu = group.Defaults("cpu") else: module.fail_json( msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) return cpu @staticmethod def _find_memory(clc, module): """ Find or validate the Memory value by calling the CLC API :param clc: clc-sdk instance to use :param module: module to validate :return: Int value for Memory """ memory = module.params.get('memory') group_id = module.params.get('group_id') alias = module.params.get('alias') state = module.params.get('state') if not memory and state == 'present': group = clc.v2.Group(id=group_id, alias=alias) if group.Defaults("memory"): memory = group.Defaults("memory") else: module.fail_json(msg=str( "Can\'t determine a default memory value. 
Please provide a value for memory.")) return memory @staticmethod def _find_description(module): """ Set the description module param to name if description is blank :param module: the module to validate :return: string description """ description = module.params.get('description') if not description: description = module.params.get('name') return description @staticmethod def _validate_types(module): """ Validate that type and storage_type are set appropriately, and fail if not :param module: the module to validate :return: none """ state = module.params.get('state') server_type = module.params.get( 'type').lower() if module.params.get('type') else None storage_type = module.params.get( 'storage_type').lower() if module.params.get('storage_type') else None if state == "present": if server_type == "standard" and storage_type not in ( "standard", "premium"): module.fail_json( msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) if server_type == "hyperscale" and storage_type != "hyperscale": module.fail_json( msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) @staticmethod def _validate_name(module): """ Validate that name is the correct length if provided, fail if it's not :param module: the module to validate :return: none """ server_name = module.params.get('name') state = module.params.get('state') if state == 'present' and ( len(server_name) < 1 or len(server_name) > 6): module.fail_json(msg=str( "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) @staticmethod def _find_ttl(clc, module): """ Validate that TTL is > 3600 if set, and fail if not :param clc: clc-sdk instance to use :param module: module to validate :return: validated ttl """ ttl = module.params.get('ttl') if ttl: if ttl <= 3600: return module.fail_json(msg=str("Ttl cannot be <= 3600")) else: ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) return ttl @staticmethod def _find_template_id(module, 
datacenter): """ Find the template id by calling the CLC API. :param module: the module to validate :param datacenter: the datacenter to search for the template :return: a valid clc template id """ lookup_template = module.params.get('template') state = module.params.get('state') type = module.params.get('type') result = None if state == 'present' and type != 'bareMetal': try: result = datacenter.Templates().Search(lookup_template)[0].id except CLCException: module.fail_json( msg=str( "Unable to find a template: " + lookup_template + " in location: " + datacenter.id)) return result @staticmethod def _find_network_id(module, datacenter): """ Validate the provided network id or return a default. :param module: the module to validate :param datacenter: the datacenter to search for a network id :return: a valid network id """ network_id = module.params.get('network_id') if not network_id: try: network_id = datacenter.Networks().networks[0].id # -- added for clc-sdk 2.23 compatibility # datacenter_networks = clc_sdk.v2.Networks( # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) # network_id = datacenter_networks.networks[0].id # -- end except CLCException: module.fail_json( msg=str( "Unable to find a network in location: " + datacenter.id)) return network_id @staticmethod def _find_aa_policy_id(clc, module): """ Validate if the anti affinity policy exist for the given name and throw error if not :param clc: the clc-sdk instance :param module: the module to validate :return: aa_policy_id: the anti affinity policy id of the given name. 
""" aa_policy_id = module.params.get('anti_affinity_policy_id') aa_policy_name = module.params.get('anti_affinity_policy_name') if not aa_policy_id and aa_policy_name: alias = module.params.get('alias') aa_policy_id = ClcServer._get_anti_affinity_policy_id( clc, module, alias, aa_policy_name) if not aa_policy_id: module.fail_json( msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) return aa_policy_id @staticmethod def _find_alert_policy_id(clc, module): """ Validate if the alert policy exist for the given name and throw error if not :param clc: the clc-sdk instance :param module: the module to validate :return: alert_policy_id: the alert policy id of the given name. """ alert_policy_id = module.params.get('alert_policy_id') alert_policy_name = module.params.get('alert_policy_name') if not alert_policy_id and alert_policy_name: alias = module.params.get('alias') alert_policy_id = ClcServer._get_alert_policy_id_by_name( clc=clc, module=module, alias=alias, alert_policy_name=alert_policy_name ) if not alert_policy_id: module.fail_json( msg='No alert policy exist with name : %s' % alert_policy_name) return alert_policy_id def _create_servers(self, module, clc, override_count=None): """ Create New Servers in CLC cloud :param module: the AnsibleModule object :param clc: the clc-sdk instance to use :return: a list of dictionaries with server information about the servers that were created """ p = module.params request_list = [] servers = [] server_dict_array = [] created_server_ids = [] partial_created_servers_ids = [] add_public_ip = p.get('add_public_ip') public_ip_protocol = p.get('public_ip_protocol') public_ip_ports = p.get('public_ip_ports') params = { 'name': p.get('name'), 'template': p.get('template'), 'group_id': p.get('group'), 'network_id': p.get('network_id'), 'cpu': p.get('cpu'), 'memory': p.get('memory'), 'alias': p.get('alias'), 'password': p.get('password'), 'ip_address': p.get('ip_address'), 'storage_type': 
p.get('storage_type'),
            'type': p.get('type'),
            'primary_dns': p.get('primary_dns'),
            'secondary_dns': p.get('secondary_dns'),
            'additional_disks': p.get('additional_disks'),
            'custom_fields': p.get('custom_fields'),
            'ttl': p.get('ttl'),
            'managed_os': p.get('managed_os'),
            'description': p.get('description'),
            'source_server_password': p.get('source_server_password'),
            'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
            'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
            'packages': p.get('packages'),
            'configuration_id': p.get('configuration_id'),
            'os_type': p.get('os_type')
        }

        # override_count (used by _enforce_count) takes precedence over the
        # user-supplied 'count' parameter.
        count = override_count if override_count else p.get('count')

        changed = False if count == 0 else True

        if not changed:
            return server_dict_array, created_server_ids, partial_created_servers_ids, changed
        for i in range(0, count):
            if not module.check_mode:
                req = self._create_clc_server(clc=clc,
                                              module=module,
                                              server_params=params)
                server = req.requests[0].Server()
                request_list.append(req)
                servers.append(server)

        self._wait_for_requests(module, request_list)
        self._refresh_servers(module, servers)

        # Post-provisioning steps: optional public IP and optional alert
        # policy.  Servers that fail either step are reported as "partial".
        ip_failed_servers = self._add_public_ip_to_servers(
            module=module,
            should_add_public_ip=add_public_ip,
            servers=servers,
            public_ip_protocol=public_ip_protocol,
            public_ip_ports=public_ip_ports)
        ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
                                                              module=module,
                                                              servers=servers)

        for server in servers:
            if server in ip_failed_servers or server in ap_failed_servers:
                partial_created_servers_ids.append(server.id)
            else:
                # reload server details so the returned data reflects the
                # post-provisioning state (IP addresses in particular)
                server = clc.v2.Server(server.id)
                server.data['ipaddress'] = server.details[
                    'ipAddresses'][0]['internal']

                if add_public_ip and len(server.PublicIPs().public_ips) > 0:
                    server.data['publicip'] = str(
                        server.PublicIPs().public_ips[0])

                created_server_ids.append(server.id)
                server_dict_array.append(server.data)

        return server_dict_array, created_server_ids, partial_created_servers_ids, changed

    def _enforce_count(self, module, clc):
        """
        Enforce that there is the right number of servers in the provided group.
        Starts or stops servers as necessary.
        :param module: the AnsibleModule object
        :param clc: the clc-sdk instance to use
        :return: 4-tuple of (server_dict_array, changed_server_ids,
                 partial_servers_ids, changed) describing the servers that
                 were created or deleted to reach 'exact_count'
        """
        p = module.params
        changed = False
        count_group = p.get('count_group')
        datacenter = ClcServer._find_datacenter(clc, module)
        exact_count = p.get('exact_count')
        server_dict_array = []
        partial_servers_ids = []
        changed_server_ids = []

        # fail here if the exact count was specified without filtering
        # on a group, as this may lead to an undesired removal of instances
        if exact_count and count_group is None:
            return module.fail_json(
                msg="you must use the 'count_group' option with exact_count")

        servers, running_servers = ClcServer._find_running_servers_by_group(
            module, datacenter, count_group)

        if len(running_servers) == exact_count:
            changed = False

        elif len(running_servers) < exact_count:
            # Scale up: create the missing servers.
            to_create = exact_count - len(running_servers)
            server_dict_array, changed_server_ids, partial_servers_ids, changed \
                = self._create_servers(module, clc, override_count=to_create)

            for server in server_dict_array:
                running_servers.append(server)

        elif len(running_servers) > exact_count:
            # Scale down: delete the surplus servers with the lowest ids
            # (sorted order makes the choice deterministic).
            to_remove = len(running_servers) - exact_count
            all_server_ids = sorted([x.id for x in running_servers])
            remove_ids = all_server_ids[0:to_remove]

            (changed, server_dict_array, changed_server_ids) \
                = ClcServer._delete_servers(module, clc, remove_ids)

        return server_dict_array, changed_server_ids, partial_servers_ids, changed

    @staticmethod
    def _wait_for_requests(module, request_list):
        """
        Block until server provisioning requests are completed.
        No-op when the module's 'wait' parameter is falsy.
        :param module: the AnsibleModule object
        :param request_list: a list of clc-sdk.Request instances
        :return: none
        """
        wait = module.params.get('wait')
        if wait:
            # Requests.WaitUntilComplete() returns the count of failed requests
            failed_requests_count = sum(
                [request.WaitUntilComplete() for request in request_list])

            if failed_requests_count > 0:
                module.fail_json(
                    msg='Unable to process server request')

    @staticmethod
    def _refresh_servers(module, servers):
        """
        Loop through a list of servers and refresh them.
        Fails the module on the first server that cannot be refreshed.
        :param module: the AnsibleModule object
        :param servers: list of clc-sdk.Server instances to refresh
        :return: none
        """
        for server in servers:
            try:
                server.Refresh()
            except CLCException as ex:
                module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
                    server.id, ex.message
                ))

    @staticmethod
    def _add_public_ip_to_servers(
            module,
            should_add_public_ip,
            servers,
            public_ip_protocol,
            public_ip_ports):
        """
        Create a public IP for servers.  Skipped if should_add_public_ip is False.
        :param module: the AnsibleModule object
        :param should_add_public_ip: boolean - whether or not to provision a public ip for servers
        :param servers: List of servers to add public ips to
        :param public_ip_protocol: a protocol to allow for the public ips
        :param public_ip_ports: list of ports to allow for the public ips
        :return: failed_servers: list of servers for which the public IP request failed
        """
        failed_servers = []
        if not should_add_public_ip:
            return failed_servers

        ports_lst = []
        request_list = []
        server = None

        for port in public_ip_ports:
            ports_lst.append(
                {'protocol': public_ip_protocol, 'port': port})
        try:
            if not module.check_mode:
                for server in servers:
                    request = server.PublicIPs().Add(ports_lst)
                    request_list.append(request)
        except APIFailedResponse:
            # NOTE(review): only the server whose Add() raised is recorded;
            # servers after it in the list are not attempted.
            failed_servers.append(server)
        ClcServer._wait_for_requests(module, request_list)
        return failed_servers

    @staticmethod
    def _add_alert_policy_to_servers(clc, module, servers):
        """
        Associate the alert policy to servers.  No-op in check mode or when no
        alert_policy_id was supplied.
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param servers: List of servers to add alert policy to
        :return: failed_servers: the list of servers which failed while associating alert policy
        """
        failed_servers = []
        p = module.params
        alert_policy_id = p.get('alert_policy_id')
        alias = p.get('alias')

        if alert_policy_id and not module.check_mode:
            for server in servers:
                try:
                    ClcServer._add_alert_policy_to_server(
                        clc=clc,
                        alias=alias,
                        server_id=server.id,
                        alert_policy_id=alert_policy_id)
                except CLCException:
                    failed_servers.append(server)
        return failed_servers

    @staticmethod
    def _add_alert_policy_to_server(
            clc, alias, server_id, alert_policy_id):
        """
        Associate an alert policy to a clc server.
        :param clc: the clc-sdk instance to use
        :param alias: the clc account alias
        :param server_id: The clc server id
        :param alert_policy_id: the alert policy id to be associated to the server
        :return: none
        :raises CLCException: when the underlying API call fails
        """
        try:
            clc.v2.API.Call(
                method='POST',
                url='servers/%s/%s/alertPolicies' % (alias, server_id),
                payload=json.dumps(
                    {
                        'id': alert_policy_id
                    }))
        except APIFailedResponse as e:
            raise CLCException(
                'Failed to associate alert policy to the server : {0} with Error {1}'.format(
                    server_id, str(e.response_text)))

    @staticmethod
    def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
        """
        Returns the alert policy id for the given alert policy name.
        Fails the module if more than one policy has that name.
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param alias: the clc account alias
        :param alert_policy_name: the name of the alert policy
        :return: alert_policy_id: the alert policy id (None if not found)
        """
        alert_policy_id = None
        policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
        if not policies:
            return alert_policy_id
        for policy in policies.get('items'):
            if policy.get('name') == alert_policy_name:
                if not alert_policy_id:
                    alert_policy_id = policy.get('id')
                else:
                    # Ambiguous name: refuse to guess which policy was meant.
                    return module.fail_json(
                        msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
        return alert_policy_id

    @staticmethod
    def _delete_servers(module, clc, server_ids):
        """
        Delete the servers on the provided list.
        :param module: the AnsibleModule object
        :param clc: the clc-sdk instance to use
        :param server_ids: list of servers to delete
        :return: 3-tuple (changed, server_dict_array, terminated_server_ids);
                 changed is always True here, server_dict_array is always empty
        """
        terminated_server_ids = []
        server_dict_array = []
        request_list = []

        if not isinstance(server_ids, list) or len(server_ids) < 1:
            return module.fail_json(
                msg='server_ids should be a list of servers, aborting')

        servers = clc.v2.Servers(server_ids).Servers()
        for server in servers:
            if not module.check_mode:
                request_list.append(server.Delete())
        ClcServer._wait_for_requests(module, request_list)

        for server in servers:
            terminated_server_ids.append(server.id)

        return True, server_dict_array, terminated_server_ids

    @staticmethod
    def _start_stop_servers(module, clc, server_ids):
        """
        Start or Stop the servers on the provided list.
        :param module: the AnsibleModule object
        :param clc: the clc-sdk instance to use
        :param server_ids: list of servers to start or stop
        :return: a list of dictionaries with server information about the servers that were started or stopped
        """
        p = module.params
        state = p.get('state')
        changed = False
        changed_servers = []
        server_dict_array = []
        result_server_ids = []
        request_list = []

        if not isinstance(server_ids, list) or len(server_ids) < 1:
            return module.fail_json(
                msg='server_ids should be a list of servers, aborting')

        servers = clc.v2.Servers(server_ids).Servers()
        for server in servers:
            if server.powerState != state:
                changed_servers.append(server)
                if not module.check_mode:
                    request_list.append(
                        ClcServer._change_server_power_state(
                            module,
                            server,
                            state))
                changed = True

        ClcServer._wait_for_requests(module, request_list)
        ClcServer._refresh_servers(module, changed_servers)

        # set() de-duplicates (changed_servers is a subset of servers);
        # NOTE(review): set iteration order is not deterministic, so the
        # order of the returned arrays may vary between runs.
        for server in set(changed_servers + servers):
            try:
                server.data['ipaddress'] = server.details[
                    'ipAddresses'][0]['internal']
                server.data['publicip'] = str(
                    server.PublicIPs().public_ips[0])
            except (KeyError, IndexError):
                # Server has no internal/public address yet; report it anyway.
                pass

            server_dict_array.append(server.data)
            result_server_ids.append(server.id)

        return changed, server_dict_array, result_server_ids

    @staticmethod
    def _change_server_power_state(module, server, state):
        """
        Change the server powerState.
        :param module: the module to check for intended state
        :param server: the server to start or stop
        :param state: the intended powerState for the server
        :return: the request object from clc-sdk call
        """
        result = None
        try:
            if state == 'started':
                result = server.PowerOn()
            else:
                # Try to shut down the server and fall back to power off when
                # unable to shut down.
                result = server.ShutDown()
                if result and hasattr(result, 'requests') and result.requests[0]:
                    return result
                else:
                    result = server.PowerOff()
        except CLCException:
            module.fail_json(
                msg='Unable to change power state for server {0}'.format(
                    server.id))
        return result

    @staticmethod
    def _find_running_servers_by_group(module, datacenter, count_group):
        """
        Find a list of running servers in the provided group.
        A server counts as running when status == 'active' and
        powerState == 'started'.
        :param module: the AnsibleModule object
        :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
        :param count_group: the group to count the servers
        :return: list of servers, and list of running servers
        """
        group = ClcServer._find_group(
            module=module,
            datacenter=datacenter,
            lookup_group=count_group)

        servers = group.Servers().Servers()
        running_servers = []

        for server in servers:
            if server.status == 'active' and server.powerState == 'started':
                running_servers.append(server)

        return servers, running_servers

    @staticmethod
    def _find_group(module, datacenter, lookup_group=None):
        """
        Find a server group in a datacenter by calling the CLC API.
        Falls back to the module's 'group' parameter when lookup_group is
        not supplied, and fails the module if the group cannot be found.
        :param module: the AnsibleModule instance
        :param datacenter: clc-sdk.Datacenter instance to search for the group
        :param lookup_group: string name of the group to search for
        :return: clc-sdk.Group instance
        """
        if not lookup_group:
            lookup_group = module.params.get('group')
        try:
            return datacenter.Groups().Get(lookup_group)
        except CLCException:
            pass

        # The search above only acts on the main (top-level) groups;
        # walk the rest of the group tree recursively.
        result = ClcServer._find_group_recursive(
            module,
            datacenter.Groups(),
            lookup_group)

        if result is None:
            module.fail_json(
                msg=str(
                    "Unable to find group: " +
                    lookup_group +
                    " in location: " +
                    datacenter.id))

        return result

    @staticmethod
    def _find_group_recursive(module, group_list, lookup_group):
        """
        Find a server group by recursively walking the tree.
        :param module: the AnsibleModule instance to use
        :param group_list: a list of groups to search
        :param lookup_group: the group to look for
        :return: the matching clc-sdk.Group instance, or None if not found
        """
        result = None
        for group in group_list.groups:
            subgroups = group.Subgroups()
            try:
                return subgroups.Get(lookup_group)
            except CLCException:
                # Not a direct child of this group; descend into it.
                result = ClcServer._find_group_recursive(
                    module,
                    subgroups,
                    lookup_group)

            if result is not None:
                break

        return result

    @staticmethod
    def _create_clc_server(
            clc,
            module,
            server_params):
        """
        Call the CLC Rest API to Create a Server.
        :param clc: the clc-python-sdk instance to use
        :param module: the AnsibleModule instance to use
        :param server_params: a dictionary of params to use to create the servers
        :return: clc-sdk.Request object linked to the queued server request
        """
        try:
            res = clc.v2.API.Call(
                method='POST',
                url='servers/%s' % (server_params.get('alias')),
                payload=json.dumps(
                    {
                        'name': server_params.get('name'),
                        'description': server_params.get('description'),
                        'groupId': server_params.get('group_id'),
                        'sourceServerId': server_params.get('template'),
                        'isManagedOS': server_params.get('managed_os'),
                        'primaryDNS': server_params.get('primary_dns'),
                        'secondaryDNS': server_params.get('secondary_dns'),
                        'networkId': server_params.get('network_id'),
                        'ipAddress': server_params.get('ip_address'),
                        'password': server_params.get('password'),
                        'sourceServerPassword': server_params.get('source_server_password'),
                        'cpu': server_params.get('cpu'),
                        'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
                        'memoryGB': server_params.get('memory'),
                        'type': server_params.get('type'),
                        'storageType': server_params.get('storage_type'),
                        'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
                        'customFields': server_params.get('custom_fields'),
                        'additionalDisks': server_params.get('additional_disks'),
                        'ttl': server_params.get('ttl'),
                        'packages': server_params.get('packages'),
                        'configurationId': server_params.get('configuration_id'),
                        'osType': server_params.get('os_type')}))

            result = clc.v2.Requests(res)
        except APIFailedResponse as ex:
            return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
                server_params.get('name'),
                ex.response_text
            ))

        #
        # Patch the Request object so that it returns a valid server
        #

        # Find the server's UUID from the API response
        server_uuid = [obj['id']
                       for obj in res['links'] if obj['rel'] == 'self'][0]

        # Change the request server method to a _find_server_by_uuid closure so
        # that it will work
        result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
            clc,
            module,
            server_uuid,
            server_params.get('alias'))

        return result

    @staticmethod
    def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
        """
        Retrieves the anti affinity policy id of the server based on the name of the policy.
        Fails the module if more than one policy has that name.
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param alias: the CLC account alias
        :param aa_policy_name: the anti affinity policy name
        :return: aa_policy_id: The anti affinity policy id (None if not found)
        """
        aa_policy_id = None
        try:
            aa_policies = clc.v2.API.Call(method='GET',
                                          url='antiAffinityPolicies/%s' % alias)
        except APIFailedResponse as ex:
            return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
                alias, ex.response_text))
        for aa_policy in aa_policies.get('items'):
            if aa_policy.get('name') == aa_policy_name:
                if not aa_policy_id:
                    aa_policy_id = aa_policy.get('id')
                else:
                    return module.fail_json(
                        msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
        return aa_policy_id

    #
    # This is the function that gets patched to the Request.server object
    # using a lambda closure
    #

    @staticmethod
    def _find_server_by_uuid_w_retry(
            clc, module, svr_uuid, alias=None, retries=5, back_out=2):
        """
        Find the clc server by the UUID returned from the provisioning request.
        Retry the request if a 404 is returned (the server record may not be
        queryable immediately after provisioning).
        :param clc: the clc-sdk instance to use
        :param module: the AnsibleModule object
        :param svr_uuid: UUID of the server
        :param retries: the number of retry attempts to make prior to fail. default is 5
        :param alias: the Account Alias to search
        :return: a clc-sdk.Server instance
        """
        if not alias:
            alias = clc.v2.Account.GetAlias()

        # Wait and retry if the api returns a 404
        while True:
            retries -= 1

            try:
                server_obj = clc.v2.API.Call(
                    method='GET', url='servers/%s/%s?uuid=true' %
                    (alias, svr_uuid))
                server_id = server_obj['id']
                server = clc.v2.Server(
                    id=server_id,
                    alias=alias,
                    server_obj=server_obj)
                return server

            except APIFailedResponse as e:
                if e.response_status_code != 404:
                    return module.fail_json(
                        msg='A failure response was received from CLC API when '
                        'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
                        (svr_uuid, e.response_status_code, e.message))
                if retries == 0:
                    # NOTE(review): the message hardcodes "5 attempts" even
                    # though the actual count is the 'retries' argument.
                    return module.fail_json(
                        msg='Unable to reach the CLC API after 5 attempts')
                # Exponential back-off between retries.
                sleep(back_out)
                back_out *= 2

    @staticmethod
    def _set_user_agent(clc):
        # Tag outgoing SDK requests with this module's identity so the
        # CLC service can attribute API traffic (only if the installed SDK
        # supports injecting a requests session).
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)


def main():
    """
    The main function.  Instantiates the module and calls process_request.
    :return: none
    """
    argument_dict = ClcServer._define_module_argument_spec()
    module = AnsibleModule(supports_check_mode=True, **argument_dict)
    clc_server = ClcServer(module)
    clc_server.process_request()

from ansible.module_utils.basic import *  # pylint: disable=W0614
if __name__ == '__main__':
    main()
gpl-3.0
MrNuggles/HeyBoet-Telegram-Bot
temboo/Library/Wordnik/Word/GetAudio.py
5
4208
# -*- coding: utf-8 -*-

###############################################################################
#
# GetAudio
# Retrieves the audio pronunciation of a given word.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################

from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution

import json

class GetAudio(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetAudio Choreo.  A TembooSession object,
        containing a valid set of Temboo credentials, must be supplied.
        """
        super(GetAudio, self).__init__(temboo_session, '/Library/Wordnik/Word/GetAudio')

    def new_input_set(self):
        # Build an empty, typed input set for this Choreo.
        return GetAudioInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return GetAudioResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Handle used to track/poll an in-flight execution of this Choreo.
        return GetAudioChoreographyExecution(session, exec_id, path)

class GetAudioInputSet(InputSet):
    """
    Input parameters for the GetAudio Choreo.  Use the set_* methods below to
    supply values before executing the Choreo.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key from Wordnik.)
        """
        self._set_input('APIKey', value)
    def set_Cannonical(self, value):
        """
        Set the value of the Cannonical input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
        """
        self._set_input('Cannonical', value)
    def set_Limit(self, value):
        """
        Set the value of the Limit input for this Choreo. ((optional, integer) Maximum number of results to return. Defaults to 50.)
        """
        self._set_input('Limit', value)
    def set_ResponseType(self, value):
        """
        Set the value of the ResponseType input for this Choreo. ((optional, string) Response can be either JSON or XML. Defaults to JSON.)
        """
        self._set_input('ResponseType', value)
    def set_UseCanonical(self, value):
        """
        Set the value of the UseCanonical input for this Choreo. ((optional, boolean) If true will try to return the correct word root ('cats' -> 'cat'). If false returns exactly what was requested. Defaults to false.)
        """
        self._set_input('UseCanonical', value)
    def set_Word(self, value):
        """
        Set the value of the Word input for this Choreo. ((required, string) The word you want to look up on Wordnik.)
        """
        self._set_input('Word', value)

class GetAudioResultSet(ResultSet):
    """
    Results of a GetAudio Choreo execution.  Use the get_* methods below to
    read individual outputs after the execution completes.
    """

    def getJSONFromString(self, str):
        # Parse a JSON-encoded string into Python objects.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Wordnik.)
        """
        return self._output.get('Response', None)

class GetAudioChoreographyExecution(ChoreographyExecution):

    def _make_result_set(self, response, path):
        # Same typed result-set wrapper used by the Choreography class.
        return GetAudioResultSet(response, path)
gpl-3.0
arborh/tensorflow
tensorflow/python/ops/ragged/ragged_string_ops_test.py
8
5449
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_string_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.platform import googletest


@test_util.run_all_in_graph_and_eager_modes
class RaggedStringOpsTest(test_util.TensorFlowTestCase,
                          parameterized.TestCase):
  """Tests for ragged_string_ops.reduce_join over ragged string tensors."""

  def test_rank_one(self):
    # Reducing a rank-1 string tensor along its only axis yields a scalar.
    input_array = [b'this', b'is', b'a', b'test']
    truth = b'thisisatest'
    truth_shape = []
    with self.cached_session():
      output = ragged_string_ops.reduce_join(
          inputs=input_array, axis=-1, keepdims=False, separator='')
      output_array = self.evaluate(output)
    self.assertAllEqual(truth, output_array)
    self.assertAllEqual(truth_shape, output.get_shape())

  @parameterized.parameters([
      # Rank-2 ragged input, reduced along axis 0 (joins column-wise; the
      # ragged tail of the longer row passes through unchanged).
      {
          'input_array': [[
              b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors'
          ], [b'please', b'do', b'not', b'panic', b'!']],
          'axis': 0,
          'keepdims': False,
          'truth': [
              b'thisplease', b'isdo', b'anot', b'testpanic', b'for!',
              b'ragged', b'tensors'
          ],
          'truth_shape': [7],
      },
      # Same input reduced along axis 1 (joins each row), empty separator.
      {
          'input_array': [[
              b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors'
          ], [b'please', b'do', b'not', b'panic', b'!']],
          'axis': 1,
          'keepdims': False,
          'truth': [b'thisisatestforraggedtensors', b'pleasedonotpanic!'],
          'truth_shape': [2],
      },
      # Same input reduced along axis 1 with an explicit '|' separator.
      {
          'input_array': [[
              b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors'
          ], [b'please', b'do', b'not', b'panic', b'!']],
          'axis': 1,
          'keepdims': False,
          'truth': [
              b'this|is|a|test|for|ragged|tensors', b'please|do|not|panic|!'
          ],
          'truth_shape': [2],
          'separator': '|',
      },
      # Rank-3 ragged input, innermost axis.
      {
          'input_array': [[[b'a', b'b'], [b'b', b'c']], [[b'dd', b'ee']]],
          'axis': -1,
          'keepdims': False,
          'truth': [[b'a|b', b'b|c'], [b'dd|ee']],
          'truth_shape': [2, None],
          'separator': '|',
      },
      # Rank-4 ragged input reduced along axis -2 (joins across rows;
      # elements without a partner pass through unchanged).
      {
          'input_array': [[[[b'a', b'b', b'c'], [b'dd', b'ee']]],
                          [[[b'f', b'g', b'h'], [b'ii', b'jj']]]],
          'axis': -2,
          'keepdims': False,
          'truth': [[[b'a|dd', b'b|ee', b'c']], [[b'f|ii', b'g|jj', b'h']]],
          'truth_shape': [2, None, None],
          'separator': '|',
      },
      # Character-level rank-3 input joined back into words.
      {
          'input_array': [[[b't', b'h', b'i', b's'], [b'i', b's'], [b'a'],
                           [b't', b'e', b's', b't']],
                          [[b'p', b'l', b'e', b'a', b's', b'e'],
                           [b'p', b'a', b'n', b'i', b'c']]],
          'axis': -1,
          'keepdims': False,
          'truth': [[b'this', b'is', b'a', b'test'], [b'please', b'panic']],
          'truth_shape': [2, None],
          'separator': '',
      },
      # Character-level rank-4 input, innermost axis only.
      {
          'input_array': [[[[b't'], [b'h'], [b'i'], [b's']], [[b'i', b's']],
                           [[b'a', b'n']], [[b'e'], [b'r'], [b'r']]],
                          [[[b'p'], [b'l'], [b'e'], [b'a'], [b's'], [b'e']],
                           [[b'p'], [b'a'], [b'n'], [b'i'], [b'c']]]],
          'axis': -1,
          'keepdims': False,
          'truth': [[[b't', b'h', b'i', b's'], [b'is'], [b'an'],
                     [b'e', b'r', b'r']],
                    [[b'p', b'l', b'e', b'a', b's', b'e'],
                     [b'p', b'a', b'n', b'i', b'c']]],
          'truth_shape': [2, None, None],
          'separator': '',
      },
  ])
  def test_different_ranks(self,
                           input_array,
                           axis,
                           keepdims,
                           truth,
                           truth_shape,
                           separator=''):
    with self.cached_session():
      input_tensor = ragged_factory_ops.constant(input_array)
      output = ragged_string_ops.reduce_join(
          inputs=input_tensor,
          axis=axis,
          keepdims=keepdims,
          separator=separator)
      output_array = self.evaluate(output)
    self.assertAllEqual(truth, output_array)
    # Static shape may be reported either as Dimension objects or as plain
    # ints/None depending on mode; normalize before comparing.
    if all(isinstance(s, tensor_shape.Dimension) for s in output.shape):
      output_shape = [dim.value for dim in output.shape]
    else:
      output_shape = output.shape
    self.assertAllEqual(truth_shape, output_shape)


if __name__ == '__main__':
  googletest.main()
apache-2.0
galak/zephyr
scripts/kconfig/kconfiglib.py
6
259872
# Copyright (c) 2011-2019, Ulf Magnusson # SPDX-License-Identifier: ISC """ Overview ======== Kconfiglib is a Python 2/3 library for scripting and extracting information from Kconfig (https://www.kernel.org/doc/Documentation/kbuild/kconfig-language.txt) configuration systems. See the homepage at https://github.com/ulfalizer/Kconfiglib for a longer overview. Since Kconfiglib 12.0.0, the library version is available in kconfiglib.VERSION, which is a (<major>, <minor>, <patch>) tuple, e.g. (12, 0, 0). Using Kconfiglib on the Linux kernel with the Makefile targets ============================================================== For the Linux kernel, a handy interface is provided by the scripts/kconfig/Makefile patch, which can be applied with either 'git am' or the 'patch' utility: $ wget -qO- https://raw.githubusercontent.com/ulfalizer/Kconfiglib/master/makefile.patch | git am $ wget -qO- https://raw.githubusercontent.com/ulfalizer/Kconfiglib/master/makefile.patch | patch -p1 Warning: Not passing -p1 to patch will cause the wrong file to be patched. Please tell me if the patch does not apply. It should be trivial to apply manually, as it's just a block of text that needs to be inserted near the other *conf: targets in scripts/kconfig/Makefile. Look further down for a motivation for the Makefile patch and for instructions on how you can use Kconfiglib without it. If you do not wish to install Kconfiglib via pip, the Makefile patch is set up so that you can also just clone Kconfiglib into the kernel root: $ git clone git://github.com/ulfalizer/Kconfiglib.git $ git am Kconfiglib/makefile.patch (or 'patch -p1 < Kconfiglib/makefile.patch') Warning: The directory name Kconfiglib/ is significant in this case, because it's added to PYTHONPATH by the new targets in makefile.patch. The targets added by the Makefile patch are described in the following sections. make kmenuconfig ---------------- This target runs the curses menuconfig interface with Python 3. 
As of Kconfiglib 12.2.0, both Python 2 and Python 3 are supported (previously, only Python 3 was supported, so this was a backport). make guiconfig -------------- This target runs the Tkinter menuconfig interface. Both Python 2 and Python 3 are supported. To change the Python interpreter used, pass PYTHONCMD=<executable> to 'make'. The default is 'python'. make [ARCH=<arch>] iscriptconfig -------------------------------- This target gives an interactive Python prompt where a Kconfig instance has been preloaded and is available in 'kconf'. To change the Python interpreter used, pass PYTHONCMD=<executable> to 'make'. The default is 'python'. To get a feel for the API, try evaluating and printing the symbols in kconf.defined_syms, and explore the MenuNode menu tree starting at kconf.top_node by following 'next' and 'list' pointers. The item contained in a menu node is found in MenuNode.item (note that this can be one of the constants kconfiglib.MENU and kconfiglib.COMMENT), and all symbols and choices have a 'nodes' attribute containing their menu nodes (usually only one). Printing a menu node will print its item, in Kconfig format. If you want to look up a symbol by name, use the kconf.syms dictionary. make scriptconfig SCRIPT=<script> [SCRIPT_ARG=<arg>] ---------------------------------------------------- This target runs the Python script given by the SCRIPT parameter on the configuration. sys.argv[1] holds the name of the top-level Kconfig file (currently always "Kconfig" in practice), and sys.argv[2] holds the SCRIPT_ARG argument, if given. See the examples/ subdirectory for example scripts. make dumpvarsconfig ------------------- This target prints a list of all environment variables referenced from the Kconfig files, together with their values. See the Kconfiglib/examples/dumpvars.py script. Only environment variables that are referenced via the Kconfig preprocessor $(FOO) syntax are included. The preprocessor was added in Linux 4.18. 
Using Kconfiglib without the Makefile targets ============================================= The make targets are only needed to pick up environment variables exported from the Kbuild makefiles and referenced inside Kconfig files, via e.g. 'source "arch/$(SRCARCH)/Kconfig" and commands run via '$(shell,...)'. These variables are referenced as of writing (Linux 4.18), together with sample values: srctree (.) ARCH (x86) SRCARCH (x86) KERNELVERSION (4.18.0) CC (gcc) HOSTCC (gcc) HOSTCXX (g++) CC_VERSION_TEXT (gcc (Ubuntu 7.3.0-16ubuntu3) 7.3.0) Older kernels only reference ARCH, SRCARCH, and KERNELVERSION. If your kernel is recent enough (4.18+), you can get a list of referenced environment variables via 'make dumpvarsconfig' (see above). Note that this command is added by the Makefile patch. To run Kconfiglib without the Makefile patch, set the environment variables manually: $ srctree=. ARCH=x86 SRCARCH=x86 KERNELVERSION=`make kernelversion` ... python(3) >>> import kconfiglib >>> kconf = kconfiglib.Kconfig() # filename defaults to "Kconfig" Search the top-level Makefile for "Additional ARCH settings" to see other possibilities for ARCH and SRCARCH. Intro to symbol values ====================== Kconfiglib has the same assignment semantics as the C implementation. Any symbol can be assigned a value by the user (via Kconfig.load_config() or Symbol.set_value()), but this user value is only respected if the symbol is visible, which corresponds to it (currently) being visible in the menuconfig interface. For symbols with prompts, the visibility of the symbol is determined by the condition on the prompt. Symbols without prompts are never visible, so setting a user value on them is pointless. A warning will be printed by default if Symbol.set_value() is called on a promptless symbol. Assignments to promptless symbols are normal within a .config file, so no similar warning will be printed by load_config(). 
Dependencies from parents and 'if'/'depends on' are propagated to properties, including prompts, so these two configurations are logically equivalent: (1) menu "menu" depends on A if B config FOO tristate "foo" if D default y depends on C endif endmenu (2) menu "menu" depends on A config FOO tristate "foo" if A && B && C && D default y if A && B && C endmenu In this example, A && B && C && D (the prompt condition) needs to be non-n for FOO to be visible (assignable). If its value is m, the symbol can only be assigned the value m: The visibility sets an upper bound on the value that can be assigned by the user, and any higher user value will be truncated down. 'default' properties are independent of the visibility, though a 'default' will often get the same condition as the prompt due to dependency propagation. 'default' properties are used if the symbol is not visible or has no user value. Symbols with no user value (or that have a user value but are not visible) and no (active) 'default' default to n for bool/tristate symbols, and to the empty string for other symbol types. 'select' works similarly to symbol visibility, but sets a lower bound on the value of the symbol. The lower bound is determined by the value of the select*ing* symbol. 'select' does not respect visibility, so non-visible symbols can be forced to a particular (minimum) value by a select as well. For non-bool/tristate symbols, it only matters whether the visibility is n or non-n: m visibility acts the same as y visibility. Conditions on 'default' and 'select' work in mostly intuitive ways. If the condition is n, the 'default' or 'select' is disabled. If it is m, the 'default' or 'select' value (the value of the selecting symbol) is truncated down to m. When writing a configuration with Kconfig.write_config(), only symbols that are visible, have an (active) default, or are selected will get written out (note that this includes all symbols that would accept user values). 
Kconfiglib matches the .config format produced by the C implementations down
to the character. This eases testing.

For a visible bool/tristate symbol FOO with value n, this line is written to
.config:

    # CONFIG_FOO is not set

The point is to remember the user n selection (which might differ from the
default value the symbol would get), while at the same time sticking to the
rule that undefined corresponds to n (.config uses Makefile format, making the
line above a comment). When the .config file is read back in, this line will
be treated the same as the following assignment:

    CONFIG_FOO=n

In Kconfiglib, the set of (currently) assignable values for a bool/tristate
symbol appear in Symbol.assignable. For other symbol types, just check if
sym.visibility is non-0 (non-n) to see whether the user value will have an
effect.


Intro to the menu tree
======================

The menu structure, as seen in e.g. menuconfig, is represented by a tree of
MenuNode objects. The top node of the configuration corresponds to an implicit
top-level menu, the title of which is shown at the top in the standard
menuconfig interface. (The title is also available in Kconfig.mainmenu_text
in Kconfiglib.)

The top node is found in Kconfig.top_node. From there, you can visit child menu
nodes by following the 'list' pointer, and any following menu nodes by
following the 'next' pointer. Usually, a non-None 'list' pointer indicates a
menu or Choice, but menu nodes for symbols can sometimes have a non-None 'list'
pointer too due to submenus created implicitly from dependencies.

MenuNode.item is either a Symbol or a Choice object, or one of the constants
MENU and COMMENT. The prompt of the menu node can be found in MenuNode.prompt,
which also holds the title for menus and comments. For Symbol and Choice,
MenuNode.help holds the help text (if any, otherwise None).

Most symbols will only have a single menu node. A symbol defined in multiple
locations will have one menu node for each location.
The list of menu nodes for a Symbol or Choice can be found in the Symbol/Choice.nodes attribute. Note that prompts and help texts for symbols and choices are stored in their menu node(s) rather than in the Symbol or Choice objects themselves. This makes it possible to define a symbol in multiple locations with a different prompt or help text in each location. To get the help text or prompt for a symbol with a single menu node, do sym.nodes[0].help and sym.nodes[0].prompt, respectively. The prompt is a (text, condition) tuple, where condition determines the visibility (see 'Intro to expressions' below). This organization mirrors the C implementation. MenuNode is called 'struct menu' there, but I thought "menu" was a confusing name. It is possible to give a Choice a name and define it in multiple locations, hence why Choice.nodes is also a list. As a convenience, the properties added at a particular definition location are available on the MenuNode itself, in e.g. MenuNode.defaults. This is helpful when generating documentation, so that symbols/choices defined in multiple locations can be shown with the correct properties at each location. Intro to expressions ==================== Expressions can be evaluated with the expr_value() function and printed with the expr_str() function (these are used internally as well). Evaluating an expression always yields a tristate value, where n, m, and y are represented as 0, 1, and 2, respectively. The following table should help you figure out how expressions are represented. A, B, C, ... are symbols (Symbol instances), NOT is the kconfiglib.NOT constant, etc. 
Expression Representation ---------- -------------- A A "A" A (constant symbol) !A (NOT, A) A && B (AND, A, B) A && B && C (AND, A, (AND, B, C)) A || B (OR, A, B) A || (B && C && D) (OR, A, (AND, B, (AND, C, D))) A = B (EQUAL, A, B) A != "foo" (UNEQUAL, A, foo (constant symbol)) A && B = C && D (AND, A, (AND, (EQUAL, B, C), D)) n Kconfig.n (constant symbol) m Kconfig.m (constant symbol) y Kconfig.y (constant symbol) "y" Kconfig.y (constant symbol) Strings like "foo" in 'default "foo"' or 'depends on SYM = "foo"' are represented as constant symbols, so the only values that appear in expressions are symbols***. This mirrors the C implementation. ***For choice symbols, the parent Choice will appear in expressions as well, but it's usually invisible as the value interfaces of Symbol and Choice are identical. This mirrors the C implementation and makes different choice modes "just work". Manual evaluation examples: - The value of A && B is min(A.tri_value, B.tri_value) - The value of A || B is max(A.tri_value, B.tri_value) - The value of !A is 2 - A.tri_value - The value of A = B is 2 (y) if A.str_value == B.str_value, and 0 (n) otherwise. Note that str_value is used here instead of tri_value. For constant (as well as undefined) symbols, str_value matches the name of the symbol. This mirrors the C implementation and explains why 'depends on SYM = "foo"' above works as expected. n/m/y are automatically converted to the corresponding constant symbols "n"/"m"/"y" (Kconfig.n/m/y) during parsing. Kconfig.const_syms is a dictionary like Kconfig.syms but for constant symbols. If a condition is missing (e.g., <cond> when the 'if <cond>' is removed from 'default A if <cond>'), it is actually Kconfig.y. The standard __str__() functions just avoid printing 'if y' conditions to give cleaner output. 
Kconfig extensions
==================

Kconfiglib includes a couple of Kconfig extensions:

'source' with relative path
---------------------------

The 'rsource' statement sources Kconfig files with a path relative to the
directory of the Kconfig file containing the 'rsource' statement, instead of
relative to the project root.

Consider the following directory tree:

  Project
  +--Kconfig
  |
  +--src
     +--Kconfig
     |
     +--SubSystem1
        +--Kconfig
        |
        +--ModuleA
           +--Kconfig

In this example, assume that src/SubSystem1/Kconfig wants to source
src/SubSystem1/ModuleA/Kconfig.

With 'source', this statement would be used:

  source "src/SubSystem1/ModuleA/Kconfig"

With 'rsource', this turns into

  rsource "ModuleA/Kconfig"

If an absolute path is given to 'rsource', it acts the same as 'source'.

'rsource' can be used to create "position-independent" Kconfig trees that can
be moved around freely.


Globbing 'source'
-----------------

'source' and 'rsource' accept glob patterns, sourcing all matching Kconfig
files. They require at least one matching file, raising a KconfigError
otherwise.

For example, the following statement might source sub1/foofoofoo and
sub2/foobarfoo:

  source "sub[12]/foo*foo"

The glob patterns accepted are the same as for the standard glob.glob()
function.

Two additional statements are provided for cases where it's acceptable for a
pattern to match no files: 'osource' and 'orsource' (the o is for "optional").

For example, the following statements will be no-ops if neither "foo" nor any
files matching "bar*" exist:

  osource "foo"
  osource "bar*"

'orsource' does a relative optional source.

'source' and 'osource' are analogous to 'include' and '-include' in Make.


Generalized def_* keywords
--------------------------

def_int, def_hex, and def_string are available in addition to def_bool and
def_tristate, allowing int, hex, and string symbols to be given a type and a
default at the same time.
Extra optional warnings ----------------------- Some optional warnings can be controlled via environment variables: - KCONFIG_WARN_UNDEF: If set to 'y', warnings will be generated for all references to undefined symbols within Kconfig files. The only gotcha is that all hex literals must be prefixed with "0x" or "0X", to make it possible to distinguish them from symbol references. Some projects (e.g. the Linux kernel) use multiple Kconfig trees with many shared Kconfig files, leading to some safe undefined symbol references. KCONFIG_WARN_UNDEF is useful in projects that only have a single Kconfig tree though. KCONFIG_STRICT is an older alias for this environment variable, supported for backwards compatibility. - KCONFIG_WARN_UNDEF_ASSIGN: If set to 'y', warnings will be generated for all assignments to undefined symbols within .config files. By default, no such warnings are generated. This warning can also be enabled/disabled via the Kconfig.warn_assign_undef variable. Preprocessor user functions defined in Python --------------------------------------------- Preprocessor functions can be defined in Python, which makes it simple to integrate information from existing Python tools into Kconfig (e.g. to have Kconfig symbols depend on hardware information stored in some other format). Putting a Python module named kconfigfunctions(.py) anywhere in sys.path will cause it to be imported by Kconfiglib (in Kconfig.__init__()). Note that sys.path can be customized via PYTHONPATH, and includes the directory of the module being run by default, as well as installation directories. If the KCONFIG_FUNCTIONS environment variable is set, it gives a different module name to use instead of 'kconfigfunctions'. The imported module is expected to define a global dictionary named 'functions' that maps function names to Python functions, as follows: def my_fn(kconf, name, arg_1, arg_2, ...): # kconf: # Kconfig instance # # name: # Name of the user-defined function ("my-fn"). 
Think argv[0]. # # arg_1, arg_2, ...: # Arguments passed to the function from Kconfig (strings) # # Returns a string to be substituted as the result of calling the # function ... def my_other_fn(kconf, name, arg_1, arg_2, ...): ... functions = { "my-fn": (my_fn, <min.args>, <max.args>/None), "my-other-fn": (my_other_fn, <min.args>, <max.args>/None), ... } ... <min.args> and <max.args> are the minimum and maximum number of arguments expected by the function (excluding the implicit 'name' argument). If <max.args> is None, there is no upper limit to the number of arguments. Passing an invalid number of arguments will generate a KconfigError exception. Functions can access the current parsing location as kconf.filename/linenr. Accessing other fields of the Kconfig object is not safe. See the warning below. Keep in mind that for a variable defined like 'foo = $(fn)', 'fn' will be called only when 'foo' is expanded. If 'fn' uses the parsing location and the intent is to use the location of the assignment, you want 'foo := $(fn)' instead, which calls the function immediately. Once defined, user functions can be called from Kconfig in the same way as other preprocessor functions: config FOO ... depends on $(my-fn,arg1,arg2) If my_fn() returns "n", this will result in config FOO ... depends on n Warning ******* User-defined preprocessor functions are called as they're encountered at parse time, before all Kconfig files have been processed, and before the menu tree has been finalized. There are no guarantees that accessing Kconfig symbols or the menu tree via the 'kconf' parameter will work, and it could potentially lead to a crash. Preferably, user-defined functions should be stateless. Feedback ======== Send bug reports, suggestions, and questions to ulfalizer a.t Google's email service, or open a ticket on the GitHub page. """ import errno import importlib import os import re import sys # Get rid of some attribute lookups. These are obvious in context. 
from glob import iglob from os.path import dirname, exists, expandvars, islink, join, realpath VERSION = (14, 1, 0) # File layout: # # Public classes # Public functions # Internal functions # Global constants # Line length: 79 columns # # Public classes # class Kconfig(object): """ Represents a Kconfig configuration, e.g. for x86 or ARM. This is the set of symbols, choices, and menu nodes appearing in the configuration. Creating any number of Kconfig objects (including for different architectures) is safe. Kconfiglib doesn't keep any global state. The following attributes are available. They should be treated as read-only, and some are implemented through @property magic. syms: A dictionary with all symbols in the configuration, indexed by name. Also includes all symbols that are referenced in expressions but never defined, except for constant (quoted) symbols. Undefined symbols can be recognized by Symbol.nodes being empty -- see the 'Intro to the menu tree' section in the module docstring. const_syms: A dictionary like 'syms' for constant (quoted) symbols named_choices: A dictionary like 'syms' for named choices (choice FOO) defined_syms: A list with all defined symbols, in the same order as they appear in the Kconfig files. Symbols defined in multiple locations appear multiple times. Note: You probably want to use 'unique_defined_syms' instead. This attribute is mostly maintained for backwards compatibility. unique_defined_syms: A list like 'defined_syms', but with duplicates removed. Just the first instance is kept for symbols defined in multiple locations. Kconfig order is preserved otherwise. Using this attribute instead of 'defined_syms' can save work, and automatically gives reasonable behavior when writing configuration output (symbols defined in multiple locations only generate output once, while still preserving Kconfig order for readability). choices: A list with all choices, in the same order as they appear in the Kconfig files. 
Note: You probably want to use 'unique_choices' instead. This attribute is mostly maintained for backwards compatibility. unique_choices: Analogous to 'unique_defined_syms', for choices. Named choices can have multiple definition locations. menus: A list with all menus, in the same order as they appear in the Kconfig files comments: A list with all comments, in the same order as they appear in the Kconfig files kconfig_filenames: A list with the filenames of all Kconfig files included in the configuration, relative to $srctree (or relative to the current directory if $srctree isn't set), except absolute paths (e.g. 'source "/foo/Kconfig"') are kept as-is. The files are listed in the order they are source'd, starting with the top-level Kconfig file. If a file is source'd multiple times, it will appear multiple times. Use set() to get unique filenames. Note that Kconfig.sync_deps() already indirectly catches any file modifications that change configuration output. env_vars: A set() with the names of all environment variables referenced in the Kconfig files. Only environment variables referenced with the preprocessor $(FOO) syntax will be registered. The older $FOO syntax is only supported for backwards compatibility. Also note that $(FOO) won't be registered unless the environment variable $FOO is actually set. If it isn't, $(FOO) is an expansion of an unset preprocessor variable (which gives the empty string). Another gotcha is that environment variables referenced in the values of recursively expanded preprocessor variables (those defined with =) will only be registered if the variable is actually used (expanded) somewhere. The note from the 'kconfig_filenames' documentation applies here too. n/m/y: The predefined constant symbols n/m/y. Also available in const_syms. modules: The Symbol instance for the modules symbol. Currently hardcoded to MODULES, which is backwards compatible. Kconfiglib will warn if 'option modules' is set on some other symbol. 
Tell me if you need proper 'option modules' support. 'modules' is never None. If the MODULES symbol is not explicitly defined, its tri_value will be 0 (n), as expected. A simple way to enable modules is to do 'kconf.modules.set_value(2)' (provided the MODULES symbol is defined and visible). Modules are disabled by default in the kernel Kconfig files as of writing, though nearly all defconfig files enable them (with 'CONFIG_MODULES=y'). defconfig_list: The Symbol instance for the 'option defconfig_list' symbol, or None if no defconfig_list symbol exists. The defconfig filename derived from this symbol can be found in Kconfig.defconfig_filename. defconfig_filename: The filename given by the defconfig_list symbol. This is taken from the first 'default' with a satisfied condition where the specified file exists (can be opened for reading). If a defconfig file foo/defconfig is not found and $srctree was set when the Kconfig was created, $srctree/foo/defconfig is looked up as well. 'defconfig_filename' is None if either no defconfig_list symbol exists, or if the defconfig_list symbol has no 'default' with a satisfied condition that specifies a file that exists. Gotcha: scripts/kconfig/Makefile might pass --defconfig=<defconfig> to scripts/kconfig/conf when running e.g. 'make defconfig'. This option overrides the defconfig_list symbol, meaning defconfig_filename might not always match what 'make defconfig' would use. top_node: The menu node (see the MenuNode class) of the implicit top-level menu. Acts as the root of the menu tree. mainmenu_text: The prompt (title) of the top menu (top_node). Defaults to "Main menu". Can be changed with the 'mainmenu' statement (see kconfig-language.txt). variables: A dictionary with all preprocessor variables, indexed by name. See the Variable class. warn: Set this variable to True/False to enable/disable warnings. See Kconfig.__init__(). When 'warn' is False, the values of the other warning-related variables are ignored. 
This variable as well as the other warn* variables can be read to check the current warning settings. warn_to_stderr: Set this variable to True/False to enable/disable warnings on stderr. See Kconfig.__init__(). warn_assign_undef: Set this variable to True to generate warnings for assignments to undefined symbols in configuration files. This variable is False by default unless the KCONFIG_WARN_UNDEF_ASSIGN environment variable was set to 'y' when the Kconfig instance was created. warn_assign_override: Set this variable to True to generate warnings for multiple assignments to the same symbol in configuration files, where the assignments set different values (e.g. CONFIG_FOO=m followed by CONFIG_FOO=y, where the last value would get used). This variable is True by default. Disabling it might be useful when merging configurations. warn_assign_redun: Like warn_assign_override, but for multiple assignments setting a symbol to the same value. This variable is True by default. Disabling it might be useful when merging configurations. warnings: A list of strings containing all warnings that have been generated, for cases where more flexibility is needed. See the 'warn_to_stderr' parameter to Kconfig.__init__() and the Kconfig.warn_to_stderr variable as well. Note that warnings still get added to Kconfig.warnings when 'warn_to_stderr' is True. Just as for warnings printed to stderr, only warnings that are enabled will get added to Kconfig.warnings. See the various Kconfig.warn* variables. missing_syms: A list with (name, value) tuples for all assignments to undefined symbols within the most recently loaded .config file(s). 'name' is the symbol name without the 'CONFIG_' prefix. 'value' is a string that gives the right-hand side of the assignment verbatim. See Kconfig.load_config() as well. srctree: The value the $srctree environment variable had when the Kconfig instance was created, or the empty string if $srctree wasn't set. 
This gives nice behavior with os.path.join(), which treats "" as the current directory, without adding "./". Kconfig files are looked up relative to $srctree (unless absolute paths are used), and .config files are looked up relative to $srctree if they are not found in the current directory. This is used to support out-of-tree builds. The C tools use this environment variable in the same way. Changing $srctree after creating the Kconfig instance has no effect. Only the value when the configuration is loaded matters. This avoids surprises if multiple configurations are loaded with different values for $srctree. config_prefix: The value the CONFIG_ environment variable had when the Kconfig instance was created, or "CONFIG_" if CONFIG_ wasn't set. This is the prefix used (and expected) on symbol names in .config files and C headers. Used in the same way in the C tools. config_header: The value the KCONFIG_CONFIG_HEADER environment variable had when the Kconfig instance was created, or the empty string if KCONFIG_CONFIG_HEADER wasn't set. This string is inserted verbatim at the beginning of configuration files. See write_config(). header_header: The value the KCONFIG_AUTOHEADER_HEADER environment variable had when the Kconfig instance was created, or the empty string if KCONFIG_AUTOHEADER_HEADER wasn't set. This string is inserted verbatim at the beginning of header files. See write_autoconf(). filename/linenr: The current parsing location, for use in Python preprocessor functions. See the module docstring. 
""" __slots__ = ( "_encoding", "_functions", "_set_match", "_srctree_prefix", "_unset_match", "_warn_assign_no_prompt", "choices", "comments", "config_header", "config_prefix", "const_syms", "defconfig_list", "defined_syms", "env_vars", "header_header", "kconfig_filenames", "m", "menus", "missing_syms", "modules", "n", "named_choices", "srctree", "syms", "top_node", "unique_choices", "unique_defined_syms", "variables", "warn", "warn_assign_override", "warn_assign_redun", "warn_assign_undef", "warn_to_stderr", "warnings", "y", # Parsing-related "_parsing_kconfigs", "_readline", "filename", "linenr", "_include_path", "_filestack", "_line", "_tokens", "_tokens_i", "_reuse_tokens", ) # # Public interface # def __init__(self, filename="Kconfig", warn=True, warn_to_stderr=True, encoding="utf-8", suppress_traceback=False): """ Creates a new Kconfig object by parsing Kconfig files. Note that Kconfig files are not the same as .config files (which store configuration symbol values). See the module docstring for some environment variables that influence default warning settings (KCONFIG_WARN_UNDEF and KCONFIG_WARN_UNDEF_ASSIGN). Raises KconfigError on syntax/semantic errors, and OSError or (possibly a subclass of) IOError on IO errors ('errno', 'strerror', and 'filename' are available). Note that IOError is an alias for OSError on Python 3, so it's enough to catch OSError there. If you need Python 2/3 compatibility, it's easiest to catch EnvironmentError, which is a common base class of OSError/IOError on Python 2 and an alias for OSError on Python 3. filename (default: "Kconfig"): The Kconfig file to load. For the Linux kernel, you'll want "Kconfig" from the top-level directory, as environment variables will make sure the right Kconfig is included from there (arch/$SRCARCH/Kconfig as of writing). If $srctree is set, 'filename' will be looked up relative to it. $srctree is also used to look up source'd files within Kconfig files. See the class documentation. 
        If you are using Kconfiglib via 'make scriptconfig', the filename of
        the base Kconfig file will be in sys.argv[1]. It's currently always
        "Kconfig" in practice.

        warn (default: True):
          True if warnings related to this configuration should be generated.
          This can be changed later by setting Kconfig.warn to True/False. It
          is provided as a constructor argument since warnings might be
          generated during parsing.

          See the other Kconfig.warn_* variables as well, which enable or
          suppress certain warnings when warnings are enabled.

          All generated warnings are added to the Kconfig.warnings list. See
          the class documentation.

        warn_to_stderr (default: True):
          True if warnings should be printed to stderr in addition to being
          added to Kconfig.warnings.

          This can be changed later by setting Kconfig.warn_to_stderr to
          True/False.

        encoding (default: "utf-8"):
          The encoding to use when reading and writing files, and when
          decoding output from commands run via $(shell). If None, the
          encoding specified in the current locale will be used.

          The "utf-8" default avoids exceptions on systems that are configured
          to use the C locale, which implies an ASCII encoding.

          This parameter has no effect on Python 2, due to implementation
          issues (regular strings turning into Unicode strings, which are
          distinct in Python 2). Python 2 doesn't decode regular strings
          anyway.

          Related PEP: https://www.python.org/dev/peps/pep-0538/

        suppress_traceback (default: False):
          Helper for tools. When True, any EnvironmentError or KconfigError
          generated during parsing is caught, the exception message is printed
          to stderr together with the command name, and sys.exit(1) is called
          (which generates SystemExit).

          This hides the Python traceback for "expected" errors like syntax
          errors in Kconfig files.

          Other exceptions besides EnvironmentError and KconfigError are still
          propagated when suppress_traceback is True.
""" try: self._init(filename, warn, warn_to_stderr, encoding) except (EnvironmentError, KconfigError) as e: if suppress_traceback: cmd = sys.argv[0] # Empty string if missing if cmd: cmd += ": " # Some long exception messages have extra newlines for better # formatting when reported as an unhandled exception. Strip # them here. sys.exit(cmd + str(e).strip()) raise def _init(self, filename, warn, warn_to_stderr, encoding): # See __init__() self._encoding = encoding self.srctree = os.getenv("srctree", "") # A prefix we can reliably strip from glob() results to get a filename # relative to $srctree. relpath() can cause issues for symlinks, # because it assumes symlink/../foo is the same as foo/. self._srctree_prefix = realpath(self.srctree) + os.sep self.warn = warn self.warn_to_stderr = warn_to_stderr self.warn_assign_undef = os.getenv("KCONFIG_WARN_UNDEF_ASSIGN") == "y" self.warn_assign_override = True self.warn_assign_redun = True self._warn_assign_no_prompt = True self.warnings = [] self.config_prefix = os.getenv("CONFIG_", "CONFIG_") # Regular expressions for parsing .config files self._set_match = _re_match(self.config_prefix + r"([^=]+)=(.*)") self._unset_match = _re_match(r"# {}([^ ]+) is not set".format( self.config_prefix)) self.config_header = os.getenv("KCONFIG_CONFIG_HEADER", "") self.header_header = os.getenv("KCONFIG_AUTOHEADER_HEADER", "") self.syms = {} self.const_syms = {} self.defined_syms = [] self.missing_syms = [] self.named_choices = {} self.choices = [] self.menus = [] self.comments = [] for nmy in "n", "m", "y": sym = Symbol() sym.kconfig = self sym.name = nmy sym.is_constant = True sym.orig_type = TRISTATE sym._cached_tri_val = STR_TO_TRI[nmy] self.const_syms[nmy] = sym self.n = self.const_syms["n"] self.m = self.const_syms["m"] self.y = self.const_syms["y"] # Make n/m/y well-formed symbols for nmy in "n", "m", "y": sym = self.const_syms[nmy] sym.rev_dep = sym.weak_rev_dep = sym.direct_dep = self.n # Maps preprocessor variables names to 
Variable instances self.variables = {} # Predefined preprocessor functions, with min/max number of arguments self._functions = { "info": (_info_fn, 1, 1), "error-if": (_error_if_fn, 2, 2), "filename": (_filename_fn, 0, 0), "lineno": (_lineno_fn, 0, 0), "shell": (_shell_fn, 1, 1), "warning-if": (_warning_if_fn, 2, 2), } # Add any user-defined preprocessor functions try: self._functions.update( importlib.import_module( os.getenv("KCONFIG_FUNCTIONS", "kconfigfunctions") ).functions) except ImportError: pass # This determines whether previously unseen symbols are registered. # They shouldn't be if we parse expressions after parsing, as part of # Kconfig.eval_string(). self._parsing_kconfigs = True self.modules = self._lookup_sym("MODULES") self.defconfig_list = None self.top_node = MenuNode() self.top_node.kconfig = self self.top_node.item = MENU self.top_node.is_menuconfig = True self.top_node.visibility = self.y self.top_node.prompt = ("Main menu", self.y) self.top_node.parent = None self.top_node.dep = self.y self.top_node.filename = filename self.top_node.linenr = 1 self.top_node.include_path = () # Parse the Kconfig files # Not used internally. Provided as a convenience. self.kconfig_filenames = [filename] self.env_vars = set() # Keeps track of the location in the parent Kconfig files. Kconfig # files usually source other Kconfig files. See _enter_file(). self._filestack = [] self._include_path = () # The current parsing location self.filename = filename self.linenr = 0 # Used to avoid retokenizing lines when we discover that they're not # part of the construct currently being parsed. This is kinda like an # unget operation. self._reuse_tokens = False # Open the top-level Kconfig file. Store the readline() method directly # as a small optimization. self._readline = self._open(join(self.srctree, filename), "r").readline try: # Parse the Kconfig files. Returns the last node, which we # terminate with '.next = None'. 
self._parse_block(None, self.top_node, self.top_node).next = None self.top_node.list = self.top_node.next self.top_node.next = None except UnicodeDecodeError as e: _decoding_error(e, self.filename) # Close the top-level Kconfig file. __self__ fetches the 'file' object # for the method. self._readline.__self__.close() self._parsing_kconfigs = False # Do various menu tree post-processing self._finalize_node(self.top_node, self.y) self.unique_defined_syms = _ordered_unique(self.defined_syms) self.unique_choices = _ordered_unique(self.choices) # Do sanity checks. Some of these depend on everything being finalized. self._check_sym_sanity() self._check_choice_sanity() # KCONFIG_STRICT is an older alias for KCONFIG_WARN_UNDEF, supported # for backwards compatibility if os.getenv("KCONFIG_WARN_UNDEF") == "y" or \ os.getenv("KCONFIG_STRICT") == "y": self._check_undef_syms() # Build Symbol._dependents for all symbols and choices self._build_dep() # Check for dependency loops check_dep_loop_sym = _check_dep_loop_sym # Micro-optimization for sym in self.unique_defined_syms: check_dep_loop_sym(sym, False) # Add extra dependencies from choices to choice symbols that get # awkward during dependency loop detection self._add_choice_deps() @property def mainmenu_text(self): """ See the class documentation. """ return self.top_node.prompt[0] @property def defconfig_filename(self): """ See the class documentation. """ if self.defconfig_list: for filename, cond in self.defconfig_list.defaults: if expr_value(cond): try: with self._open_config(filename.str_value) as f: return f.name except EnvironmentError: continue return None def load_config(self, filename=None, replace=True, verbose=None): """ Loads symbol values from a file in the .config format. Equivalent to calling Symbol.set_value() to set each of the values. "# CONFIG_FOO is not set" within a .config file sets the user value of FOO to n. The C tools work the same way. 
For each symbol, the Symbol.user_value attribute holds the value the symbol was assigned in the .config file (if any). The user value might differ from Symbol.str/tri_value if there are unsatisfied dependencies. Calling this function also updates the Kconfig.missing_syms attribute with a list of all assignments to undefined symbols within the configuration file. Kconfig.missing_syms is cleared if 'replace' is True, and appended to otherwise. See the documentation for Kconfig.missing_syms as well. See the Kconfig.__init__() docstring for raised exceptions (OSError/IOError). KconfigError is never raised here. filename (default: None): Path to load configuration from (a string). Respects $srctree if set (see the class documentation). If 'filename' is None (the default), the configuration file to load (if any) is calculated automatically, giving the behavior you'd usually want: 1. If the KCONFIG_CONFIG environment variable is set, it gives the path to the configuration file to load. Otherwise, ".config" is used. See standard_config_filename(). 2. If the path from (1.) doesn't exist, the configuration file given by kconf.defconfig_filename is loaded instead, which is derived from the 'option defconfig_list' symbol. 3. If (1.) and (2.) fail to find a configuration file to load, no configuration file is loaded, and symbols retain their current values (e.g., their default values). This is not an error. See the return value as well. replace (default: True): If True, all existing user values will be cleared before loading the .config. Pass False to merge configurations. verbose (default: None): Limited backwards compatibility to prevent crashes. A warning is printed if anything but None is passed. Prior to Kconfiglib 12.0.0, this option enabled printing of messages to stdout when 'filename' was None. A message is (always) returned now instead, which is more flexible. Will probably be removed in some future version. 
        Returns a string with a message saying which file got loaded (or
        possibly that no file got loaded, when 'filename' is None). This is
        meant to reduce boilerplate in tools, which can do e.g.
        print(kconf.load_config()). The returned message distinguishes between
        loading (replace == True) and merging (replace == False).
        """
        if verbose is not None:
            _warn_verbose_deprecated("load_config")

        msg = None
        if filename is None:
            filename = standard_config_filename()
            if not exists(filename) and \
               not exists(join(self.srctree, filename)):
                defconfig = self.defconfig_filename
                if defconfig is None:
                    return "Using default symbol values (no '{}')" \
                           .format(filename)

                msg = " default configuration '{}' (no '{}')" \
                      .format(defconfig, filename)
                filename = defconfig

        if not msg:
            msg = " configuration '{}'".format(filename)

        # Disable the warning about assigning to symbols without prompts. This
        # is normal and expected within a .config file.
        self._warn_assign_no_prompt = False

        # This stub only exists to make sure _warn_assign_no_prompt gets
        # reenabled
        try:
            self._load_config(filename, replace)
        except UnicodeDecodeError as e:
            _decoding_error(e, filename)
        finally:
            self._warn_assign_no_prompt = True

        return ("Loaded" if replace else "Merged") + msg

    def _load_config(self, filename, replace):
        # load_config() helper. Does the actual line-by-line parsing and
        # symbol assignment; load_config() handles filename resolution,
        # warning toggling, and the returned message.

        with self._open_config(filename) as f:
            if replace:
                self.missing_syms = []

                # If we're replacing the configuration, keep track of which
                # symbols and choices got set so that we can unset the rest
                # later. This avoids invalidating everything and is faster.
                # Another benefit is that invalidation must be rock solid for
                # it to work, making it a good test.
                for sym in self.unique_defined_syms:
                    sym._was_set = False

                for choice in self.unique_choices:
                    choice._was_set = False

            # Small optimizations
            set_match = self._set_match
            unset_match = self._unset_match
            get_sym = self.syms.get

            for linenr, line in enumerate(f, 1):
                # The C tools ignore trailing whitespace
                line = line.rstrip()

                match = set_match(line)
                if match:
                    name, val = match.groups()
                    sym = get_sym(name)
                    if not sym or not sym.nodes:
                        self._undef_assign(name, val, filename, linenr)
                        continue

                    if sym.orig_type in _BOOL_TRISTATE:
                        # The C implementation only checks the first character
                        # to the right of '=', for whatever reason
                        if not (sym.orig_type is BOOL and
                                val.startswith(("y", "n")) or
                                sym.orig_type is TRISTATE and
                                val.startswith(("y", "m", "n"))):
                            self._warn("'{}' is not a valid value for the {} "
                                       "symbol {}. Assignment ignored."
                                       .format(val, TYPE_TO_STR[sym.orig_type],
                                               sym.name_and_loc),
                                       filename, linenr)
                            continue

                        val = val[0]

                        if sym.choice and val != "n":
                            # During .config loading, we infer the mode of the
                            # choice from the kind of values that are assigned
                            # to the choice symbols

                            prev_mode = sym.choice.user_value
                            if prev_mode is not None and \
                               TRI_TO_STR[prev_mode] != val:
                                self._warn("both m and y assigned to symbols "
                                           "within the same choice",
                                           filename, linenr)

                            # Set the choice's mode
                            sym.choice.set_value(val)

                    elif sym.orig_type is STRING:
                        match = _conf_string_match(val)
                        if not match:
                            self._warn("malformed string literal in "
                                       "assignment to {}. Assignment ignored."
                                       .format(sym.name_and_loc),
                                       filename, linenr)
                            continue

                        val = unescape(match.group(1))

                else:
                    match = unset_match(line)
                    if not match:
                        # Print a warning for lines that match neither
                        # set_match() nor unset_match() and that are not blank
                        # lines or comments. 'line' has already been
                        # rstrip()'d, so blank lines show up as "" here.
                        if line and not line.lstrip().startswith("#"):
                            self._warn("ignoring malformed line '{}'"
                                       .format(line),
                                       filename, linenr)

                        continue

                    name = match.group(1)
                    sym = get_sym(name)
                    if not sym or not sym.nodes:
                        self._undef_assign(name, "n", filename, linenr)
                        continue

                    if sym.orig_type not in _BOOL_TRISTATE:
                        continue

                    # A line matched by unset_match() unsets the symbol,
                    # i.e. assigns n
                    val = "n"

                # Done parsing the assignment. Set the value.
                if sym._was_set:
                    self._assigned_twice(sym, val, filename, linenr)

                sym.set_value(val)

        if replace:
            # If we're replacing the configuration, unset the symbols that
            # didn't get set
            for sym in self.unique_defined_syms:
                if not sym._was_set:
                    sym.unset_value()

            for choice in self.unique_choices:
                if not choice._was_set:
                    choice.unset_value()

    def _undef_assign(self, name, val, filename, linenr):
        # Called for assignments to undefined symbols during .config loading

        self.missing_syms.append((name, val))
        if self.warn_assign_undef:
            self._warn(
                "attempt to assign the value '{}' to the undefined symbol {}"
                .format(val, name), filename, linenr)

    def _assigned_twice(self, sym, new_val, filename, linenr):
        # Called when a symbol is assigned more than once in a .config file

        # Use strings for bool/tristate user values in the warning
        if sym.orig_type in _BOOL_TRISTATE:
            user_val = TRI_TO_STR[sym.user_value]
        else:
            user_val = sym.user_value

        msg = '{} set more than once. Old value "{}", new value "{}".'.format(
            sym.name_and_loc, user_val, new_val)

        # Redundant assignments (same value) and overriding assignments
        # (different value) are controlled by separate warning flags
        if user_val == new_val:
            if self.warn_assign_redun:
                self._warn(msg, filename, linenr)
        elif self.warn_assign_override:
            self._warn(msg, filename, linenr)

    def load_allconfig(self, filename):
        """
        Helper for all*config. Loads (merges) the configuration file specified
        by KCONFIG_ALLCONFIG, if any. See Documentation/kbuild/kconfig.txt in
        the Linux kernel.

        Disables warnings for duplicated assignments within configuration
        files for the duration of the call
        (kconf.warn_assign_override/warn_assign_redun = False), and restores
        the previous warning settings at the end.
        The KCONFIG_ALLCONFIG configuration file is expected to override
        symbols.

        Exits with sys.exit() (which raises a SystemExit exception) and prints
        an error to stderr if KCONFIG_ALLCONFIG is set but the configuration
        file can't be opened.

        filename:
          Command-specific configuration filename - "allyes.config",
          "allno.config", etc.
        """
        # Delegates to the module-level load_allconfig() helper
        load_allconfig(self, filename)

    def write_autoconf(self, filename=None, header=None):
        r"""
        Writes out symbol values as a C header file, matching the format used
        by include/generated/autoconf.h in the kernel.

        The ordering of the #defines matches the one generated by
        write_config(). The order in the C implementation depends on the hash
        table implementation as of writing, and so won't match.

        If 'filename' exists and its contents is identical to what would get
        written out, it is left untouched. This avoids updating file metadata
        like the modification time and possibly triggering redundant work in
        build tools.

        filename (default: None):
          Path to write header to.

          If None (the default), the path in the environment variable
          KCONFIG_AUTOHEADER is used if set, and
          "include/generated/autoconf.h" otherwise. This is compatible with
          the C tools.

        header (default: None):
          Text inserted verbatim at the beginning of the file. You would
          usually want it enclosed in '/* */' to make it a C comment, and
          include a trailing newline.

          If None (the default), the value of the environment variable
          KCONFIG_AUTOHEADER_HEADER had when the Kconfig instance was created
          will be used if it was set, and no header otherwise. See the
          Kconfig.header_header attribute.

        Returns a string with a message saying that the header got saved, or
        that there were no changes to it. This is meant to reduce boilerplate
        in tools, which can do e.g. print(kconf.write_autoconf()).
        """
        if filename is None:
            filename = os.getenv("KCONFIG_AUTOHEADER",
                                 "include/generated/autoconf.h")

        if self._write_if_changed(filename, self._autoconf_contents(header)):
            return "Kconfig header saved to '{}'".format(filename)
        return "No change to Kconfig header in '{}'".format(filename)

    def _autoconf_contents(self, header):
        # write_autoconf() helper. Returns the contents to write as a string,
        # with 'header' or KCONFIG_AUTOHEADER_HEADER at the beginning.

        if header is None:
            header = self.header_header

        chunks = [header]  # "".join()ed later
        add = chunks.append

        for sym in self.unique_defined_syms:
            # _write_to_conf is determined when the value is calculated. This
            # is a hidden function call due to property magic.
            #
            # Note: In client code, you can check if sym.config_string is
            # empty instead, to avoid accessing the internal _write_to_conf
            # variable (though it's likely to keep working).
            val = sym.str_value
            if not sym._write_to_conf:
                continue

            if sym.orig_type in _BOOL_TRISTATE:
                # No #define is emitted for n
                if val == "y":
                    add("#define {}{} 1\n"
                        .format(self.config_prefix, sym.name))
                elif val == "m":
                    add("#define {}{}_MODULE 1\n"
                        .format(self.config_prefix, sym.name))

            elif sym.orig_type is STRING:
                add('#define {}{} "{}"\n'
                    .format(self.config_prefix, sym.name, escape(val)))

            else:  # sym.orig_type in _INT_HEX:
                if sym.orig_type is HEX and \
                   not val.startswith(("0x", "0X")):
                    val = "0x" + val

                add("#define {}{} {}\n"
                    .format(self.config_prefix, sym.name, val))

        return "".join(chunks)

    def write_config(self, filename=None, header=None,
                     save_old=True, verbose=None):
        r"""
        Writes out symbol values in the .config format. The format matches the
        C implementation, including ordering.

        Symbols appear in the same order in generated .config files as they do
        in the Kconfig files. For symbols defined in multiple locations, a
        single assignment is written out corresponding to the first location
        where the symbol is defined.
        See the 'Intro to symbol values' section in the module docstring to
        understand which symbols get written out.

        If 'filename' exists and its contents is identical to what would get
        written out, it is left untouched. This avoids updating file metadata
        like the modification time and possibly triggering redundant work in
        build tools.

        See the Kconfig.__init__() docstring for raised exceptions
        (OSError/IOError). KconfigError is never raised here.

        filename (default: None):
          Path to write configuration to (a string).

          If None (the default), the path in the environment variable
          KCONFIG_CONFIG is used if set, and ".config" otherwise. See
          standard_config_filename().

        header (default: None):
          Text inserted verbatim at the beginning of the file. You would
          usually want each line to start with '#' to make it a comment, and
          include a trailing newline.

          if None (the default), the value of the environment variable
          KCONFIG_CONFIG_HEADER had when the Kconfig instance was created will
          be used if it was set, and no header otherwise. See the
          Kconfig.config_header attribute.

        save_old (default: True):
          If True and <filename> already exists, a copy of it will be saved to
          <filename>.old in the same directory before the new configuration is
          written.

          Errors are silently ignored if <filename>.old cannot be written
          (e.g. due to permissions errors).

        verbose (default: None):
          Limited backwards compatibility to prevent crashes. A warning is
          printed if anything but None is passed.

          Prior to Kconfiglib 12.0.0, this option enabled printing of messages
          to stdout when 'filename' was None. A message is (always) returned
          now instead, which is more flexible.

          Will probably be removed in some future version.

        Returns a string with a message saying which file got saved. This is
        meant to reduce boilerplate in tools, which can do e.g.
        print(kconf.write_config()).
        """
        if verbose is not None:
            _warn_verbose_deprecated("write_config")

        if filename is None:
            filename = standard_config_filename()

        contents = self._config_contents(header)
        if self._contents_eq(filename, contents):
            return "No change to configuration in '{}'".format(filename)

        if save_old:
            _save_old(filename)

        with self._open(filename, "w") as f:
            f.write(contents)

        return "Configuration saved to '{}'".format(filename)

    def _config_contents(self, header):
        # write_config() helper. Returns the contents to write as a string,
        # with 'header' or KCONFIG_CONFIG_HEADER at the beginning.
        #
        # More memory friendly would be to 'yield' the strings and
        # "".join(_config_contents()), but it was a bit slower on my system.

        # node_iter() was used here before commit 3aea9f7 ("Add '# end of
        # <menu>' after menus in .config"). Those comments get tricky to
        # implement with it.

        for sym in self.unique_defined_syms:
            sym._visited = False

        if header is None:
            header = self.config_header

        chunks = [header]  # "".join()ed later
        add = chunks.append

        # Did we just print an '# end of ...' comment?
        after_end_comment = False

        node = self.top_node
        while 1:
            # Jump to the next node with an iterative tree walk
            if node.list:
                node = node.list
            elif node.next:
                node = node.next
            else:
                # Ascend until a node with a successor is found. The 'else'
                # arm of the while runs once we pop past the top node, which
                # means the walk is complete.
                while node.parent:
                    node = node.parent

                    # Add a comment when leaving visible menus
                    if node.item is MENU and expr_value(node.dep) and \
                       expr_value(node.visibility) and \
                       node is not self.top_node:
                        add("# end of {}\n".format(node.prompt[0]))
                        after_end_comment = True

                    if node.next:
                        node = node.next
                        break
                else:
                    # No more nodes
                    return "".join(chunks)

            # Generate configuration output for the node

            item = node.item

            if item.__class__ is Symbol:
                if item._visited:
                    continue
                item._visited = True

                conf_string = item.config_string
                if not conf_string:
                    continue

                if after_end_comment:
                    # Add a blank line before the first symbol printed after
                    # an '# end of ...' comment
                    after_end_comment = False
                    add("\n")
                add(conf_string)

            elif expr_value(node.dep) and \
                 ((item is MENU and expr_value(node.visibility)) or
                  item is COMMENT):
                add("\n#\n# {}\n#\n".format(node.prompt[0]))
                after_end_comment = False

    def write_min_config(self, filename, header=None):
        """
        Writes out a "minimal" configuration file, omitting symbols whose
        value matches their default value. The format matches the one produced
        by 'make savedefconfig'.

        The resulting configuration file is incomplete, but a complete
        configuration can be derived from it by loading it. Minimal
        configuration files can serve as a more manageable configuration
        format compared to a "full" .config file, especially when
        configurations files are merged or edited by hand.

        See the Kconfig.__init__() docstring for raised exceptions
        (OSError/IOError). KconfigError is never raised here.

        filename:
          Path to write minimal configuration to.

        header (default: None):
          Text inserted verbatim at the beginning of the file. You would
          usually want each line to start with '#' to make it a comment, and
          include a final terminating newline.

          if None (the default), the value of the environment variable
          KCONFIG_CONFIG_HEADER had when the Kconfig instance was created will
          be used if it was set, and no header otherwise. See the
          Kconfig.config_header attribute.

        Returns a string with a message saying the minimal configuration got
        saved, or that there were no changes to it. This is meant to reduce
        boilerplate in tools, which can do e.g.
        print(kconf.write_min_config()).
        """
        if self._write_if_changed(filename, self._min_config_contents(header)):
            return "Minimal configuration saved to '{}'".format(filename)
        return "No change to minimal configuration in '{}'".format(filename)

    def _min_config_contents(self, header):
        # write_min_config() helper. Returns the contents to write as a string,
        # with 'header' or KCONFIG_CONFIG_HEADER at the beginning.
        if header is None:
            header = self.config_header

        chunks = [header]  # "".join()ed later
        add = chunks.append

        for sym in self.unique_defined_syms:
            # Skip symbols that cannot be changed. Only check
            # non-choice symbols, as selects don't affect choice
            # symbols.
            if not sym.choice and \
               sym.visibility <= expr_value(sym.rev_dep):
                continue

            # Skip symbols whose value matches their default
            if sym.str_value == sym._str_default():
                continue

            # Skip symbols that would be selected by default in a
            # choice, unless the choice is optional or the symbol type
            # isn't bool (it might be possible to set the choice mode
            # to n or the symbol to m in those cases).
            if sym.choice and \
               not sym.choice.is_optional and \
               sym.choice._selection_from_defaults() is sym and \
               sym.orig_type is BOOL and \
               sym.tri_value == 2:
                continue

            add(sym.config_string)

        return "".join(chunks)

    def sync_deps(self, path):
        """
        Creates or updates a directory structure that can be used to avoid
        doing a full rebuild whenever the configuration is changed, mirroring
        include/config/ in the kernel.

        This function is intended to be called during each build, before
        compiling source files that depend on configuration symbols.

        See the Kconfig.__init__() docstring for raised exceptions
        (OSError/IOError). KconfigError is never raised here.

        path:
          Path to directory

        sync_deps(path) does the following:

          1. If the directory <path> does not exist, it is created.

          2. If <path>/auto.conf exists, old symbol values are loaded from it,
             which are then compared against the current symbol values. If a
             symbol has changed value (would generate different output in
             autoconf.h compared to before), the change is signaled by
             touch'ing a file corresponding to the symbol.

             The first time sync_deps() is run on a directory,
             <path>/auto.conf won't exist, and no old symbol values will be
             available. This logically has the same effect as updating the
             entire configuration.

             The path to a symbol's file is calculated from the symbol's name
             by replacing all '_' with '/' and appending '.h'. For example,
             the symbol FOO_BAR_BAZ gets the file <path>/foo/bar/baz.h, and
             FOO gets the file <path>/foo.h.

             This scheme matches the C tools. The point is to avoid having a
             single directory with a huge number of files, which the
             underlying filesystem might not handle well.

          3. A new auto.conf with the current symbol values is written, to
             keep track of them for the next build.

             If auto.conf exists and its contents is identical to what would
             get written out, it is left untouched. This avoids updating file
             metadata like the modification time and possibly triggering
             redundant work in build tools.

        The last piece of the puzzle is knowing what symbols each source file
        depends on. Knowing that, dependencies can be added from source files
        to the files corresponding to the symbols they depends on. The source
        file will then get recompiled (only) when the symbol value changes
        (provided sync_deps() is run first during each build).

        The tool in the kernel that extracts symbol dependencies from source
        files is scripts/basic/fixdep.c. Missing symbol files also correspond
        to "not changed", which fixdep deals with by using the $(wildcard)
        Make function when adding symbol prerequisites to source files.

        In case you need a different scheme for your project, the sync_deps()
        implementation can be used as a template.
        """
        if not exists(path):
            # NOTE(review): mkdir() (not makedirs()) — this assumes the
            # parent of 'path' already exists; confirm callers guarantee that
            os.mkdir(path, 0o755)

        # Load old values from auto.conf, if any
        self._load_old_vals(path)

        for sym in self.unique_defined_syms:
            # _write_to_conf is determined when the value is calculated. This
            # is a hidden function call due to property magic.
            #
            # Note: In client code, you can check if sym.config_string is
            # empty instead, to avoid accessing the internal _write_to_conf
            # variable (though it's likely to keep working).
            val = sym.str_value

            # n tristate values do not get written to auto.conf and
            # autoconf.h, making a missing symbol logically equivalent to n

            if sym._write_to_conf:
                if sym._old_val is None and \
                   sym.orig_type in _BOOL_TRISTATE and \
                   val == "n":
                    # No old value (the symbol was missing or n), new value n.
                    # No change.
                    continue

                if val == sym._old_val:
                    # New value matches old. No change.
                    continue

            elif sym._old_val is None:
                # The symbol wouldn't appear in autoconf.h (because
                # _write_to_conf is false), and it wouldn't have appeared in
                # autoconf.h previously either (because it didn't appear in
                # auto.conf). No change.
                continue

            # 'sym' has a new value. Flag it.
            _touch_dep_file(path, sym.name)

        # Remember the current values as the "new old" values.
        #
        # This call could go anywhere after the call to _load_old_vals(), but
        # putting it last means _sync_deps() can be safely rerun if it fails
        # before this point.
        self._write_old_vals(path)

    def _load_old_vals(self, path):
        # Loads old symbol values from auto.conf into a dedicated
        # Symbol._old_val field. Mirrors load_config().
        #
        # The extra field could be avoided with some trickery involving
        # dumping symbol values and restoring them later, but this is simpler
        # and faster. The C tools also use a dedicated field for this purpose.

        for sym in self.unique_defined_syms:
            sym._old_val = None

        try:
            auto_conf = self._open(join(path, "auto.conf"), "r")
        except EnvironmentError as e:
            if e.errno == errno.ENOENT:
                # No old values
                return
            raise

        with auto_conf as f:
            for line in f:
                match = self._set_match(line)
                if not match:
                    # We only expect CONFIG_FOO=... (and possibly a header
                    # comment) in auto.conf
                    continue

                name, val = match.groups()
                if name in self.syms:
                    sym = self.syms[name]

                    if sym.orig_type is STRING:
                        match = _conf_string_match(val)
                        if not match:
                            continue
                        val = unescape(match.group(1))

                    # NOTE(review): 'sym' is already bound to self.syms[name]
                    # above; this second dict lookup is redundant (harmless)
                    self.syms[name]._old_val = val
                else:
                    # Flag that the symbol no longer exists, in
                    # case something still depends on it
                    _touch_dep_file(path, name)

    def _write_old_vals(self, path):
        # Helper for writing auto.conf. Basically just a simplified
        # write_config() that doesn't write any comments (including
        # '# CONFIG_FOO is not set' comments). The format matches the C
        # implementation, though the ordering is arbitrary there (depends on
        # the hash table implementation).
        #
        # A separate helper function is neater than complicating
        # write_config() by passing a flag to it, plus we only need to look at
        # symbols here.

        self._write_if_changed(
            os.path.join(path, "auto.conf"),
            self._old_vals_contents())

    def _old_vals_contents(self):
        # _write_old_vals() helper. Returns the contents to write as a string.

        # Temporary list instead of generator makes this a bit faster
        return "".join([
            sym.config_string for sym in self.unique_defined_syms
            if not (sym.orig_type in _BOOL_TRISTATE and not sym.tri_value)
        ])

    def node_iter(self, unique_syms=False):
        """
        Returns a generator for iterating through all MenuNode's in the
        Kconfig tree. The iteration is done in Kconfig definition order (each
        node is visited before its children, and the children of a node are
        visited before the next node).

        The Kconfig.top_node menu node is skipped. It contains an implicit
        menu that holds the top-level items.

        As an example, the following code will produce a list equal to
        Kconfig.defined_syms:

          defined_syms = [node.item for node in kconf.node_iter()
                          if isinstance(node.item, Symbol)]

        unique_syms (default: False):
          If True, only the first MenuNode will be included for symbols
          defined in multiple locations.
          Using kconf.node_iter(True) in the example above would give a list
          equal to unique_defined_syms.
        """
        if unique_syms:
            # _visited is also reset before use by _config_contents(); each
            # user of the flag clears it before walking
            for sym in self.unique_defined_syms:
                sym._visited = False

        node = self.top_node
        while 1:
            # Jump to the next node with an iterative tree walk
            if node.list:
                node = node.list
            elif node.next:
                node = node.next
            else:
                while node.parent:
                    node = node.parent
                    if node.next:
                        node = node.next
                        break
                else:
                    # No more nodes
                    return

            if unique_syms and node.item.__class__ is Symbol:
                if node.item._visited:
                    continue
                node.item._visited = True

            yield node

    def eval_string(self, s):
        """
        Returns the tristate value of the expression 's', represented as 0, 1,
        and 2 for n, m, and y, respectively. Raises KconfigError on syntax
        errors. Warns if undefined symbols are referenced.

        As an example, if FOO and BAR are tristate symbols at least one of
        which has the value y, then eval_string("y && (FOO || BAR)") returns
        2 (y).

        To get the string value of non-bool/tristate symbols, use
        Symbol.str_value. eval_string() always returns a tristate value, and
        all non-bool/tristate symbols have the tristate value 0 (n).

        The expression parsing is consistent with how parsing works for
        conditional ('if ...') expressions in the configuration, and matches
        the C implementation. m is rewritten to 'm && MODULES', so
        eval_string("m") will return 0 (n) unless modules are enabled.
        """
        # The parser is optimized to be fast when parsing Kconfig files (where
        # an expression can never appear at the beginning of a line). We have
        # to monkey-patch things a bit here to reuse it.

        self.filename = None

        self._tokens = self._tokenize("if " + s)
        # Strip "if " to avoid giving confusing error messages
        self._line = s
        self._tokens_i = 1  # Skip the 'if' token

        return expr_value(self._expect_expr_and_eol())

    def unset_values(self):
        """
        Removes any user values from all symbols, as if Kconfig.load_config()
        or Symbol.set_value() had never been called.
        """
        self._warn_assign_no_prompt = False
        try:
            # set_value() already rejects undefined symbols, and they don't
            # need to be invalidated (because their value never changes), so
            # we can just iterate over defined symbols
            for sym in self.unique_defined_syms:
                sym.unset_value()

            for choice in self.unique_choices:
                choice.unset_value()
        finally:
            self._warn_assign_no_prompt = True

    def enable_warnings(self):
        """
        Do 'Kconfig.warn = True' instead. Maintained for backwards
        compatibility.
        """
        self.warn = True

    def disable_warnings(self):
        """
        Do 'Kconfig.warn = False' instead. Maintained for backwards
        compatibility.
        """
        self.warn = False

    def enable_stderr_warnings(self):
        """
        Do 'Kconfig.warn_to_stderr = True' instead. Maintained for backwards
        compatibility.
        """
        self.warn_to_stderr = True

    def disable_stderr_warnings(self):
        """
        Do 'Kconfig.warn_to_stderr = False' instead. Maintained for backwards
        compatibility.
        """
        self.warn_to_stderr = False

    def enable_undef_warnings(self):
        """
        Do 'Kconfig.warn_assign_undef = True' instead. Maintained for
        backwards compatibility.
        """
        self.warn_assign_undef = True

    def disable_undef_warnings(self):
        """
        Do 'Kconfig.warn_assign_undef = False' instead. Maintained for
        backwards compatibility.
        """
        self.warn_assign_undef = False

    def enable_override_warnings(self):
        """
        Do 'Kconfig.warn_assign_override = True' instead. Maintained for
        backwards compatibility.
        """
        self.warn_assign_override = True

    def disable_override_warnings(self):
        """
        Do 'Kconfig.warn_assign_override = False' instead. Maintained for
        backwards compatibility.
        """
        self.warn_assign_override = False

    def enable_redun_warnings(self):
        """
        Do 'Kconfig.warn_assign_redun = True' instead. Maintained for
        backwards compatibility.
        """
        self.warn_assign_redun = True

    def disable_redun_warnings(self):
        """
        Do 'Kconfig.warn_assign_redun = False' instead. Maintained for
        backwards compatibility.
""" self.warn_assign_redun = False def __repr__(self): """ Returns a string with information about the Kconfig object when it is evaluated on e.g. the interactive Python prompt. """ def status(flag): return "enabled" if flag else "disabled" return "<{}>".format(", ".join(( "configuration with {} symbols".format(len(self.syms)), 'main menu prompt "{}"'.format(self.mainmenu_text), "srctree is current directory" if not self.srctree else 'srctree "{}"'.format(self.srctree), 'config symbol prefix "{}"'.format(self.config_prefix), "warnings " + status(self.warn), "printing of warnings to stderr " + status(self.warn_to_stderr), "undef. symbol assignment warnings " + status(self.warn_assign_undef), "overriding symbol assignment warnings " + status(self.warn_assign_override), "redundant symbol assignment warnings " + status(self.warn_assign_redun) ))) # # Private methods # # # File reading # def _open_config(self, filename): # Opens a .config file. First tries to open 'filename', then # '$srctree/filename' if $srctree was set when the configuration was # loaded. try: return self._open(filename, "r") except EnvironmentError as e: # This will try opening the same file twice if $srctree is unset, # but it's not a big deal try: return self._open(join(self.srctree, filename), "r") except EnvironmentError as e2: # This is needed for Python 3, because e2 is deleted after # the try block: # # https://docs.python.org/3/reference/compound_stmts.html#the-try-statement e = e2 raise _KconfigIOError( e, "Could not open '{}' ({}: {}). Check that the $srctree " "environment variable ({}) is set correctly." .format(filename, errno.errorcode[e.errno], e.strerror, "set to '{}'".format(self.srctree) if self.srctree else "unset or blank")) def _enter_file(self, filename): # Jumps to the beginning of a sourced Kconfig file, saving the previous # position and file object. # # filename: # Absolute path to file # Path relative to $srctree, stored in e.g. 
self.filename (which makes # it indirectly show up in MenuNode.filename). Equals 'filename' for # absolute paths passed to 'source'. if filename.startswith(self._srctree_prefix): # Relative path (or a redundant absolute path to within $srctree, # but it's probably fine to reduce those too) rel_filename = filename[len(self._srctree_prefix):] else: # Absolute path rel_filename = filename self.kconfig_filenames.append(rel_filename) # The parent Kconfig files are represented as a list of # (<include path>, <Python 'file' object for Kconfig file>) tuples. # # <include path> is immutable and holds a *tuple* of # (<filename>, <linenr>) tuples, giving the locations of the 'source' # statements in the parent Kconfig files. The current include path is # also available in Kconfig._include_path. # # The point of this redundant setup is to allow Kconfig._include_path # to be assigned directly to MenuNode.include_path without having to # copy it, sharing it wherever possible. # Save include path and 'file' object (via its 'readline' function) # before entering the file self._filestack.append((self._include_path, self._readline)) # _include_path is a tuple, so this rebinds the variable instead of # doing in-place modification self._include_path += ((self.filename, self.linenr),) # Check for recursive 'source' for name, _ in self._include_path: if name == rel_filename: raise KconfigError( "\n{}:{}: recursive 'source' of '{}' detected. 
Check that " "environment variables are set correctly.\n" "Include path:\n{}" .format(self.filename, self.linenr, rel_filename, "\n".join("{}:{}".format(name, linenr) for name, linenr in self._include_path))) try: self._readline = self._open(filename, "r").readline except EnvironmentError as e: # We already know that the file exists raise _KconfigIOError( e, "{}:{}: Could not open '{}' (in '{}') ({}: {})" .format(self.filename, self.linenr, filename, self._line.strip(), errno.errorcode[e.errno], e.strerror)) self.filename = rel_filename self.linenr = 0 def _leave_file(self): # Returns from a Kconfig file to the file that sourced it. See # _enter_file(). # Restore location from parent Kconfig file self.filename, self.linenr = self._include_path[-1] # Restore include path and 'file' object self._readline.__self__.close() # __self__ fetches the 'file' object self._include_path, self._readline = self._filestack.pop() def _next_line(self): # Fetches and tokenizes the next line from the current Kconfig file. # Returns False at EOF and True otherwise. # We might already have tokens from parsing a line and discovering that # it's part of a different construct if self._reuse_tokens: self._reuse_tokens = False # self._tokens_i is known to be 1 here, because _parse_props() # leaves it like that when it can't recognize a line (or parses a # help text) return True # readline() returns '' over and over at EOF, which we rely on for help # texts at the end of files (see _line_after_help()) line = self._readline() if not line: return False self.linenr += 1 # Handle line joining while line.endswith("\\\n"): line = line[:-2] + self._readline() self.linenr += 1 self._tokens = self._tokenize(line) # Initialize to 1 instead of 0 to factor out code from _parse_block() # and _parse_props(). They immediately fetch self._tokens[0]. self._tokens_i = 1 return True def _line_after_help(self, line): # Tokenizes a line after a help text. 
This case is special in that the # line has already been fetched (to discover that it isn't part of the # help text). # # An earlier version used a _saved_line variable instead that was # checked in _next_line(). This special-casing gets rid of it and makes # _reuse_tokens alone sufficient to handle unget. # Handle line joining while line.endswith("\\\n"): line = line[:-2] + self._readline() self.linenr += 1 self._tokens = self._tokenize(line) self._reuse_tokens = True def _write_if_changed(self, filename, contents): # Writes 'contents' into 'filename', but only if it differs from the # current contents of the file. # # Another variant would be write a temporary file on the same # filesystem, compare the files, and rename() the temporary file if it # differs, but it breaks stuff like write_config("/dev/null"), which is # used out there to force evaluation-related warnings to be generated. # This simple version is pretty failsafe and portable. # # Returns True if the file has changed and is updated, and False # otherwise. if self._contents_eq(filename, contents): return False with self._open(filename, "w") as f: f.write(contents) return True def _contents_eq(self, filename, contents): # Returns True if the contents of 'filename' is 'contents' (a string), # and False otherwise (including if 'filename' can't be opened/read) try: with self._open(filename, "r") as f: # Robust re. things like encoding and line endings (mmap() # trickery isn't) return f.read(len(contents) + 1) == contents except EnvironmentError: # If the error here would prevent writing the file as well, we'll # notice it later return False # # Tokenization # def _lookup_sym(self, name): # Fetches the symbol 'name' from the symbol table, creating and # registering it if it does not exist. If '_parsing_kconfigs' is False, # it means we're in eval_string(), and new symbols won't be registered. 
        # Fast path: symbol already in the symbol table
        if name in self.syms:
            return self.syms[name]

        sym = Symbol()
        sym.kconfig = self
        sym.name = name
        sym.is_constant = False
        # All expression-valued fields start out as "n" (no dependency)
        sym.rev_dep = sym.weak_rev_dep = sym.direct_dep = self.n

        if self._parsing_kconfigs:
            self.syms[name] = sym
        else:
            self._warn("no symbol {} in configuration".format(name))

        return sym

    def _lookup_const_sym(self, name):
        # Like _lookup_sym(), for constant (quoted) symbols

        if name in self.const_syms:
            return self.const_syms[name]

        sym = Symbol()
        sym.kconfig = self
        sym.name = name
        sym.is_constant = True
        sym.rev_dep = sym.weak_rev_dep = sym.direct_dep = self.n

        # Not registered when '_parsing_kconfigs' is False (eval_string(),
        # see _lookup_sym())
        if self._parsing_kconfigs:
            self.const_syms[name] = sym

        return sym

    def _tokenize(self, s):
        # Parses 's', returning a None-terminated list of tokens. Registers any
        # new symbols encountered with _lookup(_const)_sym().
        #
        # Tries to be reasonably speedy by processing chunks of text via
        # regexes and string operations where possible. This is the biggest
        # hotspot during parsing.
        #
        # It might be possible to rewrite this to 'yield' tokens instead,
        # working across multiple lines. Lookback and compatibility with old
        # janky versions of the C tools complicate things though.

        self._line = s  # Used for error reporting

        # Initial token on the line
        match = _command_match(s)
        if not match:
            if s.isspace() or s.lstrip().startswith("#"):
                return (None,)
            self._parse_error("unknown token at start of line")

        # Tricky implementation detail: While parsing a token, 'token' refers
        # to the previous token. See _STRING_LEX for why this is needed.
        token = _get_keyword(match.group(1))
        if not token:
            # Backwards compatibility with old versions of the C tools, which
            # (accidentally) accepted stuff like "--help--" and "-help---".
            # This was fixed in the C tools by commit c2264564 ("kconfig: warn
            # of unhandled characters in Kconfig commands"), committed in July
            # 2015, but it seems people still run Kconfiglib on older kernels.
            if s.strip(" \t\n-") == "help":
                return (_T_HELP, None)

            # If the first token is not a keyword (and not a weird help token),
            # we have a preprocessor variable assignment (or a bare macro on a
            # line)
            self._parse_assignment(s)
            return (None,)

        # Tokens found so far on the line
        tokens = [token]
        # The current index in the string being tokenized
        i = match.end()

        # Main tokenization loop (for tokens past the first one)
        while i < len(s):
            # Test for an identifier/keyword first. This is the most common
            # case.
            match = _id_keyword_match(s, i)
            if match:
                # We have an identifier or keyword

                # Check what it is. lookup_sym() will take care of allocating
                # new symbols for us the first time we see them. Note that
                # 'token' still refers to the previous token.

                name = match.group(1)
                keyword = _get_keyword(name)
                if keyword:
                    # It's a keyword
                    token = keyword
                    # Jump past it
                    i = match.end()

                elif token not in _STRING_LEX:
                    # It's a non-const symbol, except we translate n, m, and y
                    # into the corresponding constant symbols, like the C
                    # implementation

                    if "$" in name:
                        # Macro expansion within symbol name
                        name, s, i = self._expand_name(s, i)
                    else:
                        i = match.end()

                    token = self.const_syms[name] if name in STR_TO_TRI else \
                        self._lookup_sym(name)

                else:
                    # It's a case of missing quotes. For example, the
                    # following is accepted:
                    #
                    #   menu unquoted_title
                    #
                    #   config A
                    #       tristate unquoted_prompt
                    #
                    #   endmenu
                    #
                    # Named choices ('choice FOO') also end up here.

                    if token is not _T_CHOICE:
                        self._warn("style: quotes recommended around '{}' in '{}'"
                                   .format(name, self._line.strip()),
                                   self.filename, self.linenr)

                    token = name
                    i = match.end()

            else:
                # Neither a keyword nor a non-const symbol

                # We always strip whitespace after tokens, so it is safe to
                # assume that s[i] is the start of a token here.
                c = s[i]

                if c in "\"'":
                    if "$" not in s and "\\" not in s:
                        # Fast path for lines without $ and \. Find the
                        # matching quote.
                        # str.find() returns -1 on failure; -1 + 1 == 0 is
                        # falsy, which makes the error check below work
                        end_i = s.find(c, i + 1) + 1
                        if not end_i:
                            self._parse_error("unterminated string")

                        val = s[i + 1:end_i - 1]
                        i = end_i
                    else:
                        # Slow path
                        s, end_i = self._expand_str(s, i)

                        # os.path.expandvars() and the $UNAME_RELEASE replace()
                        # is a backwards compatibility hack, which should be
                        # reasonably safe as expandvars() leaves references to
                        # undefined env. vars. as is.
                        #
                        # The preprocessor functionality changed how
                        # environment variables are referenced, to $(FOO).
                        val = expandvars(s[i + 1:end_i - 1]
                                         .replace("$UNAME_RELEASE",
                                                  _UNAME_RELEASE))

                        i = end_i

                    # This is the only place where we don't survive with a
                    # single token of lookback: 'option env="FOO"' does not
                    # refer to a constant symbol named "FOO".
                    token = \
                        val if token in _STRING_LEX or tokens[0] is _T_OPTION \
                        else self._lookup_const_sym(val)

                elif s.startswith("&&", i):
                    token = _T_AND
                    i += 2

                elif s.startswith("||", i):
                    token = _T_OR
                    i += 2

                elif c == "=":
                    token = _T_EQUAL
                    i += 1

                elif s.startswith("!=", i):
                    token = _T_UNEQUAL
                    i += 2

                elif c == "!":
                    token = _T_NOT
                    i += 1

                elif c == "(":
                    token = _T_OPEN_PAREN
                    i += 1

                elif c == ")":
                    token = _T_CLOSE_PAREN
                    i += 1

                elif c == "#":
                    # Comment: ignore the rest of the line
                    break

                # Very rare

                elif s.startswith("<=", i):
                    token = _T_LESS_EQUAL
                    i += 2

                elif c == "<":
                    token = _T_LESS
                    i += 1

                elif s.startswith(">=", i):
                    token = _T_GREATER_EQUAL
                    i += 2

                elif c == ">":
                    token = _T_GREATER
                    i += 1

                else:
                    self._parse_error("unknown tokens in line")

            # Skip trailing whitespace
            while i < len(s) and s[i].isspace():
                i += 1

            # Add the token
            tokens.append(token)

        # None-terminating the token list makes token fetching simpler/faster
        tokens.append(None)

        return tokens

    # Helpers for syntax checking and token fetching. See the
    # 'Intro to expressions' section for what a constant symbol is.
    #
    # More of these could be added, but the single-use cases are inlined as an
    # optimization.
def _expect_sym(self): token = self._tokens[self._tokens_i] self._tokens_i += 1 if token.__class__ is not Symbol: self._parse_error("expected symbol") return token def _expect_nonconst_sym(self): # Used for 'select' and 'imply' only. We know the token indices. token = self._tokens[1] self._tokens_i = 2 if token.__class__ is not Symbol or token.is_constant: self._parse_error("expected nonconstant symbol") return token def _expect_str_and_eol(self): token = self._tokens[self._tokens_i] self._tokens_i += 1 if token.__class__ is not str: self._parse_error("expected string") if self._tokens[self._tokens_i] is not None: self._trailing_tokens_error() return token def _expect_expr_and_eol(self): expr = self._parse_expr(True) if self._tokens[self._tokens_i] is not None: self._trailing_tokens_error() return expr def _check_token(self, token): # If the next token is 'token', removes it and returns True if self._tokens[self._tokens_i] is token: self._tokens_i += 1 return True return False # # Preprocessor logic # def _parse_assignment(self, s): # Parses a preprocessor variable assignment, registering the variable # if it doesn't already exist. Also takes care of bare macros on lines # (which are allowed, and can be useful for their side effects). # Expand any macros in the left-hand side of the assignment (the # variable name) s = s.lstrip() i = 0 while 1: i = _assignment_lhs_fragment_match(s, i).end() if s.startswith("$(", i): s, i = self._expand_macro(s, i, ()) else: break if s.isspace(): # We also accept a bare macro on a line (e.g. 
# $(warning-if,$(foo),ops)), provided it expands to a blank string return # Assigned variable name = s[:i] # Extract assignment operator (=, :=, or +=) and value rhs_match = _assignment_rhs_match(s, i) if not rhs_match: self._parse_error("syntax error") op, val = rhs_match.groups() if name in self.variables: # Already seen variable var = self.variables[name] else: # New variable var = Variable() var.kconfig = self var.name = name var._n_expansions = 0 self.variables[name] = var # += acts like = on undefined variables (defines a recursive # variable) if op == "+=": op = "=" if op == "=": var.is_recursive = True var.value = val elif op == ":=": var.is_recursive = False var.value = self._expand_whole(val, ()) else: # op == "+=" # += does immediate expansion if the variable was last set # with := var.value += " " + (val if var.is_recursive else self._expand_whole(val, ())) def _expand_whole(self, s, args): # Expands preprocessor macros in all of 's'. Used whenever we don't # have to worry about delimiters. See _expand_macro() re. the 'args' # parameter. # # Returns the expanded string. i = 0 while 1: i = s.find("$(", i) if i == -1: break s, i = self._expand_macro(s, i, args) return s def _expand_name(self, s, i): # Expands a symbol name starting at index 'i' in 's'. # # Returns the expanded name, the expanded 's' (including the part # before the name), and the index of the first character in the next # token after the name. s, end_i = self._expand_name_iter(s, i) name = s[i:end_i] # isspace() is False for empty strings if not name.strip(): # Avoid creating a Kconfig symbol with a blank name. It's almost # guaranteed to be an error. self._parse_error("macro expanded to blank string") # Skip trailing whitespace while end_i < len(s) and s[end_i].isspace(): end_i += 1 return name, s, end_i def _expand_name_iter(self, s, i): # Expands a symbol name starting at index 'i' in 's'. 
# # Returns the expanded 's' (including the part before the name) and the # index of the first character after the expanded name in 's'. while 1: match = _name_special_search(s, i) if match.group() != "$(": return (s, match.start()) s, i = self._expand_macro(s, match.start(), ()) def _expand_str(self, s, i): # Expands a quoted string starting at index 'i' in 's'. Handles both # backslash escapes and macro expansion. # # Returns the expanded 's' (including the part before the string) and # the index of the first character after the expanded string in 's'. quote = s[i] i += 1 # Skip over initial "/' while 1: match = _string_special_search(s, i) if not match: self._parse_error("unterminated string") if match.group() == quote: # Found the end of the string return (s, match.end()) elif match.group() == "\\": # Replace '\x' with 'x'. 'i' ends up pointing to the character # after 'x', which allows macros to be canceled with '\$(foo)'. i = match.end() s = s[:match.start()] + s[i:] elif match.group() == "$(": # A macro call within the string s, i = self._expand_macro(s, match.start(), ()) else: # A ' quote within " quotes or vice versa i += 1 def _expand_macro(self, s, i, args): # Expands a macro starting at index 'i' in 's'. If this macro resulted # from the expansion of another macro, 'args' holds the arguments # passed to that macro. # # Returns the expanded 's' (including the part before the macro) and # the index of the first character after the expanded macro in 's'. 
        res = s[:i]
        i += 2  # Skip over "$("

        arg_start = i  # Start of current macro argument
        new_args = []  # Arguments of this macro call
        nesting = 0  # Current parentheses nesting level

        while 1:
            match = _macro_special_search(s, i)
            if not match:
                self._parse_error("missing end parenthesis in macro expansion")

            if match.group() == "(":
                nesting += 1
                i = match.end()

            elif match.group() == ")":
                if nesting:
                    nesting -= 1
                    i = match.end()
                    continue

                # Found the end of the macro

                new_args.append(s[arg_start:match.start()])

                # $(1) is replaced by the first argument to the function, etc.,
                # provided at least that many arguments were passed

                try:
                    # Does the macro look like an integer, with a corresponding
                    # argument? If so, expand it to the value of the argument.
                    res += args[int(new_args[0])]
                except (ValueError, IndexError):
                    # Regular variables are just functions without arguments,
                    # and also go through the function value path
                    res += self._fn_val(new_args)

                # len(res) is the index of the first character after the
                # expansion in the returned string
                return (res + s[match.end():], len(res))

            elif match.group() == ",":
                i = match.end()
                if nesting:
                    continue

                # Found the end of a macro argument
                new_args.append(s[arg_start:match.start()])
                arg_start = i

            else:  # match.group() == "$("
                # A nested macro call within the macro
                s, i = self._expand_macro(s, match.start(), args)

    def _fn_val(self, args):
        # Returns the result of calling the function args[0] with the arguments
        # args[1..len(args)-1]. Plain variables are treated as functions
        # without arguments.

        fn = args[0]

        if fn in self.variables:
            var = self.variables[fn]

            if len(args) == 1:
                # Plain variable
                if var._n_expansions:
                    self._parse_error("Preprocessor variable {} recursively "
                                      "references itself".format(var.name))
            elif var._n_expansions > 100:
                # Allow functions to call themselves, but guess that functions
                # that are overly recursive are stuck (the limit of 100 is
                # arbitrary)
                self._parse_error("Preprocessor function {} seems stuck "
                                  "in infinite recursion".format(var.name))

            var._n_expansions += 1
            res = self._expand_whole(self.variables[fn].value, args)
            var._n_expansions -= 1

            return res

        if fn in self._functions:
            # Built-in or user-defined function

            py_fn, min_arg, max_arg = self._functions[fn]

            if len(args) - 1 < min_arg or \
               (max_arg is not None and len(args) - 1 > max_arg):

                if min_arg == max_arg:
                    expected_args = min_arg
                elif max_arg is None:
                    expected_args = "{} or more".format(min_arg)
                else:
                    expected_args = "{}-{}".format(min_arg, max_arg)

                raise KconfigError("{}:{}: bad number of arguments in call "
                                   "to {}, expected {}, got {}"
                                   .format(self.filename, self.linenr, fn,
                                           expected_args, len(args) - 1))

            return py_fn(self, *args)

        # Environment variables are tried last
        if fn in os.environ:
            self.env_vars.add(fn)
            return os.environ[fn]

        # Undefined variables/functions expand to the empty string
        return ""

    #
    # Parsing
    #

    def _make_and(self, e1, e2):
        # Constructs an AND (&&) expression. Performs trivial simplification.

        if e1 is self.y:
            return e2
        if e2 is self.y:
            return e1
        if e1 is self.n or e2 is self.n:
            return self.n
        return (AND, e1, e2)

    def _make_or(self, e1, e2):
        # Constructs an OR (||) expression. Performs trivial simplification.

        if e1 is self.n:
            return e2
        if e2 is self.n:
            return e1
        if e1 is self.y or e2 is self.y:
            return self.y
        return (OR, e1, e2)

    def _parse_block(self, end_token, parent, prev):
        # Parses a block, which is the contents of either a file or an if,
        # menu, or choice statement.
        #
        # end_token:
        #   The token that ends the block, e.g. _T_ENDIF ("endif") for ifs.
        #   None for files.
        #
        # parent:
        #   The parent menu node, corresponding to a menu, Choice, or 'if'.
        #   'if's are flattened after parsing.
        #
        # prev:
        #   The previous menu node. New nodes will be added after this one (by
        #   modifying 'next' pointers).
        #
        #   'prev' is reused to parse a list of child menu nodes (for a menu or
        #   Choice): After parsing the children, the 'next' pointer is assigned
        #   to the 'list' pointer to "tilt up" the children above the node.
        #
        # Returns the final menu node in the block (or 'prev' if the block is
        # empty). This allows chaining.

        while self._next_line():
            t0 = self._tokens[0]

            if t0 is _T_CONFIG or t0 is _T_MENUCONFIG:
                # The tokenizer allocates Symbol objects for us
                sym = self._tokens[1]

                if sym.__class__ is not Symbol or sym.is_constant:
                    self._parse_error("missing or bad symbol name")

                if self._tokens[2] is not None:
                    self._trailing_tokens_error()

                self.defined_syms.append(sym)

                node = MenuNode()
                node.kconfig = self
                node.item = sym
                node.is_menuconfig = (t0 is _T_MENUCONFIG)
                node.prompt = node.help = node.list = None
                node.parent = parent
                node.filename = self.filename
                node.linenr = self.linenr
                node.include_path = self._include_path

                sym.nodes.append(node)

                self._parse_props(node)

                if node.is_menuconfig and not node.prompt:
                    self._warn("the menuconfig symbol {} has no prompt"
                               .format(sym.name_and_loc))

                # Equivalent to
                #
                #   prev.next = node
                #   prev = node
                #
                # due to tricky Python semantics. The order matters.
                prev.next = prev = node

            elif t0 is None:
                # Blank line
                continue

            elif t0 in _SOURCE_TOKENS:
                pattern = self._expect_str_and_eol()

                if t0 in _REL_SOURCE_TOKENS:
                    # Relative source
                    pattern = join(dirname(self.filename), pattern)

                # - glob() doesn't support globbing relative to a directory, so
                #   we need to prepend $srctree to 'pattern'. Use join()
                #   instead of '+' so that an absolute path in 'pattern' is
                #   preserved.
                #
                # - Sort the glob results to ensure a consistent ordering of
                #   Kconfig symbols, which indirectly ensures a consistent
                #   ordering in e.g. .config files
                filenames = sorted(iglob(join(self._srctree_prefix, pattern)))

                if not filenames and t0 in _OBL_SOURCE_TOKENS:
                    raise KconfigError(
                        "{}:{}: '{}' not found (in '{}'). Check that "
                        "environment variables are set correctly (e.g. "
                        "$srctree, which is {}). Also note that unset "
                        "environment variables expand to the empty string."
                        .format(self.filename, self.linenr, pattern,
                                self._line.strip(),
                                "set to '{}'".format(self.srctree)
                                if self.srctree else "unset or blank"))

                # Parse each sourced file as a nested block
                for filename in filenames:
                    self._enter_file(filename)
                    prev = self._parse_block(None, parent, prev)
                    self._leave_file()

            elif t0 is end_token:
                # Reached the end of the block. Terminate the final node and
                # return it.

                if self._tokens[1] is not None:
                    self._trailing_tokens_error()

                prev.next = None
                return prev

            elif t0 is _T_IF:
                node = MenuNode()
                node.item = node.prompt = None
                node.parent = parent
                node.dep = self._expect_expr_and_eol()

                self._parse_block(_T_ENDIF, node, node)
                node.list = node.next

                prev.next = prev = node

            elif t0 is _T_MENU:
                node = MenuNode()
                node.kconfig = self
                node.item = t0  # _T_MENU == MENU
                node.is_menuconfig = True
                node.prompt = (self._expect_str_and_eol(), self.y)
                node.visibility = self.y
                node.parent = parent
                node.filename = self.filename
                node.linenr = self.linenr
                node.include_path = self._include_path

                self.menus.append(node)

                self._parse_props(node)
                self._parse_block(_T_ENDMENU, node, node)
                node.list = node.next

                prev.next = prev = node

            elif t0 is _T_COMMENT:
                node = MenuNode()
                node.kconfig = self
                node.item = t0  # _T_COMMENT == COMMENT
                node.is_menuconfig = False
                node.prompt = (self._expect_str_and_eol(), self.y)
                node.list = None
                node.parent = parent
                node.filename = self.filename
                node.linenr = self.linenr
                node.include_path = self._include_path

                self.comments.append(node)

                self._parse_props(node)

                prev.next = prev = node

            elif t0 is _T_CHOICE:
                if self._tokens[1] is None:
                    choice = Choice()
                    choice.direct_dep = self.n
                else:
                    # Named choice
                    name = self._expect_str_and_eol()
                    choice = self.named_choices.get(name)
                    if not choice:
                        choice = Choice()
                        choice.name = name
                        choice.direct_dep = self.n
                        self.named_choices[name] = choice

                self.choices.append(choice)

                node = MenuNode()
                node.kconfig = choice.kconfig = self
                node.item = choice
                node.is_menuconfig = True
                node.prompt = node.help = None
                node.parent = parent
                node.filename = self.filename
                node.linenr = self.linenr
                node.include_path = self._include_path

                choice.nodes.append(node)

                self._parse_props(node)
                self._parse_block(_T_ENDCHOICE, node, node)
                node.list = node.next

                prev.next = prev = node

            elif t0 is _T_MAINMENU:
                self.top_node.prompt = (self._expect_str_and_eol(), self.y)

            else:
                # A valid endchoice/endif/endmenu is caught by the 'end_token'
                # check above
                self._parse_error(
                    "no corresponding 'choice'" if t0 is _T_ENDCHOICE else
                    "no corresponding 'if'" if t0 is _T_ENDIF else
                    "no corresponding 'menu'" if t0 is _T_ENDMENU else
                    "unrecognized construct")

        # End of file reached. Return the last node.

        if end_token:
            raise KconfigError(
                "error: expected '{}' at end of '{}'"
                .format("endchoice" if end_token is _T_ENDCHOICE else
                        "endif" if end_token is _T_ENDIF else
                        "endmenu", self.filename))

        return prev

    def _parse_cond(self):
        # Parses an optional 'if <expr>' construct and returns the parsed
        # <expr>, or self.y if the next token is not _T_IF

        expr = self._parse_expr(True) if self._check_token(_T_IF) else self.y

        if self._tokens[self._tokens_i] is not None:
            self._trailing_tokens_error()

        return expr

    def _parse_props(self, node):
        # Parses and adds properties to the MenuNode 'node' (type, 'prompt',
        # 'default's, etc.) Properties are later copied up to symbols and
        # choices in a separate pass after parsing, in e.g.
        # _add_props_to_sym().
        #
        # An older version of this code added properties directly to symbols
        # and choices instead of to their menu nodes (and handled dependency
        # propagation simultaneously), but that loses information on where a
        # property is added when a symbol or choice is defined in multiple
        # locations. Some Kconfig configuration systems rely heavily on such
        # symbols, and better docs can be generated by keeping track of where
        # properties are added.
        #
        # node:
        #   The menu node we're parsing properties on

        # Dependencies from 'depends on'. Will get propagated to the properties
        # below.
        node.dep = self.y

        while self._next_line():
            t0 = self._tokens[0]

            if t0 in _TYPE_TOKENS:
                # Relies on '_T_BOOL is BOOL', etc., to save a conversion
                self._set_type(node.item, t0)
                if self._tokens[1] is not None:
                    self._parse_prompt(node)

            elif t0 is _T_DEPENDS:
                if not self._check_token(_T_ON):
                    self._parse_error("expected 'on' after 'depends'")

                node.dep = self._make_and(node.dep,
                                          self._expect_expr_and_eol())

            elif t0 is _T_HELP:
                self._parse_help(node)

            elif t0 is _T_SELECT:
                if node.item.__class__ is not Symbol:
                    self._parse_error("only symbols can select")

                node.selects.append((self._expect_nonconst_sym(),
                                     self._parse_cond()))

            elif t0 is None:
                # Blank line
                continue

            elif t0 is _T_DEFAULT:
                node.defaults.append((self._parse_expr(False),
                                      self._parse_cond()))

            elif t0 in _DEF_TOKEN_TO_TYPE:
                # 'def_bool', 'def_tristate', etc.: type plus default in one
                self._set_type(node.item, _DEF_TOKEN_TO_TYPE[t0])
                node.defaults.append((self._parse_expr(False),
                                      self._parse_cond()))

            elif t0 is _T_PROMPT:
                self._parse_prompt(node)

            elif t0 is _T_RANGE:
                node.ranges.append((self._expect_sym(), self._expect_sym(),
                                    self._parse_cond()))

            elif t0 is _T_IMPLY:
                if node.item.__class__ is not Symbol:
                    self._parse_error("only symbols can imply")

                node.implies.append((self._expect_nonconst_sym(),
                                     self._parse_cond()))

            elif t0 is _T_VISIBLE:
                if not self._check_token(_T_IF):
                    self._parse_error("expected 'if' after 'visible'")

                node.visibility = self._make_and(node.visibility,
                                                 self._expect_expr_and_eol())

            elif t0 is _T_OPTION:
                if self._check_token(_T_ENV):
                    if not self._check_token(_T_EQUAL):
                        self._parse_error("expected '=' after 'env'")

                    env_var = self._expect_str_and_eol()
                    node.item.env_var = env_var

                    if env_var in os.environ:
                        node.defaults.append(
                            (self._lookup_const_sym(os.environ[env_var]),
                             self.y))
                    else:
                        self._warn("{1} has 'option env=\"{0}\"', "
                                   "but the environment variable {0} is not "
                                   "set".format(node.item.name, env_var),
                                   self.filename, self.linenr)

                    if env_var != node.item.name:
                        self._warn("Kconfiglib expands environment variables "
                                   "in strings directly, meaning you do not "
                                   "need 'option env=...' \"bounce\" symbols. "
                                   "For compatibility with the C tools, "
                                   "rename {} to {} (so that the symbol name "
                                   "matches the environment variable name)."
                                   .format(node.item.name, env_var),
                                   self.filename, self.linenr)

                elif self._check_token(_T_DEFCONFIG_LIST):
                    if not self.defconfig_list:
                        self.defconfig_list = node.item
                    else:
                        self._warn("'option defconfig_list' set on multiple "
                                   "symbols ({0} and {1}). Only {0} will be "
                                   "used.".format(self.defconfig_list.name,
                                                  node.item.name),
                                   self.filename, self.linenr)

                elif self._check_token(_T_MODULES):
                    # To reduce warning spam, only warn if 'option modules' is
                    # set on some symbol that isn't MODULES, which should be
                    # safe. I haven't run into any projects that make use of
                    # modules besides the kernel yet, and there it's likely to
                    # keep being called "MODULES".
                    if node.item is not self.modules:
                        self._warn("the 'modules' option is not supported. "
                                   "Let me know if this is a problem for you, "
                                   "as it wouldn't be that hard to implement. "
                                   "Note that modules are supported -- "
                                   "Kconfiglib just assumes the symbol name "
                                   "MODULES, like older versions of the C "
                                   "implementation did when 'option modules' "
                                   "wasn't used.",
                                   self.filename, self.linenr)

                elif self._check_token(_T_ALLNOCONFIG_Y):
                    if node.item.__class__ is not Symbol:
                        self._parse_error("the 'allnoconfig_y' option is only "
                                          "valid for symbols")

                    node.item.is_allnoconfig_y = True

                else:
                    self._parse_error("unrecognized option")

            elif t0 is _T_OPTIONAL:
                if node.item.__class__ is not Choice:
                    self._parse_error('"optional" is only valid for choices')

                node.item.is_optional = True

            else:
                # Reuse the tokens for the non-property line later
                self._reuse_tokens = True
                return

    def _set_type(self, sc, new_type):
        # Sets the type of 'sc' (symbol or choice) to 'new_type', warning on
        # conflicting re-definitions

        # UNKNOWN is falsy
        if sc.orig_type and sc.orig_type is not new_type:
            self._warn("{} defined with multiple types, {} will be used"
                       .format(sc.name_and_loc, TYPE_TO_STR[new_type]))

        sc.orig_type = new_type

    def _parse_prompt(self, node):
        # 'prompt' properties override each other within a single definition of
        # a symbol, but additional prompts can be added by defining the symbol
        # multiple times

        if node.prompt:
            self._warn(node.item.name_and_loc +
                       " defined with multiple prompts in single location")

        prompt = self._tokens[1]
        self._tokens_i = 2

        if prompt.__class__ is not str:
            self._parse_error("expected prompt string")

        if prompt != prompt.strip():
            self._warn(node.item.name_and_loc +
                       " has leading or trailing whitespace in its prompt")

            # This avoids issues for e.g. reStructuredText documentation, where
            # '*prompt *' is invalid
            prompt = prompt.strip()

        node.prompt = (prompt, self._parse_cond())

    def _parse_help(self, node):
        # Parses a help text block and stores it on 'node'

        if node.help is not None:
            self._warn(node.item.name_and_loc + " defined with more than "
                       "one help text -- only the last one will be used")

        # Micro-optimization. This code is pretty hot.
        readline = self._readline

        # Find first non-blank (not all-space) line and get its
        # indentation

        while 1:
            line = readline()
            self.linenr += 1
            if not line:
                # End of file before any help text
                self._empty_help(node, line)
                return
            if not line.isspace():
                break

        len_ = len  # Micro-optimization

        # Use a separate 'expline' variable here and below to avoid stomping on
        # any tabs people might've put deliberately into the first line after
        # the help text
        expline = line.expandtabs()
        indent = len_(expline) - len_(expline.lstrip())
        if not indent:
            # The line after 'help' is not indented: empty help text
            self._empty_help(node, line)
            return

        # The help text goes on till the first non-blank line with less indent
        # than the first line

        # Add the first line
        lines = [expline[indent:]]
        add_line = lines.append  # Micro-optimization

        while 1:
            line = readline()
            if line.isspace():
                # No need to preserve the exact whitespace in these
                add_line("\n")
            elif not line:
                # End of file
                break
            else:
                expline = line.expandtabs()
                if len_(expline) - len_(expline.lstrip()) < indent:
                    break
                add_line(expline[indent:])

        self.linenr += len_(lines)
        node.help = "".join(lines).rstrip()
        if line:
            self._line_after_help(line)

    def _empty_help(self, node, line):
        # Records an empty help text on 'node' and warns about it

        self._warn(node.item.name_and_loc + " has 'help' but empty help text")
        node.help = ""
        if line:
            self._line_after_help(line)

    def _parse_expr(self, transform_m):
        # Parses an expression from the tokens in Kconfig._tokens using a
        # simple top-down approach. See the module docstring for the expression
        # format.
        #
        # transform_m:
        #   True if m should be rewritten to m && MODULES. See the
        #   Kconfig.eval_string() documentation.

        # Grammar:
        #
        #   expr:     and_expr ['||' expr]
        #   and_expr: factor ['&&' and_expr]
        #   factor:   <symbol> ['='/'!='/'<'/... <symbol>]
        #             '!' factor
        #             '(' expr ')'
        #
        # It helps to think of the 'expr: and_expr' case as a single-operand OR
        # (no ||), and of the 'and_expr: factor' case as a single-operand AND
        # (no &&). Parsing code is always a bit tricky.

        # Mind dump: parse_factor() and two nested loops for OR and AND would
        # work as well. The straightforward implementation there gives a
        # (op, (op, (op, A, B), C), D) parse for A op B op C op D. Representing
        # expressions as (op, [list of operands]) instead goes nicely with that
        # version, but is wasteful for short expressions and complicates
        # expression evaluation and other code that works on expressions (more
        # complicated code likely offsets any performance gain from less
        # recursion too). If we also try to optimize the list representation by
        # merging lists when possible (e.g. when ANDing two AND expressions),
        # we end up allocating a ton of lists instead of reusing expressions,
        # which is bad.

        and_expr = self._parse_and_expr(transform_m)

        # Return 'and_expr' directly if we have a "single-operand" OR.
        # Otherwise, parse the expression on the right and make an OR node.
        # This turns A || B || C || D into (OR, A, (OR, B, (OR, C, D))).
        return and_expr if not self._check_token(_T_OR) else \
            (OR, and_expr, self._parse_expr(transform_m))

    def _parse_and_expr(self, transform_m):
        # Parses an and_expr (see the grammar in _parse_expr())

        factor = self._parse_factor(transform_m)

        # Return 'factor' directly if we have a "single-operand" AND.
        # Otherwise, parse the right operand and make an AND node. This turns
        # A && B && C && D into (AND, A, (AND, B, (AND, C, D))).
        return factor if not self._check_token(_T_AND) else \
            (AND, factor, self._parse_and_expr(transform_m))

    def _parse_factor(self, transform_m):
        # Parses a factor (see the grammar in _parse_expr())

        token = self._tokens[self._tokens_i]
        self._tokens_i += 1

        if token.__class__ is Symbol:
            # Plain symbol or relation

            if self._tokens[self._tokens_i] not in _RELATIONS:
                # Plain symbol

                # For conditional expressions ('depends on <expr>',
                # '... if <expr>', etc.), m is rewritten to m && MODULES.
                if transform_m and token is self.m:
                    return (AND, self.m, self.modules)

                return token

            # Relation
            #
            # _T_EQUAL, _T_UNEQUAL, etc., deliberately have the same values as
            # EQUAL, UNEQUAL, etc., so we can just use the token directly
            self._tokens_i += 1
            return (self._tokens[self._tokens_i - 1], token,
                    self._expect_sym())

        if token is _T_NOT:
            # token == _T_NOT == NOT
            return (token, self._parse_factor(transform_m))

        if token is _T_OPEN_PAREN:
            expr_parse = self._parse_expr(transform_m)
            if self._check_token(_T_CLOSE_PAREN):
                return expr_parse

        # Fall-through: also reached on a missing ')'
        self._parse_error("malformed expression")

    #
    # Caching and invalidation
    #

    def _build_dep(self):
        # Populates the Symbol/Choice._dependents sets, which contain all other
        # items (symbols and choices) that immediately depend on the item in
        # the sense that changing the value of the item might affect the value
        # of the dependent items. This is used for caching/invalidation.
        #
        # The calculated sets might be larger than necessary as we don't do any
        # complex analysis of the expressions.

        depend_on = _depend_on  # Micro-optimization

        # Only calculate _dependents for defined symbols. Constant and
        # undefined symbols could theoretically be selected/implied, but it
        # wouldn't change their value, so it's not a true dependency.
        for sym in self.unique_defined_syms:
            # Symbols depend on the following:

            # The prompt conditions
            for node in sym.nodes:
                if node.prompt:
                    depend_on(sym, node.prompt[1])

            # The default values and their conditions
            for value, cond in sym.defaults:
                depend_on(sym, value)
                depend_on(sym, cond)

            # The reverse and weak reverse dependencies
            depend_on(sym, sym.rev_dep)
            depend_on(sym, sym.weak_rev_dep)

            # The ranges along with their conditions
            for low, high, cond in sym.ranges:
                depend_on(sym, low)
                depend_on(sym, high)
                depend_on(sym, cond)

            # The direct dependencies. This is usually redundant, as the direct
            # dependencies get propagated to properties, but it's needed to get
            # invalidation solid for 'imply', which only checks the direct
            # dependencies (even if there are no properties to propagate it
            # to).
            depend_on(sym, sym.direct_dep)

            # In addition to the above, choice symbols depend on the choice
            # they're in, but that's handled automatically since the Choice is
            # propagated to the conditions of the properties before
            # _build_dep() runs.

        for choice in self.unique_choices:
            # Choices depend on the following:

            # The prompt conditions
            for node in choice.nodes:
                if node.prompt:
                    depend_on(choice, node.prompt[1])

            # The default symbol conditions
            for _, cond in choice.defaults:
                depend_on(choice, cond)

    def _add_choice_deps(self):
        # Choices also depend on the choice symbols themselves, because the
        # y-mode selection of the choice might change if a choice symbol's
        # visibility changes.
        #
        # We add these dependencies separately after dependency loop detection.
        # The invalidation algorithm can handle the resulting
        # <choice symbol> <-> <choice> dependency loops, but they make loop
        # detection awkward.

        for choice in self.unique_choices:
            for sym in choice.syms:
                sym._dependents.add(choice)

    def _invalidate_all(self):
        # Undefined symbols never change value and don't need to be
        # invalidated, so we can just iterate over defined symbols.
        # Invalidating constant symbols would break things horribly.
        for sym in self.unique_defined_syms:
            sym._invalidate()

        for choice in self.unique_choices:
            choice._invalidate()

    #
    # Post-parsing menu tree processing, including dependency propagation and
    # implicit submenu creation
    #

    def _finalize_node(self, node, visible_if):
        # Finalizes a menu node and its children:
        #
        #  - Copies properties from menu nodes up to their contained
        #    symbols/choices
        #
        #  - Propagates dependencies from parent to child nodes
        #
        #  - Creates implicit menus (see kconfig-language.txt)
        #
        #  - Removes 'if' nodes
        #
        #  - Sets 'choice' types and registers choice symbols
        #
        # menu_finalize() in the C implementation is similar.
        #
        # node:
        #   The menu node to finalize. This node and its children will have
        #   been finalized when the function returns, and any implicit menus
        #   will have been created.
        #
        # visible_if:
        #   Dependencies from 'visible if' on parent menus. These are added to
        #   the prompts of symbols and choices.

        if node.item.__class__ is Symbol:
            # Copy defaults, ranges, selects, and implies to the Symbol
            self._add_props_to_sym(node)

            # Find any items that should go in an implicit menu rooted at the
            # symbol
            cur = node
            while cur.next and _auto_menu_dep(node, cur.next):
                # This makes implicit submenu creation work recursively, with
                # implicit menus inside implicit menus
                self._finalize_node(cur.next, visible_if)
                cur = cur.next
                # Reparent the tilted-up node onto the symbol's node
                cur.parent = node

            if cur is not node:
                # Found symbols that should go in an implicit submenu. Tilt
                # them up above us.
                node.list = node.next
                node.next = cur.next
                cur.next = None

        elif node.list:
            # The menu node is a choice, menu, or if. Finalize each child node.

            if node.item is MENU:
                visible_if = self._make_and(visible_if, node.visibility)

            # Propagate the menu node's dependencies to each child menu node.
            #
            # This needs to go before the recursive _finalize_node() call so
            # that implicit submenu creation can look ahead at dependencies.
            self._propagate_deps(node, visible_if)

            # Finalize the children
            cur = node.list
            while cur:
                self._finalize_node(cur, visible_if)
                cur = cur.next

        if node.list:
            # node's children have been individually finalized. Do final steps
            # to finalize this "level" in the menu tree.
            _flatten(node.list)
            _remove_ifs(node)

        # Empty choices (node.list None) are possible, so this needs to go
        # outside
        if node.item.__class__ is Choice:
            # Add the node's non-node-specific properties to the choice, like
            # _add_props_to_sym() does
            choice = node.item
            choice.direct_dep = self._make_or(choice.direct_dep, node.dep)
            choice.defaults += node.defaults

            _finalize_choice(node)

    def _propagate_deps(self, node, visible_if):
        # Propagates 'node's dependencies to its child menu nodes

        # If the parent node holds a Choice, we use the Choice itself as the
        # parent dependency. This makes sense as the value (mode) of the choice
        # limits the visibility of the contained choice symbols. The C
        # implementation works the same way.
        #
        # Due to the similar interface, Choice works as a drop-in replacement
        # for Symbol here.
        basedep = node.item if node.item.__class__ is Choice else node.dep

        cur = node.list
        while cur:
            dep = cur.dep = self._make_and(cur.dep, basedep)

            if cur.item.__class__ in _SYMBOL_CHOICE:
                # Propagate 'visible if' and dependencies to the prompt
                if cur.prompt:
                    cur.prompt = (cur.prompt[0],
                                  self._make_and(
                                      cur.prompt[1],
                                      self._make_and(visible_if, dep)))

                # Propagate dependencies to defaults
                if cur.defaults:
                    cur.defaults = [(default, self._make_and(cond, dep))
                                    for default, cond in cur.defaults]

                # Propagate dependencies to ranges
                if cur.ranges:
                    cur.ranges = [(low, high, self._make_and(cond, dep))
                                  for low, high, cond in cur.ranges]

                # Propagate dependencies to selects
                if cur.selects:
                    cur.selects = [(target, self._make_and(cond, dep))
                                   for target, cond in cur.selects]

                # Propagate dependencies to implies
                if cur.implies:
                    cur.implies = [(target, self._make_and(cond, dep))
                                   for target, cond in cur.implies]

            elif cur.prompt:  # Not a symbol/choice
                # Propagate dependencies to the prompt. 'visible if' is only
                # propagated to symbols/choices.
                cur.prompt = (cur.prompt[0],
                              self._make_and(cur.prompt[1], dep))

            cur = cur.next

    def _add_props_to_sym(self, node):
        # Copies properties from the menu node 'node' up to its contained
        # symbol, and adds (weak) reverse dependencies to selected/implied
        # symbols.
        #
        # This can't be rolled into _propagate_deps(), because that function
        # traverses the menu tree roughly breadth-first, meaning properties on
        # symbols defined in multiple locations could end up in the wrong
        # order.
        sym = node.item

        # See the Symbol class docstring
        sym.direct_dep = self._make_or(sym.direct_dep, node.dep)

        sym.defaults += node.defaults
        sym.ranges += node.ranges
        sym.selects += node.selects
        sym.implies += node.implies

        # Modify the reverse dependencies of the selected symbol
        for target, cond in node.selects:
            target.rev_dep = self._make_or(
                target.rev_dep,
                self._make_and(sym, cond))

        # Modify the weak reverse dependencies of the implied
        # symbol
        for target, cond in node.implies:
            target.weak_rev_dep = self._make_or(
                target.weak_rev_dep,
                self._make_and(sym, cond))

    #
    # Misc.
    #

    def _check_sym_sanity(self):
        # Checks various symbol properties that are handiest to check after
        # parsing. Only generates errors and warnings.

        def num_ok(sym, type_):
            # Returns True if the (possibly constant) symbol 'sym' is valid as a value
            # for a symbol of type type_ (INT or HEX)

            # 'not sym.nodes' implies a constant or undefined symbol, e.g. a plain
            # "123"
            if not sym.nodes:
                return _is_base_n(sym.name, _TYPE_TO_BASE[type_])

            return sym.orig_type is type_

        for sym in self.unique_defined_syms:
            if sym.orig_type in _BOOL_TRISTATE:
                # A helper function could be factored out here, but keep it
                # speedy/straightforward

                for target_sym, _ in sym.selects:
                    if target_sym.orig_type not in _BOOL_TRISTATE_UNKNOWN:
                        self._warn("{} selects the {} symbol {}, which is not "
                                   "bool or tristate"
                                   .format(sym.name_and_loc,
                                           TYPE_TO_STR[target_sym.orig_type],
                                           target_sym.name_and_loc))

                for target_sym, _ in sym.implies:
                    if target_sym.orig_type not in _BOOL_TRISTATE_UNKNOWN:
                        self._warn("{} implies the {} symbol {}, which is not "
                                   "bool or tristate"
                                   .format(sym.name_and_loc,
                                           TYPE_TO_STR[target_sym.orig_type],
                                           target_sym.name_and_loc))

            elif sym.orig_type:  # STRING/INT/HEX
                for default, _ in sym.defaults:
                    if default.__class__ is not Symbol:
                        raise KconfigError(
                            "the {} symbol {} has a malformed default {} -- "
                            "expected a single symbol"
                            .format(TYPE_TO_STR[sym.orig_type],
                                    sym.name_and_loc, expr_str(default)))

                    if sym.orig_type is STRING:
                        if not default.is_constant and not default.nodes and \
                           not default.name.isupper():
                            # 'default foo' on a string symbol could be either a symbol
                            # reference or someone leaving out the quotes. Guess that
                            # the quotes were left out if 'foo' isn't all-uppercase
                            # (and no symbol named 'foo' exists).
                            self._warn("style: quotes recommended around "
                                       "default value for string symbol "
                                       + sym.name_and_loc)

                    elif not num_ok(default, sym.orig_type):  # INT/HEX
                        self._warn("the {0} symbol {1} has a non-{0} default {2}"
                                   .format(TYPE_TO_STR[sym.orig_type],
                                           sym.name_and_loc,
                                           default.name_and_loc))

                if sym.selects or sym.implies:
                    self._warn("the {} symbol {} has selects or implies"
                               .format(TYPE_TO_STR[sym.orig_type],
                                       sym.name_and_loc))

            else:  # UNKNOWN
                self._warn("{} defined without a type"
                           .format(sym.name_and_loc))

            if sym.ranges:
                if sym.orig_type not in _INT_HEX:
                    self._warn(
                        "the {} symbol {} has ranges, but is not int or hex"
                        .format(TYPE_TO_STR[sym.orig_type],
                                sym.name_and_loc))
                else:
                    for low, high, _ in sym.ranges:
                        if not num_ok(low, sym.orig_type) or \
                           not num_ok(high, sym.orig_type):

                            self._warn("the {0} symbol {1} has a non-{0} "
                                       "range [{2}, {3}]"
                                       .format(TYPE_TO_STR[sym.orig_type],
                                               sym.name_and_loc,
                                               low.name_and_loc,
                                               high.name_and_loc))

    def _check_choice_sanity(self):
        # Checks various choice properties that are handiest to check after
        # parsing. Only generates errors and warnings.
        def warn_select_imply(sym, expr, expr_type):
            # Warns that each symbol in 'expr' (a rev_dep/weak_rev_dep
            # expression) selects/implies the choice symbol 'sym' to no effect
            msg = "the choice symbol {} is {} by the following symbols, but " \
                  "select/imply has no effect on choice symbols" \
                  .format(sym.name_and_loc, expr_type)

            # si = select/imply
            for si in split_expr(expr, OR):
                msg += "\n - " + split_expr(si, AND)[0].name_and_loc

            self._warn(msg)

        for choice in self.unique_choices:
            if choice.orig_type not in _BOOL_TRISTATE:
                self._warn("{} defined with type {}"
                           .format(choice.name_and_loc,
                                   TYPE_TO_STR[choice.orig_type]))

            for node in choice.nodes:
                if node.prompt:
                    break
            else:
                self._warn(choice.name_and_loc + " defined without a prompt")

            for default, _ in choice.defaults:
                if default.__class__ is not Symbol:
                    raise KconfigError(
                        "{} has a malformed default {}"
                        .format(choice.name_and_loc, expr_str(default)))

                if default.choice is not choice:
                    self._warn("the default selection {} of {} is not "
                               "contained in the choice"
                               .format(default.name_and_loc,
                                       choice.name_and_loc))

            for sym in choice.syms:
                if sym.defaults:
                    self._warn("default on the choice symbol {} will have "
                               "no effect, as defaults do not affect choice "
                               "symbols".format(sym.name_and_loc))

                if sym.rev_dep is not sym.kconfig.n:
                    warn_select_imply(sym, sym.rev_dep, "selected")

                if sym.weak_rev_dep is not sym.kconfig.n:
                    warn_select_imply(sym, sym.weak_rev_dep, "implied")

                for node in sym.nodes:
                    if node.parent.item is choice:
                        if not node.prompt:
                            self._warn("the choice symbol {} has no prompt"
                                       .format(sym.name_and_loc))

                    elif node.prompt:
                        self._warn("the choice symbol {} is defined with a "
                                   "prompt outside the choice"
                                   .format(sym.name_and_loc))

    def _parse_error(self, msg):
        # Raises a KconfigError with the current parsing location (if known)
        # and the offending line prepended to 'msg'
        raise KconfigError("{}error: couldn't parse '{}': {}".format(
            "" if self.filename is None else
                "{}:{}: ".format(self.filename, self.linenr),
            self._line.strip(), msg))

    def _trailing_tokens_error(self):
        # Raises a parse error for leftover tokens at the end of a line
        self._parse_error("extra tokens at end of line")

    def _open(self, filename, mode):
        # open() wrapper:
        #
        # - Enable universal newlines mode on Python 2 to ease
        #   interoperability between Linux and Windows. It's already the
        #   default on Python 3.
        #
        #   The "U" flag would currently work for both Python 2 and 3, but it's
        #   deprecated on Python 3, so play it future-safe.
        #
        #   io.open() defaults to universal newlines on Python 2 (and is an
        #   alias for open() on Python 3), but it returns 'unicode' strings and
        #   slows things down:
        #
        #     Parsing x86 Kconfigs on Python 2
        #
        #     with open(..., "rU"):
        #
        #       real  0m0.930s
        #       user  0m0.905s
        #       sys   0m0.025s
        #
        #     with io.open():
        #
        #       real  0m1.069s
        #       user  0m1.040s
        #       sys   0m0.029s
        #
        #   There's no appreciable performance difference between "r" and
        #   "rU" for parsing performance on Python 2.
        #
        # - For Python 3, force the encoding. Forcing the encoding on Python 2
        #   turns strings into Unicode strings, which gets messy. Python 2
        #   doesn't decode regular strings anyway.
        return open(filename, "rU" if mode == "r" else mode) if _IS_PY2 else \
               open(filename, mode, encoding=self._encoding)

    def _check_undef_syms(self):
        # Prints warnings for all references to undefined symbols within the
        # Kconfig files

        def is_num(s):
            # Returns True if the string 's' looks like a number.
            #
            # Internally, all operands in Kconfig are symbols, only undefined symbols
            # (which numbers usually are) get their name as their value.
            #
            # Only hex numbers that start with 0x/0X are classified as numbers.
            # Otherwise, symbols whose names happen to contain only the letters A-F
            # would trigger false positives.
try: int(s) except ValueError: if not s.startswith(("0x", "0X")): return False try: int(s, 16) except ValueError: return False return True for sym in (self.syms.viewvalues if _IS_PY2 else self.syms.values)(): # - sym.nodes empty means the symbol is undefined (has no # definition locations) # # - Due to Kconfig internals, numbers show up as undefined Kconfig # symbols, but shouldn't be flagged # # - The MODULES symbol always exists if not sym.nodes and not is_num(sym.name) and \ sym.name != "MODULES": msg = "undefined symbol {}:".format(sym.name) for node in self.node_iter(): if sym in node.referenced: msg += "\n\n- Referenced at {}:{}:\n\n{}" \ .format(node.filename, node.linenr, node) self._warn(msg) def _warn(self, msg, filename=None, linenr=None): # For printing general warnings if not self.warn: return msg = "warning: " + msg if filename is not None: msg = "{}:{}: {}".format(filename, linenr, msg) self.warnings.append(msg) if self.warn_to_stderr: sys.stderr.write(msg + "\n") class Symbol(object): """ Represents a configuration symbol: (menu)config FOO ... The following attributes are available. They should be viewed as read-only, and some are implemented through @property magic (but are still efficient to access due to internal caching). Note: Prompts, help texts, and locations are stored in the Symbol's MenuNode(s) rather than in the Symbol itself. Check the MenuNode class and the Symbol.nodes attribute. This organization matches the C tools. name: The name of the symbol, e.g. "FOO" for 'config FOO'. type: The type of the symbol. One of BOOL, TRISTATE, STRING, INT, HEX, UNKNOWN. UNKNOWN is for undefined symbols, (non-special) constant symbols, and symbols defined without a type. When running without modules (MODULES having the value n), TRISTATE symbols magically change type to BOOL. This also happens for symbols within choices in "y" mode. This matches the C tools, and makes sense for menuconfig-like functionality. 
    orig_type:
      The type as given in the Kconfig file, without any magic applied. Used
      when printing the symbol.

    tri_value:
      The tristate value of the symbol as an integer. One of 0, 1, 2,
      representing n, m, y. Always 0 (n) for non-bool/tristate symbols.

      This is the symbol value that's used outside of relation expressions
      (A, !A, A && B, A || B).

    str_value:
      The value of the symbol as a string. Gives the value for string/int/hex
      symbols. For bool/tristate symbols, gives "n", "m", or "y".

      This is the symbol value that's used in relational expressions
      (A = B, A != B, etc.)

      Gotcha: For int/hex symbols, the exact format of the value is often
      preserved (e.g. when writing a .config file), hence why you can't get it
      directly as an int. Do int(int_sym.str_value) or
      int(hex_sym.str_value, 16) to get the integer value.

    user_value:
      The user value of the symbol. None if no user value has been assigned
      (via Kconfig.load_config() or Symbol.set_value()).

      Holds 0, 1, or 2 for bool/tristate symbols, and a string for the other
      symbol types.

      WARNING: Do not assign directly to this. It will break things. Use
      Symbol.set_value().

    assignable:
      A tuple containing the tristate user values that can currently be
      assigned to the symbol (that would be respected), ordered from lowest (0,
      representing n) to highest (2, representing y). This corresponds to the
      selections available in the menuconfig interface. The set of assignable
      values is calculated from the symbol's visibility and selects/implies.

      Returns the empty set for non-bool/tristate symbols and for symbols with
      visibility n. The other possible values are (0, 2), (0, 1, 2), (1, 2),
      (1,), and (2,). A (1,) or (2,) result means the symbol is visible but
      "locked" to m or y through a select, perhaps in combination with the
      visibility. menuconfig represents this as -M- and -*-, respectively.

      For string/hex/int symbols, check if Symbol.visibility is non-0 (non-n)
      instead to determine if the value can be changed.

      Some handy 'assignable' idioms:

        # Is 'sym' an assignable (visible) bool/tristate symbol?
        if sym.assignable:
            # What's the highest value it can be assigned? [-1] in Python
            # gives the last element.
            sym_high = sym.assignable[-1]

            # The lowest?
            sym_low = sym.assignable[0]

            # Can the symbol be set to at least m?
            if sym.assignable[-1] >= 1:
                ...

        # Can the symbol be set to m?
        if 1 in sym.assignable:
            ...

    visibility:
      The visibility of the symbol. One of 0, 1, 2, representing n, m, y. See
      the module documentation for an overview of symbol values and visibility.

    config_string:
      The .config assignment string that would get written out for the symbol
      by Kconfig.write_config(). Returns the empty string if no .config
      assignment would get written out.

      In general, visible symbols, symbols with (active) defaults, and selected
      symbols get written out. This includes all non-n-valued bool/tristate
      symbols, and all visible string/int/hex symbols.

      Symbols with the (no longer needed) 'option env=...' option generate no
      configuration output, and neither does the special
      'option defconfig_list' symbol.

      Tip: This field is useful when generating custom configuration output,
      even for non-.config-like formats. To write just the symbols that would
      get written out to .config files, do this:

        if sym.config_string:
            *Write symbol, e.g. by looking at sym.str_value*

      This is a superset of the symbols written out by write_autoconf(). That
      function skips all n-valued symbols.

      There usually won't be any great harm in just writing all symbols either,
      though you might get some special symbols and possibly some "redundant"
      n-valued symbol entries in there.

    name_and_loc:
      Holds a string like

        "MY_SYMBOL (defined at foo/Kconfig:12, bar/Kconfig:14)"

      , giving the name of the symbol and its definition location(s). If the
      symbol is undefined, the location is given as "(undefined)".

    nodes:
      A list of MenuNodes for this symbol. Will contain a single MenuNode for
      most symbols. Undefined and constant symbols have an empty nodes list.
      Symbols defined in multiple locations get one node for each location.

    choice:
      Holds the parent Choice for choice symbols, and None for non-choice
      symbols. Doubles as a flag for whether a symbol is a choice symbol.

    defaults:
      List of (default, cond) tuples for the symbol's 'default' properties. For
      example, 'default A && B if C || D' is represented as
      ((AND, A, B), (OR, C, D)). If no condition was given, 'cond' is
      self.kconfig.y.

      Note that 'depends on' and parent dependencies are propagated to
      'default' conditions.

    selects:
      List of (symbol, cond) tuples for the symbol's 'select' properties. For
      example, 'select A if B && C' is represented as (A, (AND, B, C)). If no
      condition was given, 'cond' is self.kconfig.y.

      Note that 'depends on' and parent dependencies are propagated to 'select'
      conditions.

    implies:
      Like 'selects', for imply.

    ranges:
      List of (low, high, cond) tuples for the symbol's 'range' properties. For
      example, 'range 1 2 if A' is represented as (1, 2, A). If there is no
      condition, 'cond' is self.kconfig.y.

      Note that 'depends on' and parent dependencies are propagated to 'range'
      conditions.

      Gotcha: 1 and 2 above will be represented as (undefined) Symbols rather
      than plain integers. Undefined symbols get their name as their string
      value, so this works out. The C tools work the same way.

    orig_defaults:
    orig_selects:
    orig_implies:
    orig_ranges:
      See the corresponding attributes on the MenuNode class.

    rev_dep:
      Reverse dependency expression from other symbols selecting this symbol.
      Multiple selections get ORed together. A condition on a select is ANDed
      with the selecting symbol.

      For example, if A has 'select FOO' and B has 'select FOO if C', then
      FOO's rev_dep will be (OR, A, (AND, B, C)).

    weak_rev_dep:
      Like rev_dep, for imply.

    direct_dep:
      The direct ('depends on') dependencies for the symbol, or self.kconfig.y
      if there are no direct dependencies.

      This attribute includes any dependencies from surrounding menus and ifs.
      Those get propagated to the direct dependencies, and the resulting direct
      dependencies in turn get propagated to the conditions of all properties.

      If the symbol is defined in multiple locations, the dependencies from the
      different locations get ORed together.

    referenced:
      A set() with all symbols and choices referenced in the properties and
      property conditions of the symbol.

      Also includes dependencies from surrounding menus and ifs, because those
      get propagated to the symbol (see the 'Intro to symbol values' section in
      the module docstring).

      Choices appear in the dependencies of choice symbols.

      For the following definitions, only B and not C appears in A's
      'referenced'. To get transitive references, you'll have to recursively
      expand 'references' until no new items appear.

        config A
                bool
                depends on B

        config B
                bool
                depends on C

        config C
                bool

      See the Symbol.direct_dep attribute if you're only interested in the
      direct dependencies of the symbol (its 'depends on').

      You can extract the symbols in it with the global expr_items() function.

    env_var:
      If the Symbol has an 'option env="FOO"' option, this contains the name
      ("FOO") of the environment variable. None for symbols without an
      'option env'.

      'option env="FOO"' acts like a 'default' property whose value is the
      value of $FOO.

      Symbols with 'option env' are never written out to .config files, even if
      they are visible. env_var corresponds to a flag called SYMBOL_AUTO in the
      C implementation.

    is_allnoconfig_y:
      True if the symbol has 'option allnoconfig_y' set on it. This has no
      effect internally (except when printing symbols), but can be checked by
      scripts.

    is_constant:
      True if the symbol is a constant (quoted) symbol.

    kconfig:
      The Kconfig instance this symbol is from.
""" __slots__ = ( "_cached_assignable", "_cached_str_val", "_cached_tri_val", "_cached_vis", "_dependents", "_old_val", "_visited", "_was_set", "_write_to_conf", "choice", "defaults", "direct_dep", "env_var", "implies", "is_allnoconfig_y", "is_constant", "kconfig", "name", "nodes", "orig_type", "ranges", "rev_dep", "selects", "user_value", "weak_rev_dep", ) # # Public interface # @property def type(self): """ See the class documentation. """ if self.orig_type is TRISTATE and \ (self.choice and self.choice.tri_value == 2 or not self.kconfig.modules.tri_value): return BOOL return self.orig_type @property def str_value(self): """ See the class documentation. """ if self._cached_str_val is not None: return self._cached_str_val if self.orig_type in _BOOL_TRISTATE: # Also calculates the visibility, so invalidation safe self._cached_str_val = TRI_TO_STR[self.tri_value] return self._cached_str_val # As a quirk of Kconfig, undefined symbols get their name as their # string value. This is why things like "FOO = bar" work for seeing if # FOO has the value "bar". if not self.orig_type: # UNKNOWN self._cached_str_val = self.name return self.name val = "" # Warning: See Symbol._rec_invalidate(), and note that this is a hidden # function call (property magic) vis = self.visibility self._write_to_conf = (vis != 0) if self.orig_type in _INT_HEX: # The C implementation checks the user value against the range in a # separate code path (post-processing after loading a .config). # Checking all values here instead makes more sense for us. It # requires that we check for a range first. 
base = _TYPE_TO_BASE[self.orig_type] # Check if a range is in effect for low_expr, high_expr, cond in self.ranges: if expr_value(cond): has_active_range = True # The zeros are from the C implementation running strtoll() # on empty strings low = int(low_expr.str_value, base) if \ _is_base_n(low_expr.str_value, base) else 0 high = int(high_expr.str_value, base) if \ _is_base_n(high_expr.str_value, base) else 0 break else: has_active_range = False # Defaults are used if the symbol is invisible, lacks a user value, # or has an out-of-range user value use_defaults = True if vis and self.user_value: user_val = int(self.user_value, base) if has_active_range and not low <= user_val <= high: num2str = str if base == 10 else hex self.kconfig._warn( "user value {} on the {} symbol {} ignored due to " "being outside the active range ([{}, {}]) -- falling " "back on defaults" .format(num2str(user_val), TYPE_TO_STR[self.orig_type], self.name_and_loc, num2str(low), num2str(high))) else: # If the user value is well-formed and satisfies range # contraints, it is stored in exactly the same form as # specified in the assignment (with or without "0x", etc.) val = self.user_value use_defaults = False if use_defaults: # No user value or invalid user value. Look at defaults. 
# Used to implement the warning below has_default = False for sym, cond in self.defaults: if expr_value(cond): has_default = self._write_to_conf = True val = sym.str_value if _is_base_n(val, base): val_num = int(val, base) else: val_num = 0 # strtoll() on empty string break else: val_num = 0 # strtoll() on empty string # This clamping procedure runs even if there's no default if has_active_range: clamp = None if val_num < low: clamp = low elif val_num > high: clamp = high if clamp is not None: # The value is rewritten to a standard form if it is # clamped val = str(clamp) \ if self.orig_type is INT else \ hex(clamp) if has_default: num2str = str if base == 10 else hex self.kconfig._warn( "default value {} on {} clamped to {} due to " "being outside the active range ([{}, {}])" .format(val_num, self.name_and_loc, num2str(clamp), num2str(low), num2str(high))) elif self.orig_type is STRING: if vis and self.user_value is not None: # If the symbol is visible and has a user value, use that val = self.user_value else: # Otherwise, look at defaults for sym, cond in self.defaults: if expr_value(cond): val = sym.str_value self._write_to_conf = True break # env_var corresponds to SYMBOL_AUTO in the C implementation, and is # also set on the defconfig_list symbol there. Test for the # defconfig_list symbol explicitly instead here, to avoid a nonsensical # env_var setting and the defconfig_list symbol being printed # incorrectly. This code is pretty cold anyway. if self.env_var is not None or self is self.kconfig.defconfig_list: self._write_to_conf = False self._cached_str_val = val return val @property def tri_value(self): """ See the class documentation. """ if self._cached_tri_val is not None: return self._cached_tri_val if self.orig_type not in _BOOL_TRISTATE: if self.orig_type: # != UNKNOWN # Would take some work to give the location here self.kconfig._warn( "The {} symbol {} is being evaluated in a logical context " "somewhere. It will always evaluate to n." 
.format(TYPE_TO_STR[self.orig_type], self.name_and_loc)) self._cached_tri_val = 0 return 0 # Warning: See Symbol._rec_invalidate(), and note that this is a hidden # function call (property magic) vis = self.visibility self._write_to_conf = (vis != 0) val = 0 if not self.choice: # Non-choice symbol if vis and self.user_value is not None: # If the symbol is visible and has a user value, use that val = min(self.user_value, vis) else: # Otherwise, look at defaults and weak reverse dependencies # (implies) for default, cond in self.defaults: dep_val = expr_value(cond) if dep_val: val = min(expr_value(default), dep_val) if val: self._write_to_conf = True break # Weak reverse dependencies are only considered if our # direct dependencies are met dep_val = expr_value(self.weak_rev_dep) if dep_val and expr_value(self.direct_dep): val = max(dep_val, val) self._write_to_conf = True # Reverse (select-related) dependencies take precedence dep_val = expr_value(self.rev_dep) if dep_val: if expr_value(self.direct_dep) < dep_val: self._warn_select_unsatisfied_deps() val = max(dep_val, val) self._write_to_conf = True # m is promoted to y for (1) bool symbols and (2) symbols with a # weak_rev_dep (from imply) of y if val == 1 and \ (self.type is BOOL or expr_value(self.weak_rev_dep) == 2): val = 2 elif vis == 2: # Visible choice symbol in y-mode choice. The choice mode limits # the visibility of choice symbols, so it's sufficient to just # check the visibility of the choice symbols themselves. val = 2 if self.choice.selection is self else 0 elif vis and self.user_value: # Visible choice symbol in m-mode choice, with set non-0 user value val = 1 self._cached_tri_val = val return val @property def assignable(self): """ See the class documentation. """ if self._cached_assignable is None: self._cached_assignable = self._assignable() return self._cached_assignable @property def visibility(self): """ See the class documentation. 
""" if self._cached_vis is None: self._cached_vis = _visibility(self) return self._cached_vis @property def config_string(self): """ See the class documentation. """ # _write_to_conf is determined when the value is calculated. This is a # hidden function call due to property magic. val = self.str_value if not self._write_to_conf: return "" if self.orig_type in _BOOL_TRISTATE: return "{}{}={}\n" \ .format(self.kconfig.config_prefix, self.name, val) \ if val != "n" else \ "# {}{} is not set\n" \ .format(self.kconfig.config_prefix, self.name) if self.orig_type in _INT_HEX: return "{}{}={}\n" \ .format(self.kconfig.config_prefix, self.name, val) # sym.orig_type is STRING return '{}{}="{}"\n' \ .format(self.kconfig.config_prefix, self.name, escape(val)) @property def name_and_loc(self): """ See the class documentation. """ return self.name + " " + _locs(self) def set_value(self, value): """ Sets the user value of the symbol. Equal in effect to assigning the value to the symbol within a .config file. For bool and tristate symbols, use the 'assignable' attribute to check which values can currently be assigned. Setting values outside 'assignable' will cause Symbol.user_value to differ from Symbol.str/tri_value (be truncated down or up). Setting a choice symbol to 2 (y) sets Choice.user_selection to the choice symbol in addition to setting Symbol.user_value. Choice.user_selection is considered when the choice is in y mode (the "normal" mode). Other symbols that depend (possibly indirectly) on this symbol are automatically recalculated to reflect the assigned value. value: The user value to give to the symbol. For bool and tristate symbols, n/m/y can be specified either as 0/1/2 (the usual format for tristate values in Kconfiglib) or as one of the strings "n", "m", or "y". For other symbol types, pass a string. Note that the value for an int/hex symbol is passed as a string, e.g. "123" or "0x0123". The format of this string is preserved in the output. 
          Values that are invalid for the type (such as "foo" or 1 (m) for a
          BOOL or "0x123" for an INT) are ignored and won't be stored in
          Symbol.user_value. Kconfiglib will print a warning by default for
          invalid assignments, and set_value() will return False.

        Returns True if the value is valid for the type of the symbol, and
        False otherwise. This only looks at the form of the value. For BOOL and
        TRISTATE symbols, check the Symbol.assignable attribute to see what
        values are currently in range and would actually be reflected in the
        value of the symbol. For other symbol types, check whether the
        visibility is non-n.
        """
        if self.orig_type in _BOOL_TRISTATE and value in STR_TO_TRI:
            value = STR_TO_TRI[value]

        # If the new user value matches the old, nothing changes, and we can
        # avoid invalidating cached values.
        #
        # This optimization is skipped for choice symbols: Setting a choice
        # symbol's user value to y might change the state of the choice, so it
        # wouldn't be safe (symbol user values always match the values set in a
        # .config file or via set_value(), and are never implicitly updated).
        if value == self.user_value and not self.choice:
            self._was_set = True
            return True

        # Check if the value is valid for our type. Note that 'value in (2, 0)'
        # deliberately excludes 1 (m) for BOOL symbols.
        if not (self.orig_type is BOOL and value in (2, 0) or
                self.orig_type is TRISTATE and value in TRI_TO_STR or
                value.__class__ is str and
                (self.orig_type is STRING or
                 self.orig_type is INT and _is_base_n(value, 10) or
                 self.orig_type is HEX and _is_base_n(value, 16)
                                       and int(value, 16) >= 0)):

            # Display tristate values as n, m, y in the warning
            self.kconfig._warn(
                "the value {} is invalid for {}, which has type {} -- "
                "assignment ignored"
                .format(TRI_TO_STR[value] if value in TRI_TO_STR else
                            "'{}'".format(value),
                        self.name_and_loc, TYPE_TO_STR[self.orig_type]))

            return False

        self.user_value = value
        self._was_set = True

        if self.choice and value == 2:
            # Setting a choice symbol to y makes it the user selection of the
            # choice. Like for symbol user values, the user selection is not
            # guaranteed to match the actual selection of the choice, as
            # dependencies come into play.
            self.choice.user_selection = self
            self.choice._was_set = True
            self.choice._rec_invalidate()
        else:
            self._rec_invalidate_if_has_prompt()

        return True

    def unset_value(self):
        """
        Removes any user value from the symbol, as if the symbol had never
        gotten a user value via Kconfig.load_config() or Symbol.set_value().
        """
        if self.user_value is not None:
            self.user_value = None
            self._rec_invalidate_if_has_prompt()

    @property
    def referenced(self):
        """
        See the class documentation.
        """
        return {item for node in self.nodes for item in node.referenced}

    @property
    def orig_defaults(self):
        """
        See the class documentation.
        """
        return [d for node in self.nodes for d in node.orig_defaults]

    @property
    def orig_selects(self):
        """
        See the class documentation.
        """
        return [s for node in self.nodes for s in node.orig_selects]

    @property
    def orig_implies(self):
        """
        See the class documentation.
        """
        return [i for node in self.nodes for i in node.orig_implies]

    @property
    def orig_ranges(self):
        """
        See the class documentation.
        """
        return [r for node in self.nodes for r in node.orig_ranges]

    def __repr__(self):
        """
        Returns a string with information about the symbol (including its name,
        value, visibility, and location(s)) when it is evaluated on e.g. the
        interactive Python prompt.
""" fields = ["symbol " + self.name, TYPE_TO_STR[self.type]] add = fields.append for node in self.nodes: if node.prompt: add('"{}"'.format(node.prompt[0])) # Only add quotes for non-bool/tristate symbols add("value " + (self.str_value if self.orig_type in _BOOL_TRISTATE else '"{}"'.format(self.str_value))) if not self.is_constant: # These aren't helpful to show for constant symbols if self.user_value is not None: # Only add quotes for non-bool/tristate symbols add("user value " + (TRI_TO_STR[self.user_value] if self.orig_type in _BOOL_TRISTATE else '"{}"'.format(self.user_value))) add("visibility " + TRI_TO_STR[self.visibility]) if self.choice: add("choice symbol") if self.is_allnoconfig_y: add("allnoconfig_y") if self is self.kconfig.defconfig_list: add("is the defconfig_list symbol") if self.env_var is not None: add("from environment variable " + self.env_var) if self is self.kconfig.modules: add("is the modules symbol") add("direct deps " + TRI_TO_STR[expr_value(self.direct_dep)]) if self.nodes: for node in self.nodes: add("{}:{}".format(node.filename, node.linenr)) else: add("constant" if self.is_constant else "undefined") return "<{}>".format(", ".join(fields)) def __str__(self): """ Returns a string representation of the symbol when it is printed. Matches the Kconfig format, with any parent dependencies propagated to the 'depends on' condition. The string is constructed by joining the strings returned by MenuNode.__str__() for each of the symbol's menu nodes, so symbols defined in multiple locations will return a string with all definitions. The returned string does not end in a newline. An empty string is returned for undefined and constant symbols. """ return self.custom_str(standard_sc_expr_str) def custom_str(self, sc_expr_str_fn): """ Works like Symbol.__str__(), but allows a custom format to be used for all symbol/choice references. See expr_str(). 
""" return "\n\n".join(node.custom_str(sc_expr_str_fn) for node in self.nodes) # # Private methods # def __init__(self): """ Symbol constructor -- not intended to be called directly by Kconfiglib clients. """ # These attributes are always set on the instance from outside and # don't need defaults: # kconfig # direct_dep # is_constant # name # rev_dep # weak_rev_dep # - UNKNOWN == 0 # - _visited is used during tree iteration and dep. loop detection self.orig_type = self._visited = 0 self.nodes = [] self.defaults = [] self.selects = [] self.implies = [] self.ranges = [] self.user_value = \ self.choice = \ self.env_var = \ self._cached_str_val = self._cached_tri_val = self._cached_vis = \ self._cached_assignable = None # _write_to_conf is calculated along with the value. If True, the # Symbol gets a .config entry. self.is_allnoconfig_y = \ self._was_set = \ self._write_to_conf = False # See Kconfig._build_dep() self._dependents = set() def _assignable(self): # Worker function for the 'assignable' attribute if self.orig_type not in _BOOL_TRISTATE: return () # Warning: See Symbol._rec_invalidate(), and note that this is a hidden # function call (property magic) vis = self.visibility if not vis: return () rev_dep_val = expr_value(self.rev_dep) if vis == 2: if self.choice: return (2,) if not rev_dep_val: if self.type is BOOL or expr_value(self.weak_rev_dep) == 2: return (0, 2) return (0, 1, 2) if rev_dep_val == 2: return (2,) # rev_dep_val == 1 if self.type is BOOL or expr_value(self.weak_rev_dep) == 2: return (2,) return (1, 2) # vis == 1 # Must be a tristate here, because bool m visibility gets promoted to y if not rev_dep_val: return (0, 1) if expr_value(self.weak_rev_dep) != 2 else (0, 2) if rev_dep_val == 2: return (2,) # vis == rev_dep_val == 1 return (1,) def _invalidate(self): # Marks the symbol as needing to be recalculated self._cached_str_val = self._cached_tri_val = self._cached_vis = \ self._cached_assignable = None def _rec_invalidate(self): # Invalidates 
the symbol and all items that (possibly) depend on it if self is self.kconfig.modules: # Invalidating MODULES has wide-ranging effects self.kconfig._invalidate_all() else: self._invalidate() for item in self._dependents: # _cached_vis doubles as a flag that tells us whether 'item' # has cached values, because it's calculated as a side effect # of calculating all other (non-constant) cached values. # # If item._cached_vis is None, it means there can't be cached # values on other items that depend on 'item', because if there # were, some value on 'item' would have been calculated and # item._cached_vis set as a side effect. It's therefore safe to # stop the invalidation at symbols with _cached_vis None. # # This approach massively speeds up scripts that set a lot of # values, vs simply invalidating all possibly dependent symbols # (even when you already have a list of all the dependent # symbols, because some symbols get huge dependency trees). # # This gracefully handles dependency loops too, which is nice # for choices, where the choice depends on the choice symbols # and vice versa. if item._cached_vis is not None: item._rec_invalidate() def _rec_invalidate_if_has_prompt(self): # Invalidates the symbol and its dependent symbols, but only if the # symbol has a prompt. User values never have an effect on promptless # symbols, so we skip invalidation for them as an optimization. # # This also prevents constant (quoted) symbols from being invalidated # if set_value() is called on them, which would make them lose their # value and break things. # # Prints a warning if the symbol has no prompt. In some contexts (e.g. # when loading a .config files) assignments to promptless symbols are # normal and expected, so the warning can be disabled. 
for node in self.nodes: if node.prompt: self._rec_invalidate() return if self.kconfig._warn_assign_no_prompt: self.kconfig._warn(self.name_and_loc + " has no prompt, meaning " "user values have no effect on it") def _str_default(self): # write_min_config() helper function. Returns the value the symbol # would get from defaults if it didn't have a user value. Uses exactly # the same algorithm as the C implementation (though a bit cleaned up), # for compatibility. if self.orig_type in _BOOL_TRISTATE: val = 0 # Defaults, selects, and implies do not affect choice symbols if not self.choice: for default, cond in self.defaults: cond_val = expr_value(cond) if cond_val: val = min(expr_value(default), cond_val) break val = max(expr_value(self.rev_dep), expr_value(self.weak_rev_dep), val) # Transpose mod to yes if type is bool (possibly due to modules # being disabled) if val == 1 and self.type is BOOL: val = 2 return TRI_TO_STR[val] if self.orig_type: # STRING/INT/HEX for default, cond in self.defaults: if expr_value(cond): return default.str_value return "" def _warn_select_unsatisfied_deps(self): # Helper for printing an informative warning when a symbol with # unsatisfied direct dependencies (dependencies from 'depends on', ifs, # and menus) is selected by some other symbol. Also warn if a symbol # whose direct dependencies evaluate to m is selected to y. 
msg = "{} has direct dependencies {} with value {}, but is " \ "currently being {}-selected by the following symbols:" \ .format(self.name_and_loc, expr_str(self.direct_dep), TRI_TO_STR[expr_value(self.direct_dep)], TRI_TO_STR[expr_value(self.rev_dep)]) # The reverse dependencies from each select are ORed together for select in split_expr(self.rev_dep, OR): if expr_value(select) <= expr_value(self.direct_dep): # Only include selects that exceed the direct dependencies continue # - 'select A if B' turns into A && B # - 'select A' just turns into A # # In both cases, we can split on AND and pick the first operand selecting_sym = split_expr(select, AND)[0] msg += "\n - {}, with value {}, direct dependencies {} " \ "(value: {})" \ .format(selecting_sym.name_and_loc, selecting_sym.str_value, expr_str(selecting_sym.direct_dep), TRI_TO_STR[expr_value(selecting_sym.direct_dep)]) if select.__class__ is tuple: msg += ", and select condition {} (value: {})" \ .format(expr_str(select[2]), TRI_TO_STR[expr_value(select[2])]) self.kconfig._warn(msg) class Choice(object): """ Represents a choice statement: choice ... endchoice The following attributes are available on Choice instances. They should be treated as read-only, and some are implemented through @property magic (but are still efficient to access due to internal caching). Note: Prompts, help texts, and locations are stored in the Choice's MenuNode(s) rather than in the Choice itself. Check the MenuNode class and the Choice.nodes attribute. This organization matches the C tools. name: The name of the choice, e.g. "FOO" for 'choice FOO', or None if the Choice has no name. type: The type of the choice. One of BOOL, TRISTATE, UNKNOWN. UNKNOWN is for choices defined without a type where none of the contained symbols have a type either (otherwise the choice inherits the type of the first symbol defined with a type). When running without modules (CONFIG_MODULES=n), TRISTATE choices magically change type to BOOL. 
This matches the C tools, and makes sense for menuconfig-like functionality. orig_type: The type as given in the Kconfig file, without any magic applied. Used when printing the choice. tri_value: The tristate value (mode) of the choice. A choice can be in one of three modes: 0 (n) - The choice is disabled and no symbols can be selected. For visible choices, this mode is only possible for choices with the 'optional' flag set (see kconfig-language.txt). 1 (m) - Any number of choice symbols can be set to m, the rest will be n. 2 (y) - One symbol will be y, the rest n. Only tristate choices can be in m mode. The visibility of the choice is an upper bound on the mode, and the mode in turn is an upper bound on the visibility of the choice symbols. To change the mode, use Choice.set_value(). Implementation note: The C tools internally represent choices as a type of symbol, with special-casing in many code paths. This is why there is a lot of similarity to Symbol. The value (mode) of a choice is really just a normal symbol value, and an implicit reverse dependency forces its lower bound to m for visible non-optional choices (the reverse dependency is 'm && <visibility>'). Symbols within choices get the choice propagated as a dependency to their properties. This turns the mode of the choice into an upper bound on e.g. the visibility of choice symbols, and explains the gotcha related to printing choice symbols mentioned in the module docstring. Kconfiglib uses a separate Choice class only because it makes the code and interface less confusing (especially in a user-facing interface). Corresponding attributes have the same name in the Symbol and Choice classes, for consistency and compatibility. str_value: Like choice.tri_value, but gives the value as one of the strings "n", "m", or "y" user_value: The value (mode) selected by the user through Choice.set_value(). Either 0, 1, or 2, or None if the user hasn't selected a mode. See Symbol.user_value. 
WARNING: Do not assign directly to this. It will break things. Use Choice.set_value() instead. assignable: See the symbol class documentation. Gives the assignable values (modes). selection: The Symbol instance of the currently selected symbol. None if the Choice is not in y mode or has no selected symbol (due to unsatisfied dependencies on choice symbols). WARNING: Do not assign directly to this. It will break things. Call sym.set_value(2) on the choice symbol you want to select instead. user_selection: The symbol selected by the user (by setting it to y). Ignored if the choice is not in y mode, but still remembered so that the choice "snaps back" to the user selection if the mode is changed back to y. This might differ from 'selection' due to unsatisfied dependencies. WARNING: Do not assign directly to this. It will break things. Call sym.set_value(2) on the choice symbol to be selected instead. visibility: See the Symbol class documentation. Acts on the value (mode). name_and_loc: Holds a string like "<choice MY_CHOICE> (defined at foo/Kconfig:12)" , giving the name of the choice and its definition location(s). If the choice has no name (isn't defined with 'choice MY_CHOICE'), then it will be shown as "<choice>" before the list of locations (always a single one in that case). syms: List of symbols contained in the choice. Obscure gotcha: If a symbol depends on the previous symbol within a choice so that an implicit menu is created, it won't be a choice symbol, and won't be included in 'syms'. nodes: A list of MenuNodes for this choice. In practice, the list will probably always contain a single MenuNode, but it is possible to give a choice a name and define it in multiple locations. defaults: List of (symbol, cond) tuples for the choice's 'defaults' properties. For example, 'default A if B && C' is represented as (A, (AND, B, C)). If there is no condition, 'cond' is self.kconfig.y. 
Note that 'depends on' and parent dependencies are propagated to 'default' conditions. orig_defaults: See the corresponding attribute on the MenuNode class. direct_dep: See Symbol.direct_dep. referenced: A set() with all symbols referenced in the properties and property conditions of the choice. Also includes dependencies from surrounding menus and ifs, because those get propagated to the choice (see the 'Intro to symbol values' section in the module docstring). is_optional: True if the choice has the 'optional' flag set on it and can be in n mode. kconfig: The Kconfig instance this choice is from. """ __slots__ = ( "_cached_assignable", "_cached_selection", "_cached_vis", "_dependents", "_visited", "_was_set", "defaults", "direct_dep", "is_constant", "is_optional", "kconfig", "name", "nodes", "orig_type", "syms", "user_selection", "user_value", ) # # Public interface # @property def type(self): """ Returns the type of the choice. See Symbol.type. """ if self.orig_type is TRISTATE and not self.kconfig.modules.tri_value: return BOOL return self.orig_type @property def str_value(self): """ See the class documentation. """ return TRI_TO_STR[self.tri_value] @property def tri_value(self): """ See the class documentation. """ # This emulates a reverse dependency of 'm && visibility' for # non-optional choices, which is how the C implementation does it val = 0 if self.is_optional else 1 if self.user_value is not None: val = max(val, self.user_value) # Warning: See Symbol._rec_invalidate(), and note that this is a hidden # function call (property magic) val = min(val, self.visibility) # Promote m to y for boolean choices return 2 if val == 1 and self.type is BOOL else val @property def assignable(self): """ See the class documentation. """ if self._cached_assignable is None: self._cached_assignable = self._assignable() return self._cached_assignable @property def visibility(self): """ See the class documentation. 
""" if self._cached_vis is None: self._cached_vis = _visibility(self) return self._cached_vis @property def name_and_loc(self): """ See the class documentation. """ # Reuse the expression format, which is '<choice (name, if any)>'. return standard_sc_expr_str(self) + " " + _locs(self) @property def selection(self): """ See the class documentation. """ if self._cached_selection is _NO_CACHED_SELECTION: self._cached_selection = self._selection() return self._cached_selection def set_value(self, value): """ Sets the user value (mode) of the choice. Like for Symbol.set_value(), the visibility might truncate the value. Choices without the 'optional' attribute (is_optional) can never be in n mode, but 0/"n" is still accepted since it's not a malformed value (though it will have no effect). Returns True if the value is valid for the type of the choice, and False otherwise. This only looks at the form of the value. Check the Choice.assignable attribute to see what values are currently in range and would actually be reflected in the mode of the choice. """ if value in STR_TO_TRI: value = STR_TO_TRI[value] if value == self.user_value: # We know the value must be valid if it was successfully set # previously self._was_set = True return True if not (self.orig_type is BOOL and value in (2, 0) or self.orig_type is TRISTATE and value in TRI_TO_STR): # Display tristate values as n, m, y in the warning self.kconfig._warn( "the value {} is invalid for {}, which has type {} -- " "assignment ignored" .format(TRI_TO_STR[value] if value in TRI_TO_STR else "'{}'".format(value), self.name_and_loc, TYPE_TO_STR[self.orig_type])) return False self.user_value = value self._was_set = True self._rec_invalidate() return True def unset_value(self): """ Resets the user value (mode) and user selection of the Choice, as if the user had never touched the mode or any of the choice symbols. 
""" if self.user_value is not None or self.user_selection: self.user_value = self.user_selection = None self._rec_invalidate() @property def referenced(self): """ See the class documentation. """ return {item for node in self.nodes for item in node.referenced} @property def orig_defaults(self): """ See the class documentation. """ return [d for node in self.nodes for d in node.orig_defaults] def __repr__(self): """ Returns a string with information about the choice when it is evaluated on e.g. the interactive Python prompt. """ fields = ["choice " + self.name if self.name else "choice", TYPE_TO_STR[self.type]] add = fields.append for node in self.nodes: if node.prompt: add('"{}"'.format(node.prompt[0])) add("mode " + self.str_value) if self.user_value is not None: add('user mode {}'.format(TRI_TO_STR[self.user_value])) if self.selection: add("{} selected".format(self.selection.name)) if self.user_selection: user_sel_str = "{} selected by user" \ .format(self.user_selection.name) if self.selection is not self.user_selection: user_sel_str += " (overridden)" add(user_sel_str) add("visibility " + TRI_TO_STR[self.visibility]) if self.is_optional: add("optional") for node in self.nodes: add("{}:{}".format(node.filename, node.linenr)) return "<{}>".format(", ".join(fields)) def __str__(self): """ Returns a string representation of the choice when it is printed. Matches the Kconfig format (though without the contained choice symbols), with any parent dependencies propagated to the 'depends on' condition. The returned string does not end in a newline. See Symbol.__str__() as well. """ return self.custom_str(standard_sc_expr_str) def custom_str(self, sc_expr_str_fn): """ Works like Choice.__str__(), but allows a custom format to be used for all symbol/choice references. See expr_str(). 
""" return "\n\n".join(node.custom_str(sc_expr_str_fn) for node in self.nodes) # # Private methods # def __init__(self): """ Choice constructor -- not intended to be called directly by Kconfiglib clients. """ # These attributes are always set on the instance from outside and # don't need defaults: # direct_dep # kconfig # - UNKNOWN == 0 # - _visited is used during dep. loop detection self.orig_type = self._visited = 0 self.nodes = [] self.syms = [] self.defaults = [] self.name = \ self.user_value = self.user_selection = \ self._cached_vis = self._cached_assignable = None self._cached_selection = _NO_CACHED_SELECTION # is_constant is checked by _depend_on(). Just set it to avoid having # to special-case choices. self.is_constant = self.is_optional = False # See Kconfig._build_dep() self._dependents = set() def _assignable(self): # Worker function for the 'assignable' attribute # Warning: See Symbol._rec_invalidate(), and note that this is a hidden # function call (property magic) vis = self.visibility if not vis: return () if vis == 2: if not self.is_optional: return (2,) if self.type is BOOL else (1, 2) return (0, 2) if self.type is BOOL else (0, 1, 2) # vis == 1 return (0, 1) if self.is_optional else (1,) def _selection(self): # Worker function for the 'selection' attribute # Warning: See Symbol._rec_invalidate(), and note that this is a hidden # function call (property magic) if self.tri_value != 2: # Not in y mode, so no selection return None # Use the user selection if it's visible if self.user_selection and self.user_selection.visibility: return self.user_selection # Otherwise, check if we have a default return self._selection_from_defaults() def _selection_from_defaults(self): # Check if we have a default for sym, cond in self.defaults: # The default symbol must be visible too if expr_value(cond) and sym.visibility: return sym # Otherwise, pick the first visible symbol, if any for sym in self.syms: if sym.visibility: return sym # Couldn't find a selection 
return None def _invalidate(self): self._cached_vis = self._cached_assignable = None self._cached_selection = _NO_CACHED_SELECTION def _rec_invalidate(self): # See Symbol._rec_invalidate() self._invalidate() for item in self._dependents: if item._cached_vis is not None: item._rec_invalidate() class MenuNode(object): """ Represents a menu node in the configuration. This corresponds to an entry in e.g. the 'make menuconfig' interface, though non-visible choices, menus, and comments also get menu nodes. If a symbol or choice is defined in multiple locations, it gets one menu node for each location. The top-level menu node, corresponding to the implicit top-level menu, is available in Kconfig.top_node. The menu nodes for a Symbol or Choice can be found in the Symbol/Choice.nodes attribute. Menus and comments are represented as plain menu nodes, with their text stored in the prompt attribute (prompt[0]). This mirrors the C implementation. The following attributes are available on MenuNode instances. They should be viewed as read-only. item: Either a Symbol, a Choice, or one of the constants MENU and COMMENT. Menus and comments are represented as plain menu nodes. Ifs are collapsed (matching the C implementation) and do not appear in the final menu tree. next: The following menu node. None if there is no following node. list: The first child menu node. None if there are no children. Choices and menus naturally have children, but Symbols can also have children because of menus created automatically from dependencies (see kconfig-language.txt). parent: The parent menu node. None if there is no parent. prompt: A (string, cond) tuple with the prompt for the menu node and its conditional expression (which is self.kconfig.y if there is no condition). None if there is no prompt. For symbols and choices, the prompt is stored in the MenuNode rather than the Symbol or Choice instance. For menus and comments, the prompt holds the text. 
    defaults:
      The 'default' properties for this particular menu node. See
      symbol.defaults.

      When evaluating defaults, you should use Symbol/Choice.defaults instead,
      as it includes properties from all menu nodes (a symbol/choice can have
      multiple definition locations/menu nodes). MenuNode.defaults is meant
      for documentation generation.

    selects:
      Like MenuNode.defaults, for selects.

    implies:
      Like MenuNode.defaults, for implies.

    ranges:
      Like MenuNode.defaults, for ranges.

    orig_prompt:
    orig_defaults:
    orig_selects:
    orig_implies:
    orig_ranges:
      These work like the corresponding attributes without orig_*, but
      omit any dependencies propagated from 'depends on' and surrounding 'if's
      (the direct dependencies, stored in MenuNode.dep).

      One use for this is generating less cluttered documentation, by only
      showing the direct dependencies in one place.

    help:
      The help text for the menu node for Symbols and Choices. None if there
      is no help text. Always stored in the node rather than the Symbol or
      Choice. It is possible to have a separate help text at each location if
      a symbol is defined in multiple locations.

      Trailing whitespace (including a final newline) is stripped from the
      help text. This was not the case before Kconfiglib 10.21.0, where the
      format was undocumented.

    dep:
      The direct ('depends on') dependencies for the menu node, or
      self.kconfig.y if there are no direct dependencies.

      This attribute includes any dependencies from surrounding menus and ifs.
      Those get propagated to the direct dependencies, and the resulting
      direct dependencies in turn get propagated to the conditions of all
      properties.

      If a symbol or choice is defined in multiple locations, only the
      properties defined at a particular location get the corresponding
      MenuNode.dep dependencies propagated to them.

    visibility:
      The 'visible if' dependencies for the menu node (which must represent a
      menu), or self.kconfig.y if there are no 'visible if' dependencies.

      'visible if' dependencies are recursively propagated to the prompts of
      symbols and choices within the menu.

    referenced:
      A set() with all symbols and choices referenced in the properties and
      property conditions of the menu node.

      Also includes dependencies inherited from surrounding menus and ifs.
      Choices appear in the dependencies of choice symbols.

    is_menuconfig:
      Set to True if the children of the menu node should be displayed in a
      separate menu. This is the case for the following items:

        - Menus (node.item == MENU)

        - Choices

        - Symbols defined with the 'menuconfig' keyword. The children come
          from implicitly created submenus, and should be displayed in a
          separate menu rather than being indented.

      'is_menuconfig' is just a hint on how to display the menu node. It's
      ignored internally by Kconfiglib, except when printing symbols.

    filename/linenr:
      The location where the menu node appears. The filename is relative to
      $srctree (or to the current directory if $srctree isn't set), except
      absolute paths are used for paths outside $srctree.

    include_path:
      A tuple of (filename, linenr) tuples, giving the locations of the
      'source' statements via which the Kconfig file containing this menu node
      was included. The first element is the location of the 'source'
      statement in the top-level Kconfig file passed to Kconfig.__init__(),
      etc.

      Note that the Kconfig file of the menu node itself isn't included. Check
      'filename' and 'linenr' for that.

    kconfig:
      The Kconfig instance the menu node is from.
    """
    __slots__ = (
        "dep",
        "filename",
        "help",
        "include_path",
        "is_menuconfig",
        "item",
        "kconfig",
        "linenr",
        "list",
        "next",
        "parent",
        "prompt",
        "visibility",

        # Properties
        "defaults",
        "selects",
        "implies",
        "ranges",
    )

    def __init__(self):
        # Properties defined on this particular menu node. A local 'depends on'
        # only applies to these, in case a symbol is defined in multiple
        # locations.
        self.defaults = []
        self.selects = []
        self.implies = []
        self.ranges = []

    @property
    def orig_prompt(self):
        """
        See the class documentation.
        """
        if not self.prompt:
            return None
        return (self.prompt[0], self._strip_dep(self.prompt[1]))

    @property
    def orig_defaults(self):
        """
        See the class documentation.
        """
        return [(default, self._strip_dep(cond))
                for default, cond in self.defaults]

    @property
    def orig_selects(self):
        """
        See the class documentation.
        """
        return [(select, self._strip_dep(cond))
                for select, cond in self.selects]

    @property
    def orig_implies(self):
        """
        See the class documentation.
        """
        return [(imply, self._strip_dep(cond))
                for imply, cond in self.implies]

    @property
    def orig_ranges(self):
        """
        See the class documentation.
        """
        return [(low, high, self._strip_dep(cond))
                for low, high, cond in self.ranges]

    @property
    def referenced(self):
        """
        See the class documentation.
        """
        # self.dep is included to catch dependencies from a lone 'depends on'
        # when there are no properties to propagate it to
        res = expr_items(self.dep)

        if self.prompt:
            res |= expr_items(self.prompt[1])

        if self.item is MENU:
            res |= expr_items(self.visibility)

        for value, cond in self.defaults:
            res |= expr_items(value)
            res |= expr_items(cond)

        for value, cond in self.selects:
            res.add(value)
            res |= expr_items(cond)

        for value, cond in self.implies:
            res.add(value)
            res |= expr_items(cond)

        for low, high, cond in self.ranges:
            res.add(low)
            res.add(high)
            res |= expr_items(cond)

        return res

    def __repr__(self):
        """
        Returns a string with information about the menu node when it is
        evaluated on e.g. the interactive Python prompt.
        """
        fields = []
        add = fields.append

        if self.item.__class__ is Symbol:
            add("menu node for symbol " + self.item.name)

        elif self.item.__class__ is Choice:
            s = "menu node for choice"
            if self.item.name is not None:
                s += " " + self.item.name
            add(s)

        elif self.item is MENU:
            add("menu node for menu")

        else:  # self.item is COMMENT
            add("menu node for comment")

        if self.prompt:
            add('prompt "{}" (visibility {})'.format(
                self.prompt[0], TRI_TO_STR[expr_value(self.prompt[1])]))

        if self.item.__class__ is Symbol and self.is_menuconfig:
            add("is menuconfig")

        add("deps " + TRI_TO_STR[expr_value(self.dep)])

        if self.item is MENU:
            add("'visible if' deps " + TRI_TO_STR[expr_value(self.visibility)])

        if self.item.__class__ in _SYMBOL_CHOICE and self.help is not None:
            add("has help")

        if self.list:
            add("has child")

        if self.next:
            add("has next")

        add("{}:{}".format(self.filename, self.linenr))

        return "<{}>".format(", ".join(fields))

    def __str__(self):
        """
        Returns a string representation of the menu node. Matches the Kconfig
        format, with any parent dependencies propagated to the 'depends on'
        condition.

        The output could (almost) be fed back into a Kconfig parser to redefine
        the object associated with the menu node. See the module documentation
        for a gotcha related to choice symbols.

        For symbols and choices with multiple menu nodes (multiple definition
        locations), properties that aren't associated with a particular menu
        node are shown on all menu nodes ('option env=...', 'optional' for
        choices, etc.).

        The returned string does not end in a newline.
        """
        return self.custom_str(standard_sc_expr_str)

    def custom_str(self, sc_expr_str_fn):
        """
        Works like MenuNode.__str__(), but allows a custom format to be used
        for all symbol/choice references. See expr_str().
""" return self._menu_comment_node_str(sc_expr_str_fn) \ if self.item in _MENU_COMMENT else \ self._sym_choice_node_str(sc_expr_str_fn) def _menu_comment_node_str(self, sc_expr_str_fn): s = '{} "{}"'.format("menu" if self.item is MENU else "comment", self.prompt[0]) if self.dep is not self.kconfig.y: s += "\n\tdepends on {}".format(expr_str(self.dep, sc_expr_str_fn)) if self.item is MENU and self.visibility is not self.kconfig.y: s += "\n\tvisible if {}".format(expr_str(self.visibility, sc_expr_str_fn)) return s def _sym_choice_node_str(self, sc_expr_str_fn): def indent_add(s): lines.append("\t" + s) def indent_add_cond(s, cond): if cond is not self.kconfig.y: s += " if " + expr_str(cond, sc_expr_str_fn) indent_add(s) sc = self.item if sc.__class__ is Symbol: lines = [("menuconfig " if self.is_menuconfig else "config ") + sc.name] else: lines = ["choice " + sc.name if sc.name else "choice"] if sc.orig_type and not self.prompt: # sc.orig_type != UNKNOWN # If there's a prompt, we'll use the '<type> "prompt"' shorthand # instead indent_add(TYPE_TO_STR[sc.orig_type]) if self.prompt: if sc.orig_type: prefix = TYPE_TO_STR[sc.orig_type] else: # Symbol defined without a type (which generates a warning) prefix = "prompt" indent_add_cond(prefix + ' "{}"'.format(escape(self.prompt[0])), self.orig_prompt[1]) if sc.__class__ is Symbol: if sc.is_allnoconfig_y: indent_add("option allnoconfig_y") if sc is sc.kconfig.defconfig_list: indent_add("option defconfig_list") if sc.env_var is not None: indent_add('option env="{}"'.format(sc.env_var)) if sc is sc.kconfig.modules: indent_add("option modules") for low, high, cond in self.orig_ranges: indent_add_cond( "range {} {}".format(sc_expr_str_fn(low), sc_expr_str_fn(high)), cond) for default, cond in self.orig_defaults: indent_add_cond("default " + expr_str(default, sc_expr_str_fn), cond) if sc.__class__ is Choice and sc.is_optional: indent_add("optional") if sc.__class__ is Symbol: for select, cond in self.orig_selects: 
indent_add_cond("select " + sc_expr_str_fn(select), cond) for imply, cond in self.orig_implies: indent_add_cond("imply " + sc_expr_str_fn(imply), cond) if self.dep is not sc.kconfig.y: indent_add("depends on " + expr_str(self.dep, sc_expr_str_fn)) if self.help is not None: indent_add("help") for line in self.help.splitlines(): indent_add(" " + line) return "\n".join(lines) def _strip_dep(self, expr): # Helper function for removing MenuNode.dep from 'expr'. Uses two # pieces of internal knowledge: (1) Expressions are reused rather than # copied, and (2) the direct dependencies always appear at the end. # ... if dep -> ... if y if self.dep is expr: return self.kconfig.y # (AND, X, dep) -> X if expr.__class__ is tuple and expr[0] is AND and expr[2] is self.dep: return expr[1] return expr class Variable(object): """ Represents a preprocessor variable/function. The following attributes are available: name: The name of the variable. value: The unexpanded value of the variable. expanded_value: The expanded value of the variable. For simple variables (those defined with :=), this will equal 'value'. Accessing this property will raise a KconfigError if the expansion seems to be stuck in a loop. Accessing this field is the same as calling expanded_value_w_args() with no arguments. I hadn't considered function arguments when adding it. It is retained for backwards compatibility though. is_recursive: True if the variable is recursive (defined with =). """ __slots__ = ( "_n_expansions", "is_recursive", "kconfig", "name", "value", ) @property def expanded_value(self): """ See the class documentation. """ return self.expanded_value_w_args() def expanded_value_w_args(self, *args): """ Returns the expanded value of the variable/function. Any arguments passed will be substituted for $(1), $(2), etc. Raises a KconfigError if the expansion seems to be stuck in a loop. 
""" return self.kconfig._fn_val((self.name,) + args) def __repr__(self): return "<variable {}, {}, value '{}'>" \ .format(self.name, "recursive" if self.is_recursive else "immediate", self.value) class KconfigError(Exception): """ Exception raised for Kconfig-related errors. KconfigError and KconfigSyntaxError are the same class. The KconfigSyntaxError alias is only maintained for backwards compatibility. """ KconfigSyntaxError = KconfigError # Backwards compatibility class InternalError(Exception): "Never raised. Kept around for backwards compatibility." # Workaround: # # If 'errno' and 'strerror' are set on IOError, then __str__() always returns # "[Errno <errno>] <strerror>", ignoring any custom message passed to the # constructor. By defining our own subclass, we can use a custom message while # also providing 'errno', 'strerror', and 'filename' to scripts. class _KconfigIOError(IOError): def __init__(self, ioerror, msg): self.msg = msg super(_KconfigIOError, self).__init__( ioerror.errno, ioerror.strerror, ioerror.filename) def __str__(self): return self.msg # # Public functions # def expr_value(expr): """ Evaluates the expression 'expr' to a tristate value. Returns 0 (n), 1 (m), or 2 (y). 'expr' must be an already-parsed expression from a Symbol, Choice, or MenuNode property. To evaluate an expression represented as a string, use Kconfig.eval_string(). Passing subexpressions of expressions to this function works as expected. """ if expr.__class__ is not tuple: return expr.tri_value if expr[0] is AND: v1 = expr_value(expr[1]) # Short-circuit the n case as an optimization (~5% faster # allnoconfig.py and allyesconfig.py, as of writing) return 0 if not v1 else min(v1, expr_value(expr[2])) if expr[0] is OR: v1 = expr_value(expr[1]) # Short-circuit the y case as an optimization return 2 if v1 == 2 else max(v1, expr_value(expr[2])) if expr[0] is NOT: return 2 - expr_value(expr[1]) # Relation # # Implements <, <=, >, >= comparisons as well. 
These were added to # kconfig in 31847b67 (kconfig: allow use of relations other than # (in)equality). rel, v1, v2 = expr # If both operands are strings... if v1.orig_type is STRING and v2.orig_type is STRING: # ...then compare them lexicographically comp = _strcmp(v1.str_value, v2.str_value) else: # Otherwise, try to compare them as numbers try: comp = _sym_to_num(v1) - _sym_to_num(v2) except ValueError: # Fall back on a lexicographic comparison if the operands don't # parse as numbers comp = _strcmp(v1.str_value, v2.str_value) return 2*(comp == 0 if rel is EQUAL else comp != 0 if rel is UNEQUAL else comp < 0 if rel is LESS else comp <= 0 if rel is LESS_EQUAL else comp > 0 if rel is GREATER else comp >= 0) def standard_sc_expr_str(sc): """ Standard symbol/choice printing function. Uses plain Kconfig syntax, and displays choices as <choice> (or <choice NAME>, for named choices). See expr_str(). """ if sc.__class__ is Symbol: if sc.is_constant and sc.name not in STR_TO_TRI: return '"{}"'.format(escape(sc.name)) return sc.name return "<choice {}>".format(sc.name) if sc.name else "<choice>" def expr_str(expr, sc_expr_str_fn=standard_sc_expr_str): """ Returns the string representation of the expression 'expr', as in a Kconfig file. Passing subexpressions of expressions to this function works as expected. sc_expr_str_fn (default: standard_sc_expr_str): This function is called for every symbol/choice (hence "sc") appearing in the expression, with the symbol/choice as the argument. It is expected to return a string to be used for the symbol/choice. This can be used e.g. to turn symbols/choices into links when generating documentation, or for printing the value of each symbol/choice after it. Note that quoted values are represented as constants symbols (Symbol.is_constant == True). 
""" if expr.__class__ is not tuple: return sc_expr_str_fn(expr) if expr[0] is AND: return "{} && {}".format(_parenthesize(expr[1], OR, sc_expr_str_fn), _parenthesize(expr[2], OR, sc_expr_str_fn)) if expr[0] is OR: # This turns A && B || C && D into "(A && B) || (C && D)", which is # redundant, but more readable return "{} || {}".format(_parenthesize(expr[1], AND, sc_expr_str_fn), _parenthesize(expr[2], AND, sc_expr_str_fn)) if expr[0] is NOT: if expr[1].__class__ is tuple: return "!({})".format(expr_str(expr[1], sc_expr_str_fn)) return "!" + sc_expr_str_fn(expr[1]) # Symbol # Relation # # Relation operands are always symbols (quoted strings are constant # symbols) return "{} {} {}".format(sc_expr_str_fn(expr[1]), REL_TO_STR[expr[0]], sc_expr_str_fn(expr[2])) def expr_items(expr): """ Returns a set() of all items (symbols and choices) that appear in the expression 'expr'. Passing subexpressions of expressions to this function works as expected. """ res = set() def rec(subexpr): if subexpr.__class__ is tuple: # AND, OR, NOT, or relation rec(subexpr[1]) # NOTs only have a single operand if subexpr[0] is not NOT: rec(subexpr[2]) else: # Symbol or choice res.add(subexpr) rec(expr) return res def split_expr(expr, op): """ Returns a list containing the top-level AND or OR operands in the expression 'expr', in the same (left-to-right) order as they appear in the expression. This can be handy e.g. for splitting (weak) reverse dependencies from 'select' and 'imply' into individual selects/implies. op: Either AND to get AND operands, or OR to get OR operands. (Having this as an operand might be more future-safe than having two hardcoded functions.) 
Pseudo-code examples: split_expr( A , OR ) -> [A] split_expr( A && B , OR ) -> [A && B] split_expr( A || B , OR ) -> [A, B] split_expr( A || B , AND ) -> [A || B] split_expr( A || B || (C && D) , OR ) -> [A, B, C && D] # Second || is not at the top level split_expr( A || (B && (C || D)) , OR ) -> [A, B && (C || D)] # Parentheses don't matter as long as we stay at the top level (don't # encounter any non-'op' nodes) split_expr( (A || B) || C , OR ) -> [A, B, C] split_expr( A || (B || C) , OR ) -> [A, B, C] """ res = [] def rec(subexpr): if subexpr.__class__ is tuple and subexpr[0] is op: rec(subexpr[1]) rec(subexpr[2]) else: res.append(subexpr) rec(expr) return res def escape(s): r""" Escapes the string 's' in the same fashion as is done for display in Kconfig format and when writing strings to a .config file. " and \ are replaced by \" and \\, respectively. """ # \ must be escaped before " to avoid double escaping return s.replace("\\", r"\\").replace('"', r'\"') def unescape(s): r""" Unescapes the string 's'. \ followed by any character is replaced with just that character. Used internally when reading .config files. """ return _unescape_sub(r"\1", s) # unescape() helper _unescape_sub = re.compile(r"\\(.)").sub def standard_kconfig(description=None): """ Argument parsing helper for tools that take a single optional Kconfig file argument (default: Kconfig). Returns the Kconfig instance for the parsed configuration. Uses argparse internally. Exits with sys.exit() (which raises SystemExit) on errors. description (default: None): The 'description' passed to argparse.ArgumentParser(). argparse.RawDescriptionHelpFormatter is used, so formatting is preserved. 
""" import argparse parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=description) parser.add_argument( "kconfig", metavar="KCONFIG", default="Kconfig", nargs="?", help="Top-level Kconfig file (default: Kconfig)") return Kconfig(parser.parse_args().kconfig, suppress_traceback=True) def standard_config_filename(): """ Helper for tools. Returns the value of KCONFIG_CONFIG (which specifies the .config file to load/save) if it is set, and ".config" otherwise. Calling load_config() with filename=None might give the behavior you want, without having to use this function. """ return os.getenv("KCONFIG_CONFIG", ".config") def load_allconfig(kconf, filename): """ Use Kconfig.load_allconfig() instead, which was added in Kconfiglib 13.4.0. Supported for backwards compatibility. Might be removed at some point after a long period of deprecation warnings. """ allconfig = os.getenv("KCONFIG_ALLCONFIG") if allconfig is None: return def std_msg(e): # "Upcasts" a _KconfigIOError to an IOError, removing the custom # __str__() message. The standard message is better here. # # This might also convert an OSError to an IOError in obscure cases, # but it's probably not a big deal. The distinction is shaky (see # PEP-3151). 
return IOError(e.errno, e.strerror, e.filename) old_warn_assign_override = kconf.warn_assign_override old_warn_assign_redun = kconf.warn_assign_redun kconf.warn_assign_override = kconf.warn_assign_redun = False if allconfig in ("", "1"): try: print(kconf.load_config(filename, False)) except EnvironmentError as e1: try: print(kconf.load_config("all.config", False)) except EnvironmentError as e2: sys.exit("error: KCONFIG_ALLCONFIG is set, but neither {} " "nor all.config could be opened: {}, {}" .format(filename, std_msg(e1), std_msg(e2))) else: try: print(kconf.load_config(allconfig, False)) except EnvironmentError as e: sys.exit("error: KCONFIG_ALLCONFIG is set to '{}', which " "could not be opened: {}" .format(allconfig, std_msg(e))) kconf.warn_assign_override = old_warn_assign_override kconf.warn_assign_redun = old_warn_assign_redun # # Internal functions # def _visibility(sc): # Symbols and Choices have a "visibility" that acts as an upper bound on # the values a user can set for them, corresponding to the visibility in # e.g. 'make menuconfig'. This function calculates the visibility for the # Symbol or Choice 'sc' -- the logic is nearly identical. vis = 0 for node in sc.nodes: if node.prompt: vis = max(vis, expr_value(node.prompt[1])) if sc.__class__ is Symbol and sc.choice: if sc.choice.orig_type is TRISTATE and \ sc.orig_type is not TRISTATE and sc.choice.tri_value != 2: # Non-tristate choice symbols are only visible in y mode return 0 if sc.orig_type is TRISTATE and vis == 1 and sc.choice.tri_value == 2: # Choice symbols with m visibility are not visible in y mode return 0 # Promote m to y if we're dealing with a non-tristate (possibly due to # modules being disabled) if vis == 1 and sc.type is not TRISTATE: return 2 return vis def _depend_on(sc, expr): # Adds 'sc' (symbol or choice) as a "dependee" to all symbols in 'expr'. # Constant symbols in 'expr' are skipped as they can never change value # anyway. 
if expr.__class__ is tuple: # AND, OR, NOT, or relation _depend_on(sc, expr[1]) # NOTs only have a single operand if expr[0] is not NOT: _depend_on(sc, expr[2]) elif not expr.is_constant: # Non-constant symbol, or choice expr._dependents.add(sc) def _parenthesize(expr, type_, sc_expr_str_fn): # expr_str() helper. Adds parentheses around expressions of type 'type_'. if expr.__class__ is tuple and expr[0] is type_: return "({})".format(expr_str(expr, sc_expr_str_fn)) return expr_str(expr, sc_expr_str_fn) def _ordered_unique(lst): # Returns 'lst' with any duplicates removed, preserving order. This hacky # version seems to be a common idiom. It relies on short-circuit evaluation # and set.add() returning None, which is falsy. seen = set() seen_add = seen.add return [x for x in lst if x not in seen and not seen_add(x)] def _is_base_n(s, n): try: int(s, n) return True except ValueError: return False def _strcmp(s1, s2): # strcmp()-alike that returns -1, 0, or 1 return (s1 > s2) - (s1 < s2) def _sym_to_num(sym): # expr_value() helper for converting a symbol to a number. Raises # ValueError for symbols that can't be converted. # For BOOL and TRISTATE, n/m/y count as 0/1/2. This mirrors 9059a3493ef # ("kconfig: fix relational operators for bool and tristate symbols") in # the C implementation. return sym.tri_value if sym.orig_type in _BOOL_TRISTATE else \ int(sym.str_value, _TYPE_TO_BASE[sym.orig_type]) def _touch_dep_file(path, sym_name): # If sym_name is MY_SYM_NAME, touches my/sym/name.h. See the sync_deps() # docstring. sym_path = path + os.sep + sym_name.lower().replace("_", os.sep) + ".h" sym_path_dir = dirname(sym_path) if not exists(sym_path_dir): os.makedirs(sym_path_dir, 0o755) # A kind of truncating touch, mirroring the C tools os.close(os.open( sym_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o644)) def _save_old(path): # See write_config() if not os.path.isfile(path): # Backup only files (and symlinks to files). Simplest alternative # to avoid e.g. 
(potentially successful attempt to) rename /dev/null # (and to keep fifos). return def copy(src, dst): # Import as needed, to save some startup time import shutil shutil.copyfile(src, dst) if islink(path): # Preserve symlinks copy_fn = copy elif hasattr(os, "replace"): # Python 3 (3.3+) only. Best choice when available, because it # removes <filename>.old on both *nix and Windows. copy_fn = os.replace elif os.name == "posix": # Removes <filename>.old on POSIX systems copy_fn = os.rename else: # Fall back on copying copy_fn = copy try: copy_fn(path, path + ".old") except Exception: # Ignore errors from 'path' missing as well as other errors. # <filename>.old file is usually more of a nice-to-have, and not worth # erroring out over e.g. if <filename>.old happens to be a directory. pass def _locs(sc): # Symbol/Choice.name_and_loc helper. Returns the "(defined at ...)" part of # the string. 'sc' is a Symbol or Choice. if sc.nodes: return "(defined at {})".format( ", ".join("{0.filename}:{0.linenr}".format(node) for node in sc.nodes)) return "(undefined)" # Menu manipulation def _expr_depends_on(expr, sym): # Reimplementation of expr_depends_symbol() from mconf.c. Used to determine # if a submenu should be implicitly created. This also influences which # items inside choice statements are considered choice items. if expr.__class__ is not tuple: return expr is sym if expr[0] in _EQUAL_UNEQUAL: # Check for one of the following: # sym = m/y, m/y = sym, sym != n, n != sym left, right = expr[1:] if right is sym: left, right = right, left elif left is not sym: return False return (expr[0] is EQUAL and right is sym.kconfig.m or right is sym.kconfig.y) or \ (expr[0] is UNEQUAL and right is sym.kconfig.n) return expr[0] is AND and \ (_expr_depends_on(expr[1], sym) or _expr_depends_on(expr[2], sym)) def _auto_menu_dep(node1, node2): # Returns True if node2 has an "automatic menu dependency" on node1. If # node2 has a prompt, we check its condition. 
Otherwise, we look directly # at node2.dep. return _expr_depends_on(node2.prompt[1] if node2.prompt else node2.dep, node1.item) def _flatten(node): # "Flattens" menu nodes without prompts (e.g. 'if' nodes and non-visible # symbols with children from automatic menu creation) so that their # children appear after them instead. This gives a clean menu structure # with no unexpected "jumps" in the indentation. # # Do not flatten promptless choices (which can appear "legitimately" if a # named choice is defined in multiple locations to add on symbols). It # looks confusing, and the menuconfig already shows all choice symbols if # you enter the choice at some location with a prompt. while node: if node.list and not node.prompt and \ node.item.__class__ is not Choice: last_node = node.list while 1: last_node.parent = node.parent if not last_node.next: break last_node = last_node.next last_node.next = node.next node.next = node.list node.list = None node = node.next def _remove_ifs(node): # Removes 'if' nodes (which can be recognized by MenuNode.item being None), # which are assumed to already have been flattened. The C implementation # doesn't bother to do this, but we expose the menu tree directly, and it # makes it nicer to work with. cur = node.list while cur and not cur.item: cur = cur.next node.list = cur while cur: next = cur.next while next and not next.item: next = next.next # Equivalent to # # cur.next = next # cur = next # # due to tricky Python semantics. The order matters. cur.next = cur = next def _finalize_choice(node): # Finalizes a choice, marking each symbol whose menu node has the choice as # the parent as a choice symbol, and automatically determining types if not # specified. 
choice = node.item cur = node.list while cur: if cur.item.__class__ is Symbol: cur.item.choice = choice choice.syms.append(cur.item) cur = cur.next # If no type is specified for the choice, its type is that of # the first choice item with a specified type if not choice.orig_type: for item in choice.syms: if item.orig_type: choice.orig_type = item.orig_type break # Each choice item of UNKNOWN type gets the type of the choice for sym in choice.syms: if not sym.orig_type: sym.orig_type = choice.orig_type def _check_dep_loop_sym(sym, ignore_choice): # Detects dependency loops using depth-first search on the dependency graph # (which is calculated earlier in Kconfig._build_dep()). # # Algorithm: # # 1. Symbols/choices start out with _visited = 0, meaning unvisited. # # 2. When a symbol/choice is first visited, _visited is set to 1, meaning # "visited, potentially part of a dependency loop". The recursive # search then continues from the symbol/choice. # # 3. If we run into a symbol/choice X with _visited already set to 1, # there's a dependency loop. The loop is found on the call stack by # recording symbols while returning ("on the way back") until X is seen # again. # # 4. Once a symbol/choice and all its dependencies (or dependents in this # case) have been checked recursively without detecting any loops, its # _visited is set to 2, meaning "visited, not part of a dependency # loop". # # This saves work if we run into the symbol/choice again in later calls # to _check_dep_loop_sym(). We just return immediately. # # Choices complicate things, as every choice symbol depends on every other # choice symbol in a sense. When a choice is "entered" via a choice symbol # X, we visit all choice symbols from the choice except X, and prevent # immediately revisiting the choice with a flag (ignore_choice). # # Maybe there's a better way to handle this (different flags or the # like...) 
if not sym._visited: # sym._visited == 0, unvisited sym._visited = 1 for dep in sym._dependents: # Choices show up in Symbol._dependents when the choice has the # symbol in a 'prompt' or 'default' condition (e.g. # 'default ... if SYM'). # # Since we aren't entering the choice via a choice symbol, all # choice symbols need to be checked, hence the None. loop = _check_dep_loop_choice(dep, None) \ if dep.__class__ is Choice \ else _check_dep_loop_sym(dep, False) if loop: # Dependency loop found return _found_dep_loop(loop, sym) if sym.choice and not ignore_choice: loop = _check_dep_loop_choice(sym.choice, sym) if loop: # Dependency loop found return _found_dep_loop(loop, sym) # The symbol is not part of a dependency loop sym._visited = 2 # No dependency loop found return None if sym._visited == 2: # The symbol was checked earlier and is already known to not be part of # a dependency loop return None # sym._visited == 1, found a dependency loop. Return the symbol as the # first element in it. return (sym,) def _check_dep_loop_choice(choice, skip): if not choice._visited: # choice._visited == 0, unvisited choice._visited = 1 # Check for loops involving choice symbols. If we came here via a # choice symbol, skip that one, as we'd get a false positive # '<sym FOO> -> <choice> -> <sym FOO>' loop otherwise. for sym in choice.syms: if sym is not skip: # Prevent the choice from being immediately re-entered via the # "is a choice symbol" path by passing True loop = _check_dep_loop_sym(sym, True) if loop: # Dependency loop found return _found_dep_loop(loop, choice) # The choice is not part of a dependency loop choice._visited = 2 # No dependency loop found return None if choice._visited == 2: # The choice was checked earlier and is already known to not be part of # a dependency loop return None # choice._visited == 1, found a dependency loop. Return the choice as the # first element in it. 
return (choice,) def _found_dep_loop(loop, cur): # Called "on the way back" when we know we have a loop # Is the symbol/choice 'cur' where the loop started? if cur is not loop[0]: # Nope, it's just a part of the loop return loop + (cur,) # Yep, we have the entire loop. Throw an exception that shows it. msg = "\nDependency loop\n" \ "===============\n\n" for item in loop: if item is not loop[0]: msg += "...depends on " if item.__class__ is Symbol and item.choice: msg += "the choice symbol " msg += "{}, with definition...\n\n{}\n\n" \ .format(item.name_and_loc, item) # Small wart: Since we reuse the already calculated # Symbol/Choice._dependents sets for recursive dependency detection, we # lose information on whether a dependency came from a 'select'/'imply' # condition or e.g. a 'depends on'. # # This might cause selecting symbols to "disappear". For example, # a symbol B having 'select A if C' gives a direct dependency from A to # C, since it corresponds to a reverse dependency of B && C. # # Always print reverse dependencies for symbols that have them to make # sure information isn't lost. I wonder if there's some neat way to # improve this. if item.__class__ is Symbol: if item.rev_dep is not item.kconfig.n: msg += "(select-related dependencies: {})\n\n" \ .format(expr_str(item.rev_dep)) if item.weak_rev_dep is not item.kconfig.n: msg += "(imply-related dependencies: {})\n\n" \ .format(expr_str(item.rev_dep)) msg += "...depends again on " + loop[0].name_and_loc raise KconfigError(msg) def _decoding_error(e, filename, macro_linenr=None): # Gives the filename and context for UnicodeDecodeError's, which are a pain # to debug otherwise. 'e' is the UnicodeDecodeError object. # # If the decoding error is for the output of a $(shell,...) command, # macro_linenr holds the line number where it was run (the exact line # number isn't available for decoding errors in files). 
raise KconfigError( "\n" "Malformed {} in {}\n" "Context: {}\n" "Problematic data: {}\n" "Reason: {}".format( e.encoding, "'{}'".format(filename) if macro_linenr is None else "output from macro at {}:{}".format(filename, macro_linenr), e.object[max(e.start - 40, 0):e.end + 40], e.object[e.start:e.end], e.reason)) def _warn_verbose_deprecated(fn_name): sys.stderr.write( "Deprecation warning: {0}()'s 'verbose' argument has no effect. Since " "Kconfiglib 12.0.0, the message is returned from {0}() instead, " "and is always generated. Do e.g. print(kconf.{0}()) if you want to " "want to show a message like \"Loaded configuration '.config'\" on " "stdout. The old API required ugly hacks to reuse messages in " "configuration interfaces.\n".format(fn_name)) # Predefined preprocessor functions def _filename_fn(kconf, _): return kconf.filename def _lineno_fn(kconf, _): return str(kconf.linenr) def _info_fn(kconf, _, msg): print("{}:{}: {}".format(kconf.filename, kconf.linenr, msg)) return "" def _warning_if_fn(kconf, _, cond, msg): if cond == "y": kconf._warn(msg, kconf.filename, kconf.linenr) return "" def _error_if_fn(kconf, _, cond, msg): if cond == "y": raise KconfigError("{}:{}: {}".format( kconf.filename, kconf.linenr, msg)) return "" def _shell_fn(kconf, _, command): import subprocess # Only import as needed, to save some startup time stdout, stderr = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE ).communicate() if not _IS_PY2: try: stdout = stdout.decode(kconf._encoding) stderr = stderr.decode(kconf._encoding) except UnicodeDecodeError as e: _decoding_error(e, kconf.filename, kconf.linenr) if stderr: kconf._warn("'{}' wrote to stderr: {}".format( command, "\n".join(stderr.splitlines())), kconf.filename, kconf.linenr) # Universal newlines with splitlines() (to prevent e.g. stray \r's in # command output on Windows), trailing newline removal, and # newline-to-space conversion. 
# # On Python 3 versions before 3.6, it's not possible to specify the # encoding when passing universal_newlines=True to Popen() (the 'encoding' # parameter was added in 3.6), so we do this manual version instead. return "\n".join(stdout.splitlines()).rstrip("\n").replace("\n", " ") # # Global constants # TRI_TO_STR = { 0: "n", 1: "m", 2: "y", } STR_TO_TRI = { "n": 0, "m": 1, "y": 2, } # Constant representing that there's no cached choice selection. This is # distinct from a cached None (no selection). Any object that's not None or a # Symbol will do. We test this with 'is'. _NO_CACHED_SELECTION = 0 # Are we running on Python 2? _IS_PY2 = sys.version_info[0] < 3 try: _UNAME_RELEASE = os.uname()[2] except AttributeError: # Only import as needed, to save some startup time import platform _UNAME_RELEASE = platform.uname()[2] # The token and type constants below are safe to test with 'is', which is a bit # faster (~30% faster on my machine, and a few % faster for total parsing # time), even without assuming Python's small integer optimization (which # caches small integer objects). The constants end up pointing to unique # integer objects, and since we consistently refer to them via the names below, # we always get the same object. # # Client code should use == though. # Tokens, with values 1, 2, ... . Avoiding 0 simplifies some checks by making # all tokens except empty strings truthy. 
( _T_ALLNOCONFIG_Y, _T_AND, _T_BOOL, _T_CHOICE, _T_CLOSE_PAREN, _T_COMMENT, _T_CONFIG, _T_DEFAULT, _T_DEFCONFIG_LIST, _T_DEF_BOOL, _T_DEF_HEX, _T_DEF_INT, _T_DEF_STRING, _T_DEF_TRISTATE, _T_DEPENDS, _T_ENDCHOICE, _T_ENDIF, _T_ENDMENU, _T_ENV, _T_EQUAL, _T_GREATER, _T_GREATER_EQUAL, _T_HELP, _T_HEX, _T_IF, _T_IMPLY, _T_INT, _T_LESS, _T_LESS_EQUAL, _T_MAINMENU, _T_MENU, _T_MENUCONFIG, _T_MODULES, _T_NOT, _T_ON, _T_OPEN_PAREN, _T_OPTION, _T_OPTIONAL, _T_OR, _T_ORSOURCE, _T_OSOURCE, _T_PROMPT, _T_RANGE, _T_RSOURCE, _T_SELECT, _T_SOURCE, _T_STRING, _T_TRISTATE, _T_UNEQUAL, _T_VISIBLE, ) = range(1, 51) # Keyword to token map, with the get() method assigned directly as a small # optimization _get_keyword = { "---help---": _T_HELP, "allnoconfig_y": _T_ALLNOCONFIG_Y, "bool": _T_BOOL, "boolean": _T_BOOL, "choice": _T_CHOICE, "comment": _T_COMMENT, "config": _T_CONFIG, "def_bool": _T_DEF_BOOL, "def_hex": _T_DEF_HEX, "def_int": _T_DEF_INT, "def_string": _T_DEF_STRING, "def_tristate": _T_DEF_TRISTATE, "default": _T_DEFAULT, "defconfig_list": _T_DEFCONFIG_LIST, "depends": _T_DEPENDS, "endchoice": _T_ENDCHOICE, "endif": _T_ENDIF, "endmenu": _T_ENDMENU, "env": _T_ENV, "grsource": _T_ORSOURCE, # Backwards compatibility "gsource": _T_OSOURCE, # Backwards compatibility "help": _T_HELP, "hex": _T_HEX, "if": _T_IF, "imply": _T_IMPLY, "int": _T_INT, "mainmenu": _T_MAINMENU, "menu": _T_MENU, "menuconfig": _T_MENUCONFIG, "modules": _T_MODULES, "on": _T_ON, "option": _T_OPTION, "optional": _T_OPTIONAL, "orsource": _T_ORSOURCE, "osource": _T_OSOURCE, "prompt": _T_PROMPT, "range": _T_RANGE, "rsource": _T_RSOURCE, "select": _T_SELECT, "source": _T_SOURCE, "string": _T_STRING, "tristate": _T_TRISTATE, "visible": _T_VISIBLE, }.get # The constants below match the value of the corresponding tokens to remove the # need for conversion # Node types MENU = _T_MENU COMMENT = _T_COMMENT # Expression types AND = _T_AND OR = _T_OR NOT = _T_NOT EQUAL = _T_EQUAL UNEQUAL = _T_UNEQUAL LESS = _T_LESS 
LESS_EQUAL = _T_LESS_EQUAL GREATER = _T_GREATER GREATER_EQUAL = _T_GREATER_EQUAL REL_TO_STR = { EQUAL: "=", UNEQUAL: "!=", LESS: "<", LESS_EQUAL: "<=", GREATER: ">", GREATER_EQUAL: ">=", } # Symbol/choice types. UNKNOWN is 0 (falsy) to simplify some checks. # Client code shouldn't rely on it though, as it was non-zero in # older versions. UNKNOWN = 0 BOOL = _T_BOOL TRISTATE = _T_TRISTATE STRING = _T_STRING INT = _T_INT HEX = _T_HEX TYPE_TO_STR = { UNKNOWN: "unknown", BOOL: "bool", TRISTATE: "tristate", STRING: "string", INT: "int", HEX: "hex", } # Used in comparisons. 0 means the base is inferred from the format of the # string. _TYPE_TO_BASE = { HEX: 16, INT: 10, STRING: 0, UNKNOWN: 0, } # def_bool -> BOOL, etc. _DEF_TOKEN_TO_TYPE = { _T_DEF_BOOL: BOOL, _T_DEF_HEX: HEX, _T_DEF_INT: INT, _T_DEF_STRING: STRING, _T_DEF_TRISTATE: TRISTATE, } # Tokens after which strings are expected. This is used to tell strings from # constant symbol references during tokenization, both of which are enclosed in # quotes. # # Identifier-like lexemes ("missing quotes") are also treated as strings after # these tokens. _T_CHOICE is included to avoid symbols being registered for # named choices. _STRING_LEX = frozenset({ _T_BOOL, _T_CHOICE, _T_COMMENT, _T_HEX, _T_INT, _T_MAINMENU, _T_MENU, _T_ORSOURCE, _T_OSOURCE, _T_PROMPT, _T_RSOURCE, _T_SOURCE, _T_STRING, _T_TRISTATE, }) # Various sets for quick membership tests. Gives a single global lookup and # avoids creating temporary dicts/tuples. 
_TYPE_TOKENS = frozenset({ _T_BOOL, _T_TRISTATE, _T_INT, _T_HEX, _T_STRING, }) _SOURCE_TOKENS = frozenset({ _T_SOURCE, _T_RSOURCE, _T_OSOURCE, _T_ORSOURCE, }) _REL_SOURCE_TOKENS = frozenset({ _T_RSOURCE, _T_ORSOURCE, }) # Obligatory (non-optional) sources _OBL_SOURCE_TOKENS = frozenset({ _T_SOURCE, _T_RSOURCE, }) _BOOL_TRISTATE = frozenset({ BOOL, TRISTATE, }) _BOOL_TRISTATE_UNKNOWN = frozenset({ BOOL, TRISTATE, UNKNOWN, }) _INT_HEX = frozenset({ INT, HEX, }) _SYMBOL_CHOICE = frozenset({ Symbol, Choice, }) _MENU_COMMENT = frozenset({ MENU, COMMENT, }) _EQUAL_UNEQUAL = frozenset({ EQUAL, UNEQUAL, }) _RELATIONS = frozenset({ EQUAL, UNEQUAL, LESS, LESS_EQUAL, GREATER, GREATER_EQUAL, }) # Helper functions for getting compiled regular expressions, with the needed # matching function returned directly as a small optimization. # # Use ASCII regex matching on Python 3. It's already the default on Python 2. def _re_match(regex): return re.compile(regex, 0 if _IS_PY2 else re.ASCII).match def _re_search(regex): return re.compile(regex, 0 if _IS_PY2 else re.ASCII).search # Various regular expressions used during parsing # The initial token on a line. Also eats leading and trailing whitespace, so # that we can jump straight to the next token (or to the end of the line if # there is only one token). # # This regex will also fail to match for empty lines and comment lines. # # '$' is included to detect preprocessor variable assignments with macro # expansions in the left-hand side. _command_match = _re_match(r"\s*([A-Za-z0-9_$-]+)\s*") # An identifier/keyword after the first token. Also eats trailing whitespace. # '$' is included to detect identifiers containing macro expansions. _id_keyword_match = _re_match(r"([A-Za-z0-9_$/.-]+)\s*") # A fragment in the left-hand side of a preprocessor variable assignment. These # are the portions between macro expansions ($(foo)). Macros are supported in # the LHS (variable name). 
_assignment_lhs_fragment_match = _re_match("[A-Za-z0-9_-]*") # The assignment operator and value (right-hand side) in a preprocessor # variable assignment _assignment_rhs_match = _re_match(r"\s*(=|:=|\+=)\s*(.*)") # Special characters/strings while expanding a macro ('(', ')', ',', and '$(') _macro_special_search = _re_search(r"\(|\)|,|\$\(") # Special characters/strings while expanding a string (quotes, '\', and '$(') _string_special_search = _re_search(r'"|\'|\\|\$\(') # Special characters/strings while expanding a symbol name. Also includes # end-of-line, in case the macro is the last thing on the line. _name_special_search = _re_search(r'[^A-Za-z0-9_$/.-]|\$\(|$') # A valid right-hand side for an assignment to a string symbol in a .config # file, including escaped characters. Extracts the contents. _conf_string_match = _re_match(r'"((?:[^\\"]|\\.)*)"')
apache-2.0
yugangw-msft/azure-cli
scripts/curl_install_pypi/install.py
2
16843
#!/usr/bin/env python # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # # This script will install the CLI into a directory and create an executable # at a specified file path that is the entry point into the CLI. # # The latest versions of all CLI command packages will be installed. # #pylint: disable=line-too-long import os import sys import platform import stat import tarfile import tempfile import shutil import subprocess import hashlib try: # Attempt to load python 3 module from urllib.request import urlopen except ImportError: # Import python 2 version from urllib2 import urlopen try: # Rename raw_input to input to support Python 2 input = raw_input except NameError: # Python 3 doesn't have raw_input pass AZ_DISPATCH_TEMPLATE = """#!/usr/bin/env bash {install_dir}/bin/python -m azure.cli "$@" """ VIRTUALENV_VERSION = '16.7.7' VIRTUALENV_ARCHIVE = 'virtualenv-'+VIRTUALENV_VERSION+'.tar.gz' VIRTUALENV_DOWNLOAD_URL = 'https://pypi.python.org/packages/source/v/virtualenv/'+VIRTUALENV_ARCHIVE VIRTUALENV_ARCHIVE_SHA256 = 'd257bb3773e48cac60e475a19b608996c73f4d333b3ba2e4e57d5ac6134e0136' DEFAULT_INSTALL_DIR = os.path.expanduser(os.path.join('~', 'lib', 'azure-cli')) DEFAULT_EXEC_DIR = os.path.expanduser(os.path.join('~', 'bin')) EXECUTABLE_NAME = 'az' USER_BASH_RC = os.path.expanduser(os.path.join('~', '.bashrc')) USER_BASH_PROFILE = os.path.expanduser(os.path.join('~', '.bash_profile')) COMPLETION_FILENAME = 'az.completion' PYTHON_ARGCOMPLETE_CODE = """ _python_argcomplete() { local IFS='\v' COMPREPLY=( $(IFS="$IFS" COMP_LINE="$COMP_LINE" COMP_POINT="$COMP_POINT" _ARGCOMPLETE_COMP_WORDBREAKS="$COMP_WORDBREAKS" _ARGCOMPLETE=1 "$1" 8>&1 9>&2 1>/dev/null 2>/dev/null) ) if [[ 
$? != 0 ]]; then unset COMPREPLY fi } complete -o nospace -o default -o bashdefault -F _python_argcomplete "az" """ class CLIInstallError(Exception): pass def print_status(msg=''): print('-- '+msg) def prompt_input(msg): return input('\n===> '+msg) def prompt_input_with_default(msg, default): if default: return prompt_input("{} (leave blank to use '{}'): ".format(msg, default)) or default else: return prompt_input('{}: '.format(msg)) def prompt_y_n(msg, default=None): if default not in [None, 'y', 'n']: raise ValueError("Valid values for default are 'y', 'n' or None") y = 'Y' if default == 'y' else 'y' n = 'N' if default == 'n' else 'n' while True: ans = prompt_input('{} ({}/{}): '.format(msg, y, n)) if ans.lower() == n.lower(): return False if ans.lower() == y.lower(): return True if default and not ans: return default == y.lower() def exec_command(command_list, cwd=None, env=None): print_status('Executing: '+str(command_list)) subprocess.check_call(command_list, cwd=cwd, env=env) def create_tmp_dir(): tmp_dir = tempfile.mkdtemp() return tmp_dir def create_dir(dir): if not os.path.isdir(dir): print_status("Creating directory '{}'.".format(dir)) os.makedirs(dir) def is_valid_sha256sum(a_file, expected_sum): sha256 = hashlib.sha256() with open(a_file, 'rb') as f: sha256.update(f.read()) computed_hash = sha256.hexdigest() return expected_sum == computed_hash def create_virtualenv(tmp_dir, install_dir): download_location = os.path.join(tmp_dir, VIRTUALENV_ARCHIVE) print_status('Downloading virtualenv package from {}.'.format(VIRTUALENV_DOWNLOAD_URL)) response = urlopen(VIRTUALENV_DOWNLOAD_URL) with open(download_location, 'wb') as f: f.write(response.read()) print_status("Downloaded virtualenv package to {}.".format(download_location)) if is_valid_sha256sum(download_location, VIRTUALENV_ARCHIVE_SHA256): print_status("Checksum of {} OK.".format(download_location)) else: raise CLIInstallError("The checksum of the downloaded virtualenv package does not match.") 
print_status("Extracting '{}' to '{}'.".format(download_location, tmp_dir)) package_tar = tarfile.open(download_location) package_tar.extractall(path=tmp_dir) package_tar.close() virtualenv_dir_name = 'virtualenv-'+VIRTUALENV_VERSION working_dir = os.path.join(tmp_dir, virtualenv_dir_name) cmd = [sys.executable, 'virtualenv.py', '--python', sys.executable, install_dir] exec_command(cmd, cwd=working_dir) def install_cli(install_dir, tmp_dir): path_to_pip = os.path.join(install_dir, 'bin', 'pip') cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'azure-cli', '--upgrade'] exec_command(cmd) def create_executable(exec_dir, install_dir): create_dir(exec_dir) exec_filepath = os.path.join(exec_dir, EXECUTABLE_NAME) with open(exec_filepath, 'w') as exec_file: exec_file.write(AZ_DISPATCH_TEMPLATE.format(install_dir=install_dir)) cur_stat = os.stat(exec_filepath) os.chmod(exec_filepath, cur_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) print_status("The executable is available at '{}'.".format(exec_filepath)) return exec_filepath def get_install_dir(): install_dir = None while not install_dir: prompt_message = 'In what directory would you like to place the install?' 
install_dir = prompt_input_with_default(prompt_message, DEFAULT_INSTALL_DIR) install_dir = os.path.realpath(os.path.expanduser(install_dir)) if ' ' in install_dir: print_status("The install directory '{}' cannot contain spaces.".format(install_dir)) install_dir = None else: create_dir(install_dir) if os.listdir(install_dir): print_status("'{}' is not empty and may contain a previous installation.".format(install_dir)) ans_yes = prompt_y_n('Remove this directory?', 'n') if ans_yes: shutil.rmtree(install_dir) print_status("Deleted '{}'.".format(install_dir)) create_dir(install_dir) else: # User opted to not delete the directory so ask for install directory again install_dir = None print_status("We will install at '{}'.".format(install_dir)) return install_dir def get_exec_dir(): exec_dir = None while not exec_dir: prompt_message = "In what directory would you like to place the '{}' executable?".format(EXECUTABLE_NAME) exec_dir = prompt_input_with_default(prompt_message, DEFAULT_EXEC_DIR) exec_dir = os.path.realpath(os.path.expanduser(exec_dir)) if ' ' in exec_dir: print_status("The executable directory '{}' cannot contain spaces.".format(exec_dir)) exec_dir = None create_dir(exec_dir) print_status("The executable will be in '{}'.".format(exec_dir)) return exec_dir def _backup_rc(rc_file): try: shutil.copyfile(rc_file, rc_file+'.backup') print_status("Backed up '{}' to '{}'".format(rc_file, rc_file+'.backup')) except (OSError, IOError): pass def _get_default_rc_file(): bashrc_exists = os.path.isfile(USER_BASH_RC) bash_profile_exists = os.path.isfile(USER_BASH_PROFILE) if not bashrc_exists and bash_profile_exists: return USER_BASH_PROFILE if bashrc_exists and bash_profile_exists and platform.system().lower() == 'darwin': return USER_BASH_PROFILE return USER_BASH_RC if bashrc_exists else None def _default_rc_file_creation_step(): rcfile = USER_BASH_PROFILE if platform.system().lower() == 'darwin' else USER_BASH_RC ans_yes = prompt_y_n('Could not automatically find a 
suitable file to use. Create {} now?'.format(rcfile), default='y') if ans_yes: open(rcfile, 'a').close() return rcfile return None def _find_line_in_file(file_path, search_pattern): try: with open(file_path, 'r', encoding="utf-8") as search_file: for line in search_file: if search_pattern in line: return True except (OSError, IOError): pass return False def _modify_rc(rc_file_path, line_to_add): if not _find_line_in_file(rc_file_path, line_to_add): with open(rc_file_path, 'a', encoding="utf-8") as rc_file: rc_file.write('\n'+line_to_add+'\n') def create_tab_completion_file(filename): with open(filename, 'w') as completion_file: completion_file.write(PYTHON_ARGCOMPLETE_CODE) print_status("Created tab completion file at '{}'".format(filename)) def get_rc_file_path(): rc_file = None default_rc_file = _get_default_rc_file() if not default_rc_file: rc_file = _default_rc_file_creation_step() rc_file = rc_file or prompt_input_with_default('Enter a path to an rc file to update', default_rc_file) if rc_file: rc_file_path = os.path.realpath(os.path.expanduser(rc_file)) if os.path.isfile(rc_file_path): return rc_file_path print_status("The file '{}' could not be found.".format(rc_file_path)) return None def warn_other_azs_on_path(exec_dir, exec_filepath): env_path = os.environ.get('PATH') conflicting_paths = [] if env_path: for p in env_path.split(':'): p_to_az = os.path.join(p, EXECUTABLE_NAME) if p != exec_dir and os.path.isfile(p_to_az): conflicting_paths.append(p_to_az) if conflicting_paths: print_status() print_status("** WARNING: Other '{}' executables are on your $PATH. 
**".format(EXECUTABLE_NAME)) print_status("Conflicting paths: {}".format(', '.join(conflicting_paths))) print_status("You can run this installation of the CLI with '{}'.".format(exec_filepath)) def handle_path_and_tab_completion(completion_file_path, exec_filepath, exec_dir): ans_yes = prompt_y_n('Modify profile to update your $PATH and enable shell/tab completion now?', 'y') if ans_yes: rc_file_path = get_rc_file_path() if not rc_file_path: raise CLIInstallError('No suitable profile file found.') _backup_rc(rc_file_path) line_to_add = "export PATH=$PATH:{}".format(exec_dir) _modify_rc(rc_file_path, line_to_add) line_to_add = "source '{}'".format(completion_file_path) _modify_rc(rc_file_path, line_to_add) print_status('Tab completion set up complete.') print_status("If tab completion is not activated, verify that '{}' is sourced by your shell.".format(rc_file_path)) warn_other_azs_on_path(exec_dir, exec_filepath) print_status() print_status('** Run `exec -l $SHELL` to restart your shell. **') print_status() else: print_status("If you change your mind, add 'source {}' to your rc file and restart your shell to enable tab completion.".format(completion_file_path)) print_status("You can run the CLI with '{}'.".format(exec_filepath)) def verify_python_version(): print_status('Verifying Python version.') v = sys.version_info if v < (2, 7): raise CLIInstallError('The CLI does not support Python versions less than 2.7.') if 'conda' in sys.version: raise CLIInstallError("This script does not support the Python Anaconda environment. 
" "Create an Anaconda virtual environment and install with 'pip'") print_status('Python version {}.{}.{} okay.'.format(v.major, v.minor, v.micro)) def _native_dependencies_for_dist(verify_cmd_args, install_cmd_args, dep_list): try: print_status("Executing: '{} {}'".format(' '.join(verify_cmd_args), ' '.join(dep_list))) subprocess.check_output(verify_cmd_args + dep_list, stderr=subprocess.STDOUT) print_status('Native dependencies okay.') except subprocess.CalledProcessError: err_msg = 'One or more of the following native dependencies are not currently installed and may be required.\n' err_msg += '"{}"'.format(' '.join(install_cmd_args + dep_list)) print_status(err_msg) ans_yes = prompt_y_n('Missing native dependencies. Attempt to continue anyway?', 'n') if not ans_yes: raise CLIInstallError('Please install the native dependencies and try again.') def _get_linux_distro(): if platform.system() != 'Linux': return None, None try: with open('/etc/os-release') as lines: tokens = [line.strip() for line in lines] except Exception as e: return None, None release_info = {} for token in tokens: if '=' in token: k, v = token.split('=', 1) release_info[k.lower()] = v.strip('"') return release_info.get('name', None), release_info.get('version_id', None) def verify_native_dependencies(): distname, version = _get_linux_distro() if not distname: # There's no distribution name so can't determine native dependencies required / or they may not be needed like on OS X return print_status('Verifying native dependencies.') is_python3 = sys.version_info[0] == 3 distname = distname.lower().strip() verify_cmd_args, install_cmd_args, dep_list = None, None, None if any(x in distname for x in ['ubuntu', 'debian']): verify_cmd_args = ['dpkg', '-s'] install_cmd_args = ['apt-get', 'update', '&&', 'apt-get', 'install', '-y'] python_dep = 'python3-dev' if is_python3 else 'python-dev' if distname == 'ubuntu' and version in ['12.04', '14.04'] or distname == 'debian' and version.startswith('7'): 
dep_list = ['libssl-dev', 'libffi-dev', python_dep] elif distname == 'ubuntu' and version in ['15.10', '16.04', '18.04']or distname == 'debian' and version.startswith('8'): dep_list = ['libssl-dev', 'libffi-dev', python_dep, 'build-essential'] elif any(x in distname for x in ['centos', 'rhel', 'red hat']): verify_cmd_args = ['rpm', '-q'] install_cmd_args = ['yum', 'check-update', ';', 'yum', 'install', '-y'] # python3-devel not available on yum but python3Xu-devel versions available. python_dep = 'python3{}u-devel'.format(sys.version_info[1]) if is_python3 else 'python-devel' dep_list = ['gcc', 'libffi-devel', python_dep, 'openssl-devel'] elif any(x in distname for x in ['opensuse', 'suse', 'sles']): verify_cmd_args = ['rpm', '-q'] install_cmd_args = ['zypper', 'refresh', '&&', 'zypper', '--non-interactive', 'install'] python_dep = 'python3-devel' if is_python3 else 'python-devel' dep_list = ['gcc', 'libffi-devel', python_dep, 'libopenssl-devel'] if verify_cmd_args and install_cmd_args and dep_list: _native_dependencies_for_dist(verify_cmd_args, install_cmd_args, dep_list) else: print_status("Unable to verify native dependencies. dist={}, version={}. Continuing...".format(distname, version)) def verify_install_dir_exec_path_conflict(install_dir, exec_path): if install_dir == exec_path: raise CLIInstallError("The executable file '{}' would clash with the install directory of '{}'. 
Choose either a different install directory or directory to place the executable.".format(exec_path, install_dir)) def main(): verify_python_version() verify_native_dependencies() tmp_dir = create_tmp_dir() install_dir = get_install_dir() exec_dir = get_exec_dir() exec_path = os.path.join(exec_dir, EXECUTABLE_NAME) verify_install_dir_exec_path_conflict(install_dir, exec_path) create_virtualenv(tmp_dir, install_dir) install_cli(install_dir, tmp_dir) exec_filepath = create_executable(exec_dir, install_dir) completion_file_path = os.path.join(install_dir, COMPLETION_FILENAME) create_tab_completion_file(completion_file_path) try: handle_path_and_tab_completion(completion_file_path, exec_filepath, exec_dir) except Exception as e: print_status("Unable to set up tab completion. ERROR: {}".format(str(e))) shutil.rmtree(tmp_dir) print_status("Installation successful.") print_status("Run the CLI with {} --help".format(exec_filepath)) if __name__ == '__main__': try: main() except CLIInstallError as cie: print('ERROR: '+str(cie), file=sys.stderr) sys.exit(1) except KeyboardInterrupt: print('\n\nExiting...') sys.exit(1)
mit
TAMU-CPT/galaxy-tools
tools/efetch/attic/cpt-efetch_OBSOLETE.py
1
4271
import argparse
import sys
import urllib.error  # 'import urllib' alone does not reliably expose urllib.error; import the submodule explicitly
from time import sleep

from Bio import Entrez
from Bio import SeqIO


def _efetch_with_retry(sleep_amt, **efetch_kwargs):
    """Call Entrez.efetch, retrying once after *sleep_amt* seconds.

    NCBI answers with an HTTP error when a client is rate limited (timed
    out); pausing and resending the identical request once is the recovery
    strategy used throughout this script.
    """
    try:
        return Entrez.efetch(**efetch_kwargs)
    except urllib.error.HTTPError:
        sleep(sleep_amt)
        return Entrez.efetch(**efetch_kwargs)


def do_multi(user_input, db, sleep_amt, ret_type):
    """Fetch every accession in one request and write a single combined file.

    :param user_input: list of accession strings (sent to efetch as one id list)
    :param db: Entrez database name ("protein" or "nuccore")
    :param sleep_amt: seconds to wait before the single retry on an HTTP error
    :param ret_type: "fasta" or "genbank" (also the SeqIO format name)

    Output file is multi<ret_type>.<ret_type> in the current directory.
    """
    # efetch's rettype for GenBank is "gb", while SeqIO's format name is "genbank".
    rettype = "gb" if ret_type == "genbank" else "fasta"
    net_handle = _efetch_with_retry(sleep_amt, db=db, id=user_input, rettype=rettype, retmode="text")
    records = SeqIO.parse(net_handle, ret_type)
    with open(f"multi{ret_type}.{ret_type}", "w") as out_file:
        for record in records:
            SeqIO.write(record, out_file, ret_type)
    print("...File(s) fetched and downloaded...")


def do_individuals(user_input, db, sleep_amt, ret_type):
    """Fetch each accession separately, writing one <acc>.<ret_type> file each.

    Parameters are the same as for do_multi; each record is also echoed to
    stdout, as the original did.
    """
    for acc in user_input:
        # NOTE(review): "gbwithparts" here vs plain "gb" in do_multi --
        # presumably intentional (full records for per-accession files), but
        # confirm.
        rettype = "gbwithparts" if ret_type == "genbank" else ret_type
        net_handle = _efetch_with_retry(sleep_amt, db=db, id=acc, rettype=rettype, retmode="text")
        record = net_handle.read()
        print(record)
        with open(f"{acc}.{ret_type}", "w") as out_file:
            out_file.write(record)
        net_handle.close()
    print("...File(s) fetched and downloaded...")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="CPT's very own modified Efetch")
    # Current placeholder until it is determined how best to use the current
    # user's email from Galaxy.
    parser.add_argument("email", type=str, help="Entrez Required Email")
    parser.add_argument("--input", type=str, nargs="*", help='accession input"')
    parser.add_argument("--db", type=str, choices=("protein", "nuccore"), help="choose protein or nuccore database to do query")
    parser.add_argument("--ret_format", type=str, choices=("multi", "individual", "both"), default="individual", help="choose between having a multi-fa/gbk, invidual, or both for the output")
    parser.add_argument("--ret_type", choices=("fasta", "genbank"), help="return format of file")
    parser.add_argument("--sleep", type=int, default=20, help="Amount to delay a query to NCBI by")
    # NOTE(review): --output is parsed (and the file is created by FileType)
    # but never written to -- confirm whether it can be dropped.
    parser.add_argument("--output", type=argparse.FileType("w+"), default="output.dat")
    args = parser.parse_args()
    Entrez.email = args.email
    # "both" runs the multi-file pass first, then the per-accession pass,
    # matching the original dispatch order.
    if args.ret_format in ("multi", "both"):
        do_multi(user_input=args.input, db=args.db, ret_type=args.ret_type, sleep_amt=args.sleep)
    if args.ret_format in ("individual", "both"):
        do_individuals(user_input=args.input, db=args.db, ret_type=args.ret_type, sleep_amt=args.sleep)
gpl-3.0
the-archer/ai-project
TrafficModel/roads.py
1
1433
from road import Road
# NOTE(review): csv and math appear unused in this module -- confirm before removing.
import csv
import math


class Roads():
    """Collection of Road objects driven by an external simulation loop."""

    def __init__(self, roadsList):
        """Build one Road per entry of *roadsList*.

        Each entry supplies the 9 positional Road constructor arguments.
        """
        self.roads = [Road(*spec[:9]) for spec in roadsList]

    def SignalCheck(self, itr):
        """Toggle every road's signal between "Red" and "Green" once every
        20 iterations (*itr* is assumed 0-based, hence the +1)."""
        if (itr + 1) % 20 == 0:  # check itr logic
            for road in self.roads:
                road.Signal = "Green" if road.Signal == "Red" else "Red"

    def UpdateRoads(self, Time):
        """Advance every road's traffic simulation to *Time*."""
        for road in self.roads:
            road.trafficIterator(Time)

    def getCurrentState(self):
        """Return a tuple with one coarse congestion bucket per road.

        Counts of 100 or more are capped at bucket 10.
        """
        state = ()
        for road in self.roads:
            no_of_vehicles = road.getNoOfVehicles()
            print(no_of_vehicles)  # debug output, kept from the original
            # NOTE(review): true division yields float buckets (e.g. 2.3);
            # integer bucketing (// 10) may have been intended -- confirm.
            st = no_of_vehicles / 10 if no_of_vehicles < 100 else 10
            state += (st,)
        return state

    def updateSignals(self, signal):
        """Set each road's signal from *signal*, positionally matched:
        0 -> "Red", anything else -> "Green"."""
        for road, sig in zip(self.roads, signal):
            road.Signal = "Red" if sig == 0 else "Green"

    def getTotalDelay(self):
        """Sum the accumulated delay over all roads."""
        return sum(road.getRoadDelay() for road in self.roads)

    def getTotalNoOfVehicles(self):
        """Total vehicle count over all roads."""
        return sum(road.getNoOfVehicles() for road in self.roads)
gpl-2.0
mrtukkin/svm-street-detector
devkit_kitti/transform2BEV.py
1
4409
#!/usr/bin/env python # # THE KITTI VISION BENCHMARK SUITE: ROAD BENCHMARK # # Copyright (C) 2013 # Honda Research Institute Europe GmbH # Carl-Legien-Str. 30 # 63073 Offenbach/Main # Germany # # UNPUBLISHED PROPRIETARY MATERIAL. # ALL RIGHTS RESERVED. # # Authors: Tobias Kuehnl <tkuehnl@cor-lab.uni-bielefeld.de> # Jannik Fritsch <jannik.fritsch@honda-ri.de> # from BirdsEyeView import BirdsEyeView from glob import glob import os,sys import cv2 # OpenCV ######################################################################### # function that does the transformation: Image --> BirdsEyeView ######################################################################### def main(dataFiles, pathToCalib, outputPath, calib_end = '.txt'): ''' Main method of transform2BEV :param dataFiles: the files you want to transform to BirdsEyeView, e.g., /home/elvis/kitti_road/data/*.png :param pathToCalib: containing calib data as txt-files, e.g., /home/elvis/kitti_road/calib/ :param outputPath: where the BirdsEyeView data will be saved, e.g., /home/elvis/kitti_road/data_bev :param calib_end: file extension of calib-files (OPTIONAL) ''' # Extract path of data pathToData = os.path.split(dataFiles)[0] assert os.path.isdir(pathToData), "The directory containig the input data seems to not exist!" 
assert os.path.isdir(pathToCalib), "Error <PathToCalib> does not exist" # BEV class bev = BirdsEyeView() #check if not os.path.isdir(outputPath): os.makedirs(outputPath) # get filelist fileList_data = glob(dataFiles) assert len(fileList_data), 'Could not find files in: %s' %pathToData # Loop over all files for aFile in fileList_data: assert os.path.isfile(aFile), '%s is not a file' %aFile file_key = aFile.split('/')[-1].split('.')[0] print "Transforming file %s to Birds Eye View " %file_key tags = file_key.split('_') data_end = aFile.split(file_key)[-1] #calibration filename calib_file = os.path.join(pathToCalib, file_key + calib_end) if not os.path.isfile(calib_file) and len(tags)==3: # exclude lane or road from filename! calib_file = os.path.join(pathToCalib, tags[0]+ '_' + tags[2] + calib_end) # Check if calb file exist! if not os.path.isfile(calib_file): print "Cannot find calib file: %s" %calib_file print "Attention: It is assumed that input data and calib files have the same name (only different extension)!" sys.exit(1) # Update calibration for Birds Eye View bev.setup(calib_file) # Read image data = cv2.imread(aFile, cv2.CV_LOAD_IMAGE_UNCHANGED) # Compute Birds Eye View data_bev = bev.compute(data) # Write output (BEV) fn_out = os.path.join(outputPath,file_key + data_end) if (cv2.imwrite(fn_out, data_bev)): print "done ..." else: print "saving to %s failed ... (permissions?)"%outputPath return print "BirdsEyeView was stored in: %s" %outputPath ######################################################################### # transformation script Image --> BirdsEyeView ######################################################################### if __name__ == "__main__": print sys.argv # check for correct number of arguments. 
if len(sys.argv)!=4: print "Usage: python transform2BEV.py <InputFiles> <PathToCalib> <OutputPath> " print "<InputFiles>: the files you want to transform to BirdsEyeView, e.g., '/home/elvis/kitti_road/data/*.png' (use quotes!)" print "<PathToCalib>: containing calib data as calib-files, e.g., /home/elvis/kitti_road/calib/" print "<OutputPath>: where the BirdsEyeView data will be saved, e.g., /home/elvis/kitti_road/data_bev" print "ATTENTION: It is assumed that input data and calib files have the same name (only different extension)!" print "Your provided parameters: ", sys.argv sys.exit(1) # parse parameters dataFiles = sys.argv[1] pathToCalib = sys.argv[2] outputPath = sys.argv[3] # Excecute main fun main(dataFiles, pathToCalib, outputPath)
gpl-3.0
peterli2012/ipmanagement
WebContent/jslib/flatlab/assets/file-uploader/server/gae-python/main.py
242
5844
# -*- coding: utf-8 -*- # # jQuery File Upload Plugin GAE Python Example 2.1.1 # https://github.com/blueimp/jQuery-File-Upload # # Copyright 2011, Sebastian Tschan # https://blueimp.net # # Licensed under the MIT license: # http://www.opensource.org/licenses/MIT # from __future__ import with_statement from google.appengine.api import files, images from google.appengine.ext import blobstore, deferred from google.appengine.ext.webapp import blobstore_handlers import json import re import urllib import webapp2 WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/' MIN_FILE_SIZE = 1 # bytes MAX_FILE_SIZE = 5000000 # bytes IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)') ACCEPT_FILE_TYPES = IMAGE_TYPES THUMBNAIL_MODIFICATOR = '=s80' # max width / height EXPIRATION_TIME = 300 # seconds def cleanup(blob_keys): blobstore.delete(blob_keys) class UploadHandler(webapp2.RequestHandler): def initialize(self, request, response): super(UploadHandler, self).initialize(request, response) self.response.headers['Access-Control-Allow-Origin'] = '*' self.response.headers[ 'Access-Control-Allow-Methods' ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE' self.response.headers[ 'Access-Control-Allow-Headers' ] = 'Content-Type, Content-Range, Content-Disposition' def validate(self, file): if file['size'] < MIN_FILE_SIZE: file['error'] = 'File is too small' elif file['size'] > MAX_FILE_SIZE: file['error'] = 'File is too big' elif not ACCEPT_FILE_TYPES.match(file['type']): file['error'] = 'Filetype not allowed' else: return True return False def get_file_size(self, file): file.seek(0, 2) # Seek to the end of the file size = file.tell() # Get the position of EOF file.seek(0) # Reset the file position to the beginning return size def write_blob(self, data, info): blob = files.blobstore.create( mime_type=info['type'], _blobinfo_uploaded_filename=info['name'] ) with files.open(blob, 'a') as f: f.write(data) files.finalize(blob) return files.blobstore.get_blob_key(blob) def handle_upload(self): 
results = [] blob_keys = [] for name, fieldStorage in self.request.POST.items(): if type(fieldStorage) is unicode: continue result = {} result['name'] = re.sub( r'^.*\\', '', fieldStorage.filename ) result['type'] = fieldStorage.type result['size'] = self.get_file_size(fieldStorage.file) if self.validate(result): blob_key = str( self.write_blob(fieldStorage.value, result) ) blob_keys.append(blob_key) result['deleteType'] = 'DELETE' result['deleteUrl'] = self.request.host_url +\ '/?key=' + urllib.quote(blob_key, '') if (IMAGE_TYPES.match(result['type'])): try: result['url'] = images.get_serving_url( blob_key, secure_url=self.request.host_url.startswith( 'https' ) ) result['thumbnailUrl'] = result['url'] +\ THUMBNAIL_MODIFICATOR except: # Could not get an image serving url pass if not 'url' in result: result['url'] = self.request.host_url +\ '/' + blob_key + '/' + urllib.quote( result['name'].encode('utf-8'), '') results.append(result) deferred.defer( cleanup, blob_keys, _countdown=EXPIRATION_TIME ) return results def options(self): pass def head(self): pass def get(self): self.redirect(WEBSITE) def post(self): if (self.request.get('_method') == 'DELETE'): return self.delete() result = {'files': self.handle_upload()} s = json.dumps(result, separators=(',', ':')) redirect = self.request.get('redirect') if redirect: return self.redirect(str( redirect.replace('%s', urllib.quote(s, ''), 1) )) if 'application/json' in self.request.headers.get('Accept'): self.response.headers['Content-Type'] = 'application/json' self.response.write(s) def delete(self): key = self.request.get('key') or '' blobstore.delete(key) s = json.dumps({key: True}, separators=(',', ':')) if 'application/json' in self.request.headers.get('Accept'): self.response.headers['Content-Type'] = 'application/json' self.response.write(s) class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler): def get(self, key, filename): if not blobstore.get(key): self.error(404) else: # Prevent browsers from 
MIME-sniffing the content-type: self.response.headers['X-Content-Type-Options'] = 'nosniff' # Cache for the expiration time: self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME # Send the file forcing a download dialog: self.send_blob(key, save_as=filename, content_type='application/octet-stream') app = webapp2.WSGIApplication( [ ('/', UploadHandler), ('/([^/]+)/([^/]+)', DownloadHandler) ], debug=True )
apache-2.0
xianggong/m2c_unit_test
test/integer/mad_hi_char4char4char4/compile.py
1861
4430
#!/usr/bin/python
import os
import subprocess
import re


def runCommand(command):
    """Run *command* (an argv list), wait for it, and return an iterator
    over its combined stdout/stderr lines (bytes)."""
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    p.wait()
    return iter(p.stdout.readline, b'')


def dumpRunCommand(command, dump_file_name, postfix):
    """Run *command* (a shell-style string) and dump the command line plus
    all of its output to <dump_file_name><postfix>."""
    # 'with' guarantees the dump file is closed (the original leaked the handle).
    with open(dump_file_name + postfix, "w+") as dump_file:
        dump_file.write(command + "\n")
        for line in runCommand(command.split()):
            # Subprocess output is bytes; decode so it can be written to a
            # text-mode file (Python 3 compatibility).
            dump_file.write(line.decode() if isinstance(line, bytes) else line)


def rmFile(file_name):
    """Recursively remove *file_name*, ignoring missing paths ('rm -rf')."""
    # Pass an argv list directly instead of splitting a built string, so
    # paths containing spaces are handled correctly.
    runCommand(["rm", "-rf", file_name])


def rnm_ir(file_name):
    """Rewrite <file_name>.ll in place: prefix every unnamed SSA value and
    branch label with 'tmp_' and add an explicit 'entry:' block identifier
    after each 'define' line."""
    ir_file_name = file_name + ".ll"
    if not os.path.isfile(ir_file_name):
        return
    # 'rw+' is not a valid mode in Python 3; 'r+' provides the intended
    # read/update access.
    with open(ir_file_name, "r+") as ir_file:
        lines = ir_file.readlines()
        ir_file.seek(0)
        ir_file.truncate()
        for line in lines:
            if "define" in line:
                # Add an entry block identifier right after the definition.
                line += "entry:\n"
            # Rename all unnamed variables: %1 -> %tmp_1
            line = re.sub(r'\%([0-9]+)', r'%tmp_\1', line.rstrip())
            # Rename unnamed branch labels: '; <label>:2' -> 'tmp_2:'
            line = re.sub(r'(\;\ \<label\>\:)([0-9]+)', r'tmp_\2:', line.rstrip())
            ir_file.write(line + '\n')


def gen_ir(file_name):
    """Compile <file_name>.cl to LLVM IR with clang, logging the compiler
    output to <file_name>.clang.log."""
    root_dir = '../../../'
    header_dir = root_dir + "inc/"
    header = " -I " + header_dir
    header += " -include " + header_dir + "m2c_buildin_fix.h "
    header += " -include " + header_dir + "clc/clc.h "
    header += " -D cl_clang_storage_class_specifiers "
    gen_cmd = "clang -S -emit-llvm -O0 -target r600-- -mcpu=verde "
    dumpRunCommand(gen_cmd + header + file_name + ".cl", file_name, ".clang.log")


def asm_ir(file_name):
    """Assemble <file_name>.ll to bitcode <file_name>.bc with llvm-as."""
    if os.path.isfile(file_name + ".ll"):
        runCommand(["llvm-as", file_name + ".ll", "-o", file_name + ".bc"])


def opt_bc(file_name):
    """Run the mem2reg optimisation pass, producing <file_name>.opt.bc."""
    if os.path.isfile(file_name + ".bc"):
        runCommand(["opt", "--mem2reg", file_name + ".bc", "-o", file_name + ".opt.bc"])


def dis_bc(file_name):
    """Disassemble <file_name>.opt.bc back to readable IR (<file_name>.opt.ll)."""
    # NOTE(review): guard tests for '.bc' while the input is '.opt.bc',
    # mirroring the original -- confirm whether '.opt.bc' was intended.
    if os.path.isfile(file_name + ".bc"):
        runCommand(["llvm-dis", file_name + ".opt.bc", "-o", file_name + ".opt.ll"])


def m2c_gen(file_name):
    """Translate <file_name>.opt.bc to Southern Islands assembly with m2c,
    logging to <file_name>.m2c.llvm2si.log; drop an empty result file."""
    if os.path.isfile(file_name + ".opt.bc"):
        dumpRunCommand("m2c --llvm2si " + file_name + ".opt.bc", file_name, ".m2c.llvm2si.log")
        # Remove the output if m2c produced an empty file.
        out_name = file_name + ".opt.s"
        if os.path.isfile(out_name) and os.path.getsize(out_name) == 0:
            rmFile(out_name)


def m2c_bin(file_name):
    """Assemble <file_name>.opt.s to a binary with m2c --si2bin, logging
    to <file_name>.m2c.si2bin.log."""
    if os.path.isfile(file_name + ".opt.s"):
        dumpRunCommand("m2c --si2bin " + file_name + ".opt.s", file_name, ".m2c.si2bin.log")


def main():
    """Run the full .cl -> IR -> bitcode -> SI assembly pipeline for every
    OpenCL source in the current directory."""
    for entry in os.listdir("./"):
        if entry.endswith(".cl"):
            file_name = os.path.splitext(entry)[0]
            gen_ir(file_name)
            rnm_ir(file_name)
            asm_ir(file_name)
            opt_bc(file_name)
            dis_bc(file_name)
            m2c_gen(file_name)
            m2c_bin(file_name)


if __name__ == "__main__":
    main()
gpl-2.0
tibbe/ghc
testsuite/driver/runtests.py
7
10195
# # (c) Simon Marlow 2002 # import sys import os import string import getopt import platform import time import re # We don't actually need subprocess in runtests.py, but: # * We do need it in testlibs.py # * We can't import testlibs.py until after we have imported ctypes # * If we import ctypes before subprocess on cygwin, then sys.exit(0) # says "Aborted" and we fail with exit code 134. # So we import it here first, so that the testsuite doesn't appear to fail. try: import subprocess except: pass from testutil import * from testglobals import * # Readline sometimes spews out ANSI escapes for some values of TERM, # which result in test failures. Thus set TERM to a nice, simple, safe # value. os.environ['TERM'] = 'vt100' global config config = getConfig() # get it from testglobals # ----------------------------------------------------------------------------- # cmd-line options long_options = [ "config=", # config file "rootdir=", # root of tree containing tests (default: .) "output-summary=", # file in which to save the (human-readable) summary "only=", # just this test (can be give multiple --only= flags) "way=", # just this way "skipway=", # skip this way "threads=", # threads to run simultaneously "check-files-written", # check files aren't written by multiple tests "verbose=", # verbose (0,1,2 so far) "skip-perf-tests", # skip performance tests ] opts, args = getopt.getopt(sys.argv[1:], "e:", long_options) for opt,arg in opts: if opt == '--config': execfile(arg) # -e is a string to execute from the command line. 
For example: # testframe -e 'config.compiler=ghc-5.04' if opt == '-e': exec arg if opt == '--rootdir': config.rootdirs.append(arg) if opt == '--output-summary': config.output_summary = arg if opt == '--only': config.only.append(arg) if opt == '--way': if (arg not in config.run_ways and arg not in config.compile_ways and arg not in config.other_ways): sys.stderr.write("ERROR: requested way \'" + arg + "\' does not exist\n") sys.exit(1) config.cmdline_ways = [arg] + config.cmdline_ways if (arg in config.other_ways): config.run_ways = [arg] + config.run_ways config.compile_ways = [arg] + config.compile_ways if opt == '--skipway': if (arg not in config.run_ways and arg not in config.compile_ways and arg not in config.other_ways): sys.stderr.write("ERROR: requested way \'" + arg + "\' does not exist\n") sys.exit(1) config.other_ways = filter(neq(arg), config.other_ways) config.run_ways = filter(neq(arg), config.run_ways) config.compile_ways = filter(neq(arg), config.compile_ways) if opt == '--threads': config.threads = int(arg) config.use_threads = 1 if opt == '--check-files-written': config.check_files_written = True if opt == '--skip-perf-tests': config.skip_perf_tests = True if opt == '--verbose': if arg not in ["0","1","2","3"]: sys.stderr.write("ERROR: requested verbosity %s not supported, use 0,1,2 or 3" % arg) sys.exit(1) config.verbose = int(arg) if config.use_threads == 1: # Trac #1558 says threads don't work in python 2.4.4, but do # in 2.5.2. Probably >= 2.5 is sufficient, but let's be # conservative here. # Some versions of python have things like '1c1' for some of # these components (see trac #3091), but int() chokes on the # 'c1', so we drop it. 
(maj, min, pat) = platform.python_version_tuple() # We wrap maj, min, and pat in str() to work around a bug in python # 2.6.1 maj = int(re.sub('[^0-9].*', '', str(maj))) min = int(re.sub('[^0-9].*', '', str(min))) pat = int(re.sub('[^0-9].*', '', str(pat))) if (maj, min, pat) < (2, 5, 2): print "Warning: Ignoring request to use threads as python version < 2.5.2" config.use_threads = 0 # We also need to disable threads for python 2.7.2, because of # this bug: http://bugs.python.org/issue13817 elif (maj, min, pat) == (2, 7, 2): print "Warning: Ignoring request to use threads as python version is 2.7.2" print "See http://bugs.python.org/issue13817 for details." config.use_threads = 0 if windows: print "Warning: Ignoring request to use threads as running on Windows" config.use_threads = 0 config.cygwin = False config.msys = False if windows: h = os.popen('uname -s', 'r') v = h.read() h.close() if v.startswith("CYGWIN"): config.cygwin = True elif v.startswith("MINGW"): # msys gives "MINGW32" # msys2 gives "MINGW_NT-6.2" config.msys = True else: raise Exception("Can't detect Windows terminal type") # Try to use UTF8 if windows: import ctypes if config.cygwin: # Is this actually right? Which calling convention does it use? # As of the time of writing, ctypes.windll doesn't exist in the # cygwin python, anyway. mydll = ctypes.cdll else: mydll = ctypes.windll # This actually leaves the terminal in codepage 65001 (UTF8) even # after python terminates. We ought really remember the old codepage # and set it back. 
if mydll.kernel32.SetConsoleCP(65001) == 0: raise Exception("Failure calling SetConsoleCP(65001)") if mydll.kernel32.SetConsoleOutputCP(65001) == 0: raise Exception("Failure calling SetConsoleOutputCP(65001)") else: # Try and find a utf8 locale to use # First see if we already have a UTF8 locale h = os.popen('locale | grep LC_CTYPE | grep -i utf', 'r') v = h.read() h.close() if v == '': # We don't, so now see if 'locale -a' works h = os.popen('locale -a', 'r') v = h.read() h.close() if v != '': # If it does then use the first utf8 locale that is available h = os.popen('locale -a | grep -i "utf8\|utf-8" 2>/dev/null', 'r') v = h.readline().strip() h.close() if v != '': os.environ['LC_ALL'] = v print "setting LC_ALL to", v else: print 'WARNING: No UTF8 locale found.' print 'You may get some spurious test failures.' # This has to come after arg parsing as the args can change the compiler get_compiler_info() # Can't import this earlier as we need to know if threading will be # enabled or not from testlib import * # On Windows we need to set $PATH to include the paths to all the DLLs # in order for the dynamic library tests to work. if windows or darwin: pkginfo = getStdout([config.ghc_pkg, 'dump']) topdir = config.libdir for line in pkginfo.split('\n'): if line.startswith('library-dirs:'): path = line.rstrip() path = re.sub('^library-dirs: ', '', path) path = re.sub('\\$topdir', topdir, path) if path.startswith('"'): path = re.sub('^"(.*)"$', '\\1', path) path = re.sub('\\\\(.)', '\\1', path) if windows: if config.cygwin: # On cygwin we can't put "c:\foo" in $PATH, as : is a # field separator. So convert to /cygdrive/c/foo instead. # Other pythons use ; as the separator, so no problem. 
path = re.sub('([a-zA-Z]):', '/cygdrive/\\1', path) path = re.sub('\\\\', '/', path) os.environ['PATH'] = os.pathsep.join([path, os.environ.get("PATH", "")]) else: # darwin os.environ['DYLD_LIBRARY_PATH'] = os.pathsep.join([path, os.environ.get("DYLD_LIBRARY_PATH", "")]) global testopts_local testopts_local.x = TestOptions() if config.use_threads: t.lock = threading.Lock() t.thread_pool = threading.Condition(t.lock) t.lockFilesWritten = threading.Lock() t.running_threads = 0 # if timeout == -1 then we try to calculate a sensible value if config.timeout == -1: config.timeout = int(read_no_crs(config.top + '/timeout/calibrate.out')) print 'Timeout is ' + str(config.timeout) # ----------------------------------------------------------------------------- # The main dude if config.rootdirs == []: config.rootdirs = ['.'] t_files = findTFiles(config.rootdirs) print 'Found', len(t_files), '.T files...' t = getTestRun() # Avoid cmd.exe built-in 'date' command on Windows t.start_time = time.localtime() print 'Beginning test run at', time.strftime("%c %Z",t.start_time) # set stdout to unbuffered (is this the best way to do it?) sys.stdout.flush() sys.stdout = os.fdopen(sys.__stdout__.fileno(), "w", 0) # First collect all the tests to be run for file in t_files: if_verbose(2, '====> Scanning %s' % file) newTestDir(os.path.dirname(file)) try: execfile(file) except: print '*** framework failure: found an error while executing ', file, ':' t.n_framework_failures = t.n_framework_failures + 1 traceback.print_exc() if config.list_broken: global brokens print '' print 'Broken tests:' print (' '.join(map (lambda (b, d, n) : '#' + str(b) + '(' + d + '/' + n + ')', brokens))) print '' if t.n_framework_failures != 0: print 'WARNING:', str(t.n_framework_failures), 'framework failures!' 
print '' else: # Now run all the tests if config.use_threads: t.running_threads=0 for oneTest in parallelTests: if stopping(): break oneTest() if config.use_threads: t.thread_pool.acquire() while t.running_threads>0: t.thread_pool.wait() t.thread_pool.release() config.use_threads = False for oneTest in aloneTests: if stopping(): break oneTest() summary(t, sys.stdout) if config.output_summary != '': summary(t, open(config.output_summary, 'w')) sys.exit(0)
bsd-3-clause
chyeh727/django
tests/ordering/tests.py
301
10033
from __future__ import unicode_literals from datetime import datetime from operator import attrgetter from django.db.models import F from django.test import TestCase from .models import Article, Author, Reference class OrderingTests(TestCase): def setUp(self): self.a1 = Article.objects.create( headline="Article 1", pub_date=datetime(2005, 7, 26) ) self.a2 = Article.objects.create( headline="Article 2", pub_date=datetime(2005, 7, 27) ) self.a3 = Article.objects.create( headline="Article 3", pub_date=datetime(2005, 7, 27) ) self.a4 = Article.objects.create( headline="Article 4", pub_date=datetime(2005, 7, 28) ) def test_default_ordering(self): """ By default, Article.objects.all() orders by pub_date descending, then headline ascending. """ self.assertQuerysetEqual( Article.objects.all(), [ "Article 4", "Article 2", "Article 3", "Article 1", ], attrgetter("headline") ) # Getting a single item should work too: self.assertEqual(Article.objects.all()[0], self.a4) def test_default_ordering_override(self): """ Override ordering with order_by, which is in the same format as the ordering attribute in models. """ self.assertQuerysetEqual( Article.objects.order_by("headline"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by("pub_date", "-headline"), [ "Article 1", "Article 3", "Article 2", "Article 4", ], attrgetter("headline") ) def test_order_by_override(self): """ Only the last order_by has any effect (since they each override any previous ordering). """ self.assertQuerysetEqual( Article.objects.order_by("id"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by("id").order_by("-headline"), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_stop_slicing(self): """ Use the 'stop' part of slicing notation to limit the results. 
""" self.assertQuerysetEqual( Article.objects.order_by("headline")[:2], [ "Article 1", "Article 2", ], attrgetter("headline") ) def test_stop_start_slicing(self): """ Use the 'stop' and 'start' parts of slicing notation to offset the result list. """ self.assertQuerysetEqual( Article.objects.order_by("headline")[1:3], [ "Article 2", "Article 3", ], attrgetter("headline") ) def test_random_ordering(self): """ Use '?' to order randomly. """ self.assertEqual( len(list(Article.objects.order_by("?"))), 4 ) def test_reversed_ordering(self): """ Ordering can be reversed using the reverse() method on a queryset. This allows you to extract things like "the last two items" (reverse and then take the first two). """ self.assertQuerysetEqual( Article.objects.all().reverse()[:2], [ "Article 1", "Article 3", ], attrgetter("headline") ) def test_reverse_ordering_pure(self): qs1 = Article.objects.order_by(F('headline').asc()) qs2 = qs1.reverse() self.assertQuerysetEqual( qs1, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( qs2, [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_extra_ordering(self): """ Ordering can be based on fields included from an 'extra' clause """ self.assertQuerysetEqual( Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_extra_ordering_quoting(self): """ If the extra clause uses an SQL keyword for a name, it will be protected by quoting. 
""" self.assertQuerysetEqual( Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_extra_ordering_with_table_name(self): self.assertQuerysetEqual( Article.objects.extra(order_by=['ordering_article.headline']), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.extra(order_by=['-ordering_article.headline']), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_pk(self): """ Ensure that 'pk' works as an ordering option in Meta. Refs #8291. """ Author.objects.create(pk=1) Author.objects.create(pk=2) Author.objects.create(pk=3) Author.objects.create(pk=4) self.assertQuerysetEqual( Author.objects.all(), [ 4, 3, 2, 1 ], attrgetter("pk") ) def test_order_by_fk_attname(self): """ Ensure that ordering by a foreign key by its attribute name prevents the query from inheriting it's related model ordering option. Refs #19195. 
""" for i in range(1, 5): author = Author.objects.create(pk=i) article = getattr(self, "a%d" % (5 - i)) article.author = author article.save(update_fields={'author'}) self.assertQuerysetEqual( Article.objects.order_by('author_id'), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_f_expression(self): self.assertQuerysetEqual( Article.objects.order_by(F('headline')), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by(F('headline').asc()), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by(F('headline').desc()), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_f_expression_duplicates(self): """ A column may only be included once (the first occurrence) so we check to ensure there are no duplicates by inspecting the SQL. """ qs = Article.objects.order_by(F('headline').asc(), F('headline').desc()) sql = str(qs.query).upper() fragment = sql[sql.find('ORDER BY'):] self.assertEqual(fragment.count('HEADLINE'), 1) self.assertQuerysetEqual( qs, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) qs = Article.objects.order_by(F('headline').desc(), F('headline').asc()) sql = str(qs.query).upper() fragment = sql[sql.find('ORDER BY'):] self.assertEqual(fragment.count('HEADLINE'), 1) self.assertQuerysetEqual( qs, [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_related_ordering_duplicate_table_reference(self): """ An ordering referencing a model with an ordering referencing a model multiple time no circular reference should be detected (#24654). 
""" first_author = Author.objects.create() second_author = Author.objects.create() self.a1.author = first_author self.a1.second_author = second_author self.a1.save() self.a2.author = second_author self.a2.second_author = first_author self.a2.save() r1 = Reference.objects.create(article_id=self.a1.pk) r2 = Reference.objects.create(article_id=self.a2.pk) self.assertQuerysetEqual(Reference.objects.all(), [r2, r1], lambda x: x)
bsd-3-clause
ms-iot/python
cpython/Lib/distutils/command/install_headers.py
205
1298
"""distutils.command.install_headers Implements the Distutils 'install_headers' command, to install C/C++ header files to the Python include directory.""" from distutils.core import Command # XXX force is never used class install_headers(Command): description = "install C/C++ header files" user_options = [('install-dir=', 'd', "directory to install header files to"), ('force', 'f', "force installation (overwrite existing files)"), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.force = 0 self.outfiles = [] def finalize_options(self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def run(self): headers = self.distribution.headers if not headers: return self.mkpath(self.install_dir) for header in headers: (out, _) = self.copy_file(header, self.install_dir) self.outfiles.append(out) def get_inputs(self): return self.distribution.headers or [] def get_outputs(self): return self.outfiles
bsd-3-clause
bonitadecker77/python-for-android
python3-alpha/python3-src/Lib/test/test_dbm_dumb.py
60
5823
#! /usr/bin/env python3 """Test script for the dumbdbm module Original by Roger E. Masse """ import io import os import unittest import dbm.dumb as dumbdbm from test import support _fname = support.TESTFN def _delete_files(): for ext in [".dir", ".dat", ".bak"]: try: os.unlink(_fname + ext) except OSError: pass class DumbDBMTestCase(unittest.TestCase): _dict = {b'0': b'', b'a': b'Python:', b'b': b'Programming', b'c': b'the', b'd': b'way', b'f': b'Guido', b'g': b'intended', '\u00fc'.encode('utf-8') : b'!', } def __init__(self, *args): unittest.TestCase.__init__(self, *args) def test_dumbdbm_creation(self): f = dumbdbm.open(_fname, 'c') self.assertEqual(list(f.keys()), []) for key in self._dict: f[key] = self._dict[key] self.read_helper(f) f.close() def test_dumbdbm_creation_mode(self): # On platforms without chmod, don't do anything. if not (hasattr(os, 'chmod') and hasattr(os, 'umask')): return try: old_umask = os.umask(0o002) f = dumbdbm.open(_fname, 'c', 0o637) f.close() finally: os.umask(old_umask) expected_mode = 0o635 if os.name != 'posix': # Windows only supports setting the read-only attribute. # This shouldn't fail, but doesn't work like Unix either. 
expected_mode = 0o666 import stat st = os.stat(_fname + '.dat') self.assertEqual(stat.S_IMODE(st.st_mode), expected_mode) st = os.stat(_fname + '.dir') self.assertEqual(stat.S_IMODE(st.st_mode), expected_mode) def test_close_twice(self): f = dumbdbm.open(_fname) f[b'a'] = b'b' self.assertEqual(f[b'a'], b'b') f.close() f.close() def test_dumbdbm_modification(self): self.init_db() f = dumbdbm.open(_fname, 'w') self._dict[b'g'] = f[b'g'] = b"indented" self.read_helper(f) f.close() def test_dumbdbm_read(self): self.init_db() f = dumbdbm.open(_fname, 'r') self.read_helper(f) f.close() def test_dumbdbm_keys(self): self.init_db() f = dumbdbm.open(_fname) keys = self.keys_helper(f) f.close() def test_write_contains(self): f = dumbdbm.open(_fname) f[b'1'] = b'hello' self.assertIn(b'1', f) f.close() def test_write_write_read(self): # test for bug #482460 f = dumbdbm.open(_fname) f[b'1'] = b'hello' f[b'1'] = b'hello2' f.close() f = dumbdbm.open(_fname) self.assertEqual(f[b'1'], b'hello2') f.close() def test_str_read(self): self.init_db() f = dumbdbm.open(_fname, 'r') self.assertEqual(f['\u00fc'], self._dict['\u00fc'.encode('utf-8')]) def test_str_write_contains(self): self.init_db() f = dumbdbm.open(_fname) f['\u00fc'] = b'!' f['1'] = 'a' f.close() f = dumbdbm.open(_fname, 'r') self.assertIn('\u00fc', f) self.assertEqual(f['\u00fc'.encode('utf-8')], self._dict['\u00fc'.encode('utf-8')]) self.assertEqual(f[b'1'], b'a') def test_line_endings(self): # test for bug #1172763: dumbdbm would die if the line endings # weren't what was expected. 
f = dumbdbm.open(_fname) f[b'1'] = b'hello' f[b'2'] = b'hello2' f.close() # Mangle the file by changing the line separator to Windows or Unix with io.open(_fname + '.dir', 'rb') as file: data = file.read() if os.linesep == '\n': data = data.replace(b'\n', b'\r\n') else: data = data.replace(b'\r\n', b'\n') with io.open(_fname + '.dir', 'wb') as file: file.write(data) f = dumbdbm.open(_fname) self.assertEqual(f[b'1'], b'hello') self.assertEqual(f[b'2'], b'hello2') def read_helper(self, f): keys = self.keys_helper(f) for key in self._dict: self.assertEqual(self._dict[key], f[key]) def init_db(self): f = dumbdbm.open(_fname, 'w') for k in self._dict: f[k] = self._dict[k] f.close() def keys_helper(self, f): keys = sorted(f.keys()) dkeys = sorted(self._dict.keys()) self.assertEqual(keys, dkeys) return keys # Perform randomized operations. This doesn't make assumptions about # what *might* fail. def test_random(self): import random d = {} # mirror the database for dummy in range(5): f = dumbdbm.open(_fname) for dummy in range(100): k = random.choice('abcdefghijklm') if random.random() < 0.2: if k in d: del d[k] del f[k] else: v = random.choice((b'a', b'b', b'c')) * random.randrange(10000) d[k] = v f[k] = v self.assertEqual(f[k], v) f.close() f = dumbdbm.open(_fname) expected = sorted((k.encode("latin-1"), v) for k, v in d.items()) got = sorted(f.items()) self.assertEqual(expected, got) f.close() def tearDown(self): _delete_files() def setUp(self): _delete_files() def test_main(): try: support.run_unittest(DumbDBMTestCase) finally: _delete_files() if __name__ == "__main__": test_main()
apache-2.0
photoninger/ansible
lib/ansible/modules/system/group.py
17
13681
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: group author: - Stephen Fromm (@sfromm) version_added: "0.0.2" short_description: Add or remove groups requirements: - groupadd - groupdel - groupmod description: - Manage presence of groups on a host. - For Windows targets, use the M(win_group) module instead. options: name: description: - Name of the group to manage. required: true gid: description: - Optional I(GID) to set for the group. state: description: - Whether the group should be present or not on the remote host. choices: [ absent, present ] default: present system: description: - If I(yes), indicates that the group created is a system group. type: bool default: 'no' notes: - For Windows targets, use the M(win_group) module instead. ''' EXAMPLES = ''' - name: Ensure group "somegroup" exists group: name: somegroup state: present ''' import grp from ansible.module_utils.basic import AnsibleModule, load_platform_subclass class Group(object): """ This is a generic Group manipulation class that is subclassed based on platform. A subclass may wish to override the following action methods:- - group_del() - group_add() - group_mod() All subclasses MUST define platform and distribution (which may be None). 
""" platform = 'Generic' distribution = None GROUPFILE = '/etc/group' def __new__(cls, *args, **kwargs): return load_platform_subclass(Group, args, kwargs) def __init__(self, module): self.module = module self.state = module.params['state'] self.name = module.params['name'] self.gid = module.params['gid'] self.system = module.params['system'] def execute_command(self, cmd): return self.module.run_command(cmd) def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('-g') cmd.append(kwargs[key]) elif key == 'system' and kwargs[key] is True: cmd.append('-r') cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() for key in kwargs: if key == 'gid': if kwargs[key] is not None and info[2] != int(kwargs[key]): cmd.append('-g') cmd.append(kwargs[key]) if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) def group_exists(self): try: if grp.getgrnam(self.name): return True except KeyError: return False def group_info(self): if not self.group_exists(): return False try: info = list(grp.getgrnam(self.name)) except KeyError: return False return info # =========================================== class SunOS(Group): """ This is a SunOS Group manipulation class. Solaris doesn't have the 'system' group concept. 
This overrides the following methods from the generic class:- - group_add() """ platform = 'SunOS' distribution = None GROUPFILE = '/etc/group' def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('-g') cmd.append(kwargs[key]) cmd.append(self.name) return self.execute_command(cmd) # =========================================== class AIX(Group): """ This is a AIX Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'AIX' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('rmgroup', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('mkgroup', True)] for key in kwargs: if key == 'gid' and kwargs[key] is not None: cmd.append('id=' + kwargs[key]) elif key == 'system' and kwargs[key] is True: cmd.append('-a') cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('chgroup', True)] info = self.group_info() for key in kwargs: if key == 'gid': if kwargs[key] is not None and info[2] != int(kwargs[key]): cmd.append('id=' + kwargs[key]) if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class FreeBsdGroup(Group): """ This is a FreeBSD Group manipulation class. 
This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'FreeBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] if self.gid is not None: cmd.append('-g') cmd.append('%d' % int(self.gid)) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name] info = self.group_info() cmd_len = len(cmd) if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append('%d' % int(self.gid)) # modify the group if cmd will do anything if cmd_len != len(cmd): if self.module.check_mode: return (0, '', '') return self.execute_command(cmd) return (None, '', '') # =========================================== class DarwinGroup(Group): """ This is a Mac OS X Darwin Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() group manipulation are done using dseditgroup(1). 
""" platform = 'Darwin' distribution = None def group_add(self, **kwargs): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'create'] if self.gid is not None: cmd += ['-i', self.gid] elif 'system' in kwargs and kwargs['system'] is True: gid = self.get_lowest_available_system_gid() if gid is not False: self.gid = str(gid) cmd += ['-i', self.gid] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) def group_del(self): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'delete'] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) def group_mod(self, gid=None): info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += ['-o', 'edit'] if gid is not None: cmd += ['-i', gid] cmd += ['-L', self.name] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) return (None, '', '') def get_lowest_available_system_gid(self): # check for lowest available system gid (< 500) try: cmd = [self.module.get_bin_path('dscl', True)] cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID'] (rc, out, err) = self.execute_command(cmd) lines = out.splitlines() highest = 0 for group_info in lines: parts = group_info.split(' ') if len(parts) > 1: gid = int(parts[-1]) if gid > highest and gid < 500: highest = gid if highest == 0 or highest == 499: return False return (highest + 1) except: return False class OpenBsdGroup(Group): """ This is a OpenBSD Group manipulation class. 
This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'OpenBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] if self.gid is not None: cmd.append('-g') cmd.append('%d' % int(self.gid)) cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append('%d' % int(self.gid)) if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== class NetBsdGroup(Group): """ This is a NetBSD Group manipulation class. This overrides the following methods from the generic class:- - group_del() - group_add() - group_mod() """ platform = 'NetBSD' distribution = None GROUPFILE = '/etc/group' def group_del(self): cmd = [self.module.get_bin_path('groupdel', True), self.name] return self.execute_command(cmd) def group_add(self, **kwargs): cmd = [self.module.get_bin_path('groupadd', True)] if self.gid is not None: cmd.append('-g') cmd.append('%d' % int(self.gid)) cmd.append(self.name) return self.execute_command(cmd) def group_mod(self, **kwargs): cmd = [self.module.get_bin_path('groupmod', True)] info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd.append('-g') cmd.append('%d' % int(self.gid)) if len(cmd) == 1: return (None, '', '') if self.module.check_mode: return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) # =========================================== def main(): module = AnsibleModule( argument_spec=dict( state=dict(type='str', default='present', choices=['absent', 
'present']), name=dict(type='str', required=True), gid=dict(type='str'), system=dict(type='bool', default=False), ), supports_check_mode=True, ) group = Group(module) module.debug('Group instantiated - platform %s' % group.platform) if group.distribution: module.debug('Group instantiated - distribution %s' % group.distribution) rc = None out = '' err = '' result = {} result['name'] = group.name result['state'] = group.state if group.state == 'absent': if group.group_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = group.group_del() if rc != 0: module.fail_json(name=group.name, msg=err) elif group.state == 'present': if not group.group_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = group.group_add(gid=group.gid, system=group.system) else: (rc, out, err) = group.group_mod(gid=group.gid) if rc is not None and rc != 0: module.fail_json(name=group.name, msg=err) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err if group.group_exists(): info = group.group_info() result['system'] = group.system result['gid'] = info[2] module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
rodrigoprimo/gitinspector_wp
gitinspector/metrics.py
2
9387
# coding: utf-8 # # Copyright © 2012-2014 Ejwa Software. All rights reserved. # # This file is part of gitinspector. # # gitinspector is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # gitinspector is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gitinspector. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function from __future__ import unicode_literals from localization import N_ from outputable import Outputable from changes import FileDiff import comment import filtering import interval import re import subprocess __metric_eloc__ = {"java": 500, "c": 500, "cpp": 500, "h": 300, "hpp": 300, "php": 500, "py": 500, "glsl": 1000, "rb": 500, "js": 500, "sql": 1000, "xml": 1000} __metric_cc_tokens__ = [[["java", "js", "c", "cc", "cpp"], ["else", "for\s+\(.*\)", "if\s+\(.*\)", "case\s+\w+:", "default:", "while\s+\(.*\)"], ["assert", "break", "continue", "return"]], [["py"], ["^\s+elif .*:$", "^\s+else:$", "^\s+for .*:", "^\s+if .*:$", "^\s+while .*:$"], ["^\s+assert", "break", "continue", "return"]]] METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD = 50 METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD = 0.75 class MetricsLogic: def __init__(self): self.eloc = {} self.cyclomatic_complexity = {} self.cyclomatic_complexity_density = {} ls_tree_r = subprocess.Popen("git ls-tree --name-only -r " + interval.get_ref(), shell=True, bufsize=1, stdout=subprocess.PIPE).stdout for i in ls_tree_r.readlines(): i = i.strip().decode("unicode_escape", "ignore") i = i.encode("latin-1", "replace") i = i.decode("utf-8", 
"replace").strip("\"").strip("'").strip() if FileDiff.is_valid_extension(i) and not filtering.set_filtered(FileDiff.get_filename(i)): file_r = subprocess.Popen("git show " + interval.get_ref() + ":" + i.strip(), shell=True, bufsize=1, stdout=subprocess.PIPE).stdout.readlines() extension = FileDiff.get_extension(i) lines = MetricsLogic.get_eloc(file_r, extension) cc = MetricsLogic.get_cyclomatic_complexity(file_r, extension) if __metric_eloc__.get(extension, None) != None and __metric_eloc__[extension] < lines: self.eloc[i.strip()] = lines if METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD < cc: self.cyclomatic_complexity[i.strip()] = cc if lines > 0 and METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD < cc / float(lines): self.cyclomatic_complexity_density[i.strip()] = cc / float(lines) @staticmethod def get_cyclomatic_complexity(file_r, extension): is_inside_comment = False cc_counter = 0 entry_tokens = None exit_tokens = None for i in __metric_cc_tokens__: if extension in i[0]: entry_tokens = i[1] exit_tokens = i[2] if entry_tokens or exit_tokens: for i in file_r: i = i.decode("utf-8", "replace") (_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i) if not is_inside_comment and not comment.is_comment(extension, i): for t in entry_tokens: if re.search(t, i, re.DOTALL): cc_counter += 2 for t in exit_tokens: if re.search(t, i, re.DOTALL): cc_counter += 1 return cc_counter; return -1 @staticmethod def get_eloc(file_r, extension): is_inside_comment = False eloc_counter = 0 for i in file_r: i = i.decode("utf-8", "replace") (_, is_inside_comment) = comment.handle_comment_block(is_inside_comment, extension, i) if not is_inside_comment and not comment.is_comment(extension, i): eloc_counter += 1 return eloc_counter ELOC_INFO_TEXT = N_("The following files are suspiciously big (in order of severity)") CYCLOMATIC_COMPLEXITY_TEXT = N_("The following files have an elevated cyclomatic complexity (in order of severity)") CYCLOMATIC_COMPLEXITY_DENSITY_TEXT = 
N_("The following files have an elevated cyclomatic complexity density (in order of severity)") METRICS_MISSING_INFO_TEXT = N_("No metrics violations were found in the repository") METRICS_VIOLATION_SCORES = [[1.0, "minimal"], [1.25, "minor"], [1.5, "medium"], [2.0, "bad"], [3.0, "severe"]] def __get_metrics_score__(ceiling, value): for i in reversed(METRICS_VIOLATION_SCORES): if value > ceiling * i[0]: return i[1] class Metrics(Outputable): def output_text(self): metrics_logic = MetricsLogic() if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density: print("\n" + _(METRICS_MISSING_INFO_TEXT) + ".") if metrics_logic.eloc: print("\n" + _(ELOC_INFO_TEXT) + ":") for i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True): print(_("{0} ({1} estimated lines of code)").format(i[1], str(i[0]))) if metrics_logic.cyclomatic_complexity: print("\n" + _(CYCLOMATIC_COMPLEXITY_TEXT) + ":") for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True): print(_("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0]))) if metrics_logic.cyclomatic_complexity_density: print("\n" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + ":") for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True): print(_("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0])) def output_html(self): metrics_logic = MetricsLogic() metrics_xml = "<div><div class=\"box\" id=\"metrics\">" if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density: metrics_xml += "<p>" + _(METRICS_MISSING_INFO_TEXT) + ".</p>" if metrics_logic.eloc: metrics_xml += "<div><h4>" + _(ELOC_INFO_TEXT) + ".</h4>" for n, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True)): metrics_xml += "<div class=\"" + 
__get_metrics_score__(__metric_eloc__[FileDiff.get_extension(i[1])], i[0]) + \ (" odd\">" if n % 2 == 1 else "\">") + \ _("{0} ({1} estimated lines of code)").format(i[1], str(i[0])) + "</div>" metrics_xml += "</div>" if metrics_logic.cyclomatic_complexity: metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_TEXT) + "</h4>" for n, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity.items()]), reverse = True)): metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_THRESHOLD, i[0]) + \ (" odd\">" if n % 2 == 1 else "\">") + \ _("{0} ({1} in cyclomatic complexity)").format(i[1], str(i[0])) + "</div>" metrics_xml += "</div>" if metrics_logic.cyclomatic_complexity_density: metrics_xml += "<div><h4>" + _(CYCLOMATIC_COMPLEXITY_DENSITY_TEXT) + "</h4>" for n, i in enumerate(sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True)): metrics_xml += "<div class=\"" + __get_metrics_score__(METRIC_CYCLOMATIC_COMPLEXITY_DENSITY_THRESHOLD, i[0]) + \ (" odd\">" if n % 2 == 1 else "\">") + \ _("{0} ({1:.3f} in cyclomatic complexity density)").format(i[1], i[0]) + "</div>" metrics_xml += "</div>" metrics_xml += "</div></div>" print(metrics_xml) def output_xml(self): metrics_logic = MetricsLogic() if not metrics_logic.eloc and not metrics_logic.cyclomatic_complexity and not metrics_logic.cyclomatic_complexity_density: print("\t<metrics>\n\t\t<message>" + _(METRICS_MISSING_INFO_TEXT) + "</message>\n\t</metrics>") else: eloc_xml = "" if metrics_logic.eloc: for i in sorted(set([(j, i) for (i, j) in metrics_logic.eloc.items()]), reverse = True): eloc_xml += "\t\t\t<estimated-lines-of-code>\n" eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n" eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n" eloc_xml += "\t\t\t</estimated-lines-of-code>\n" if metrics_logic.cyclomatic_complexity: for i in sorted(set([(j, i) for (i, j) in 
metrics_logic.cyclomatic_complexity.items()]), reverse = True): eloc_xml += "\t\t\t<cyclomatic-complexity>\n" eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n" eloc_xml += "\t\t\t\t<value>" + str(i[0]) + "</value>\n" eloc_xml += "\t\t\t</cyclomatic-complexity>\n" if metrics_logic.cyclomatic_complexity_density: for i in sorted(set([(j, i) for (i, j) in metrics_logic.cyclomatic_complexity_density.items()]), reverse = True): eloc_xml += "\t\t\t<cyclomatic-complexity-density>\n" eloc_xml += "\t\t\t\t<file-name>" + i[1] + "</file-name>\n" eloc_xml += "\t\t\t\t<value>{0:.3f}</value>\n".format(i[0]) eloc_xml += "\t\t\t</cyclomatic-complexity-density>\n" print("\t<metrics>\n\t\t<violations>\n" + eloc_xml + "\t\t</violations>\n\t</metrics>")
gpl-3.0