code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
#!/usr/bin/env python3 # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generate a spatial analysis against an arbitrary library. To use, build the 'binary_size_tool' target. Then run this tool, passing in the location of the library to be analyzed along with any other options you desire. """ import json import logging import multiprocessing import optparse import os import re import shutil import struct import subprocess import sys import tempfile import time import binary_size_utils import elf_symbolizer # Node dictionary keys. These are output in json read by the webapp so # keep them short to save file size. # Note: If these change, the webapp must also change. NODE_TYPE_KEY = 'k' NODE_NAME_KEY = 'n' NODE_CHILDREN_KEY = 'children' NODE_SYMBOL_TYPE_KEY = 't' NODE_SYMBOL_SIZE_KEY = 'value' NODE_MAX_DEPTH_KEY = 'maxDepth' NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement' # The display name of the bucket where we put symbols without path. NAME_NO_PATH_BUCKET = '(No Path)' # Try to keep data buckets smaller than this to avoid killing the # graphing lib. BIG_BUCKET_LIMIT = 3000 def _MkChild(node, name): child = node[NODE_CHILDREN_KEY].get(name) if child is None: child = {NODE_NAME_KEY: name, NODE_CHILDREN_KEY: {}} node[NODE_CHILDREN_KEY][name] = child return child def SplitNoPathBucket(node): """NAME_NO_PATH_BUCKET can be too large for the graphing lib to handle. 
Split it into sub-buckets in that case.""" root_children = node[NODE_CHILDREN_KEY] if NAME_NO_PATH_BUCKET in root_children: no_path_bucket = root_children[NAME_NO_PATH_BUCKET] old_children = no_path_bucket[NODE_CHILDREN_KEY] count = 0 for symbol_type, symbol_bucket in old_children.items(): count += len(symbol_bucket[NODE_CHILDREN_KEY]) if count > BIG_BUCKET_LIMIT: new_children = {} no_path_bucket[NODE_CHILDREN_KEY] = new_children current_bucket = None index = 0 for symbol_type, symbol_bucket in old_children.items(): for symbol_name, value in symbol_bucket[ NODE_CHILDREN_KEY].items(): if index % BIG_BUCKET_LIMIT == 0: group_no = (index / BIG_BUCKET_LIMIT) + 1 current_bucket = _MkChild( no_path_bucket, '%s subgroup %d' % (NAME_NO_PATH_BUCKET, group_no)) assert not NODE_TYPE_KEY in node or node[ NODE_TYPE_KEY] == 'p' node[NODE_TYPE_KEY] = 'p' # p for path index += 1 symbol_size = value[NODE_SYMBOL_SIZE_KEY] AddSymbolIntoFileNode(current_bucket, symbol_type, symbol_name, symbol_size) def MakeChildrenDictsIntoLists(node): largest_list_len = 0 if NODE_CHILDREN_KEY in node: largest_list_len = len(node[NODE_CHILDREN_KEY]) child_list = [] for child in node[NODE_CHILDREN_KEY].values(): child_largest_list_len = MakeChildrenDictsIntoLists(child) if child_largest_list_len > largest_list_len: largest_list_len = child_largest_list_len child_list.append(child) node[NODE_CHILDREN_KEY] = child_list return largest_list_len def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size): """Puts symbol into the file path node |node|. Returns the number of added levels in tree. I.e. returns 2.""" # 'node' is the file node and first step is to find its symbol-type bucket. node[NODE_LAST_PATH_ELEMENT_KEY] = True node = _MkChild(node, symbol_type) assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'b' node[NODE_SYMBOL_TYPE_KEY] = symbol_type node[NODE_TYPE_KEY] = 'b' # b for bucket # 'node' is now the symbol-type bucket. Make the child entry. 
node = _MkChild(node, symbol_name) if NODE_CHILDREN_KEY in node: if node[NODE_CHILDREN_KEY]: logging.warning( 'A container node used as symbol for %s.' % symbol_name) # This is going to be used as a leaf so no use for child list. del node[NODE_CHILDREN_KEY] node[NODE_SYMBOL_SIZE_KEY] = symbol_size node[NODE_SYMBOL_TYPE_KEY] = symbol_type node[NODE_TYPE_KEY] = 's' # s for symbol return 2 # Depth of the added subtree. def MakeCompactTree(symbols, symbol_path_origin_dir): result = { NODE_NAME_KEY: '/', NODE_CHILDREN_KEY: {}, NODE_TYPE_KEY: 'p', NODE_MAX_DEPTH_KEY: 0 } seen_symbol_with_path = False cwd = os.path.abspath(os.getcwd()) for symbol_name, symbol_type, symbol_size, file_path, _address in symbols: if 'vtable for ' in symbol_name: symbol_type = '@' # hack to categorize these separately # Take path like '/foo/bar/baz', convert to ['foo', 'bar', 'baz'] if file_path and file_path != "??": file_path = os.path.abspath( os.path.join(symbol_path_origin_dir, file_path)) # Let the output structure be relative to $CWD if inside $CWD, # otherwise relative to the disk root. This is to avoid # unnecessary click-through levels in the output. if file_path.startswith(cwd + os.sep): file_path = file_path[len(cwd):] if file_path.startswith('/'): file_path = file_path[1:] seen_symbol_with_path = True else: file_path = NAME_NO_PATH_BUCKET path_parts = file_path.split('/') # Find pre-existing node in tree, or update if it already exists node = result depth = 0 while len(path_parts) > 0: path_part = path_parts.pop(0) if len(path_part) == 0: continue depth += 1 node = _MkChild(node, path_part) assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p' node[NODE_TYPE_KEY] = 'p' # p for path depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size) result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth) if not seen_symbol_with_path: logging.warning('Symbols lack paths. 
Data will not be structured.') # The (no path) bucket can be extremely large if we failed to get # path information. Split it into subgroups if needed. SplitNoPathBucket(result) largest_list_len = MakeChildrenDictsIntoLists(result) if largest_list_len > BIG_BUCKET_LIMIT: logging.warning('There are sections with %d nodes. ' 'Results might be unusable.' % largest_list_len) return result def DumpCompactTree(symbols, symbol_path_origin_dir, outfile): tree_root = MakeCompactTree(symbols, symbol_path_origin_dir) with open(outfile, 'w') as out: out.write('var tree_data=') # Use separators without whitespace to get a smaller file. json.dump(tree_root, out, separators=(',', ':')) print('Writing %d bytes json' % os.path.getsize(outfile)) def MakeSourceMap(symbols): sources = {} for _sym, _symbol_type, size, path, _address in symbols: key = None if path: key = os.path.normpath(path) else: key = '[no path]' if key not in sources: sources[key] = {'path': path, 'symbol_count': 0, 'size': 0} record = sources[key] record['size'] += size record['symbol_count'] += 1 return sources # Regex for parsing "nm" output. A sample line looks like this: # 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95 # # The fields are: address, size, type, name, source location # Regular expression explained ( see also: https://xkcd.com/208 ): # ([0-9a-f]{8,}+) The address # [\s]+ Whitespace separator # ([0-9a-f]{8,}+) The size. From here on out it's all optional. # [\s]+ Whitespace separator # (\S?) The symbol type, which is any non-whitespace char # [\s*] Whitespace separator # ([^\t]*) Symbol name, any non-tab character (spaces ok!) # [\t]? 
Tab separator # (.*) The location (filename[:linennum|?][ (discriminator n)] sNmPattern = re.compile( r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)') class Progress(): def __init__(self): self.count = 0 self.skip_count = 0 self.collisions = 0 self.time_last_output = time.time() self.count_last_output = 0 self.disambiguations = 0 self.was_ambiguous = 0 def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs, disambiguate, src_path): nm_output = RunNm(library, nm_binary) nm_output_lines = nm_output.splitlines() nm_output_lines_len = len(nm_output_lines) address_symbol = {} progress = Progress() def map_address_symbol(symbol, addr): progress.count += 1 if addr in address_symbol: # 'Collision between %s and %s.' % (str(symbol.name), # str(address_symbol[addr].name)) progress.collisions += 1 else: if symbol.disambiguated: progress.disambiguations += 1 if symbol.was_ambiguous: progress.was_ambiguous += 1 address_symbol[addr] = symbol progress_output() def progress_output(): progress_chunk = 100 if progress.count % progress_chunk == 0: time_now = time.time() time_spent = time_now - progress.time_last_output if time_spent > 1.0: # Only output at most once per second. progress.time_last_output = time_now chunk_size = progress.count - progress.count_last_output progress.count_last_output = progress.count if time_spent > 0: speed = chunk_size / time_spent else: speed = 0 progress_percent = (100.0 * ( progress.count + progress.skip_count) / nm_output_lines_len) disambiguation_percent = 0 if progress.disambiguations != 0: disambiguation_percent = (100.0 * progress.disambiguations / progress.was_ambiguous) sys.stdout.write( '\r%.1f%%: Looked up %d symbols (%d collisions, ' '%d disambiguations where %.1f%% succeeded)' ' - %.1f lookups/s.' 
% (progress_percent, progress.count, progress.collisions, progress.disambiguations, disambiguation_percent, speed)) # In case disambiguation was disabled, we remove the source path (which upon # being set signals the symbolizer to enable disambiguation) if not disambiguate: src_path = None symbolizer = elf_symbolizer.ELFSymbolizer( library, addr2line_binary, map_address_symbol, max_concurrent_jobs=jobs, source_root_path=src_path) user_interrupted = False try: for binary_line in nm_output_lines: line = binary_line.decode() match = sNmPattern.match(line) if match: location = match.group(5) if not location: addr = int(match.group(1), 16) size = int(match.group(2), 16) if addr in address_symbol: # Already looked up, shortcut # ELFSymbolizer. map_address_symbol(address_symbol[addr], addr) continue elif size == 0: # Save time by not looking up empty symbols (do they even exist?) print('Empty symbol: ' + line) else: symbolizer.SymbolizeAsync(addr, addr) continue progress.skip_count += 1 except KeyboardInterrupt: user_interrupted = True print('Interrupting - killing subprocesses. Please wait.') try: symbolizer.Join() except KeyboardInterrupt: # Don't want to abort here since we will be finished in a few seconds. user_interrupted = True print('Patience you must have my young padawan.') print('') if user_interrupted: print('Skipping the rest of the file mapping. ' 'Output will not be fully classified.') symbol_path_origin_dir = os.path.dirname(os.path.abspath(library)) with open(outfile, 'w') as out: for binary_line in nm_output_lines: line = binary_line.decode() match = sNmPattern.match(line) if match: location = match.group(5) if not location: addr = int(match.group(1), 16) symbol = address_symbol.get(addr) if symbol is not None: path = '??' 
if symbol.source_path is not None: path = os.path.abspath( os.path.join(symbol_path_origin_dir, symbol.source_path)) line_number = 0 if symbol.source_line is not None: line_number = symbol.source_line out.write('%s\t%s:%d\n' % (line, path, line_number)) continue out.write('%s\n' % line) print('%d symbols in the results.' % len(address_symbol)) def RunNm(binary, nm_binary): cmd = [ nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort', binary ] nm_process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (process_output, err_output) = nm_process.communicate() if nm_process.returncode != 0: if err_output: raise Exception(err_output) else: raise Exception(process_output) return process_output def GetNmSymbols(nm_infile, outfile, library, jobs, verbose, addr2line_binary, nm_binary, disambiguate, src_path): if nm_infile is None: if outfile is None: outfile = tempfile.NamedTemporaryFile(delete=False).name if verbose: print('Running parallel addr2line, dumping symbols to ' + outfile) RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs, disambiguate, src_path) nm_infile = outfile elif verbose: print('Using nm input from ' + nm_infile) with open(nm_infile, 'r') as infile: return list(binary_size_utils.ParseNm(infile)) PAK_RESOURCE_ID_TO_STRING = {"inited": False} def LoadPakIdsFromResourceFile(filename): """Given a file name, it loads everything that looks like a resource id into PAK_RESOURCE_ID_TO_STRING.""" with open(filename) as resource_header: for line in resource_header: if line.startswith("#define "): line_data = line.split() if len(line_data) == 3: try: resource_number = int(line_data[2]) resource_name = line_data[1] PAK_RESOURCE_ID_TO_STRING[ resource_number] = resource_name except ValueError: pass def GetReadablePakResourceName(pak_file, resource_id): """Pak resources have a numeric identifier. It is not helpful when trying to locate where footprint is generated. 
This does its best to map the number to a usable string.""" if not PAK_RESOURCE_ID_TO_STRING['inited']: # Try to find resource header files generated by grit when # building the pak file. We'll look for files named *resources.h" # and lines of the type: # #define MY_RESOURCE_JS 1234 PAK_RESOURCE_ID_TO_STRING['inited'] = True gen_dir = os.path.join(os.path.dirname(pak_file), 'gen') if os.path.isdir(gen_dir): for dirname, _dirs, files in os.walk(gen_dir): for filename in files: if filename.endswith('resources.h'): LoadPakIdsFromResourceFile( os.path.join(dirname, filename)) return PAK_RESOURCE_ID_TO_STRING.get(resource_id, 'Pak Resource %d' % resource_id) def AddPakData(symbols, pak_file): """Adds pseudo-symbols from a pak file.""" pak_file = os.path.abspath(pak_file) with open(pak_file, 'rb') as pak: data = pak.read() PAK_FILE_VERSION = 4 HEADER_LENGTH = 2 * 4 + 1 # Two uint32s. (file version, number of entries) # and one uint8 (encoding of text resources) INDEX_ENTRY_SIZE = 2 + 4 # Each entry is a uint16 and a uint32. version, num_entries, _encoding = struct.unpack('<IIB', data[:HEADER_LENGTH]) assert version == PAK_FILE_VERSION, ( 'Unsupported pak file ' 'version (%d) in %s. Only ' 'support version %d' % (version, pak_file, PAK_FILE_VERSION)) if num_entries > 0: # Read the index and data. data = data[HEADER_LENGTH:] for _ in range(num_entries): resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE]) data = data[INDEX_ENTRY_SIZE:] _next_id, next_offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE]) resource_size = next_offset - offset symbol_name = GetReadablePakResourceName(pak_file, resource_id) symbol_path = pak_file symbol_type = 'd' # Data. Approximation. 
symbol_size = resource_size symbols.append((symbol_name, symbol_type, symbol_size, symbol_path)) def _find_in_system_path(binary): """Locate the full path to binary in the system path or return None if not found.""" system_path = os.environ["PATH"].split(os.pathsep) for path in system_path: binary_path = os.path.join(path, binary) if os.path.isfile(binary_path): return binary_path return None def CheckDebugFormatSupport(library, addr2line_binary): """Kills the program if debug data is in an unsupported format. There are two common versions of the DWARF debug formats and since we are right now transitioning from DWARF2 to newer formats, it's possible to have a mix of tools that are not compatible. Detect that and abort rather than produce meaningless output.""" tool_output = subprocess.check_output([addr2line_binary, '--version']).decode() version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M) parsed_output = version_re.match(tool_output) major = int(parsed_output.group(1)) minor = int(parsed_output.group(2)) supports_dwarf4 = major > 2 or major == 2 and minor > 22 if supports_dwarf4: return print('Checking version of debug information in %s.' % library) debug_info = subprocess.check_output( ['readelf', '--debug-dump=info', '--dwarf-depth=1', library]) dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M) parsed_dwarf_format_output = dwarf_version_re.search(debug_info) version = int(parsed_dwarf_format_output.group(1)) if version > 2: print( 'The supplied tools only support DWARF2 debug data but the binary\n' + 'uses DWARF%d. Update the tools or compile the binary\n' % version + 'with -gdwarf-2.') sys.exit(1) def main(): usage = """%prog [options] Runs a spatial analysis on a given library, looking up the source locations of its symbols and calculating how much space each directory, source file, and so on is taking. The result is a report that can be used to pinpoint sources of large portions of the binary, etceteras. 
Under normal circumstances, you only need to pass two arguments, thusly: %prog --library /path/to/library --destdir /path/to/output In this mode, the program will dump the symbols from the specified library and map those symbols back to source locations, producing a web-based report in the specified output directory. Other options are available via '--help'. """ parser = optparse.OptionParser(usage=usage) parser.add_option( '--nm-in', metavar='PATH', help='if specified, use nm input from <path> instead of ' 'generating it. Note that source locations should be ' 'present in the file; i.e., no addr2line symbol lookups ' 'will be performed when this option is specified. ' 'Mutually exclusive with --library.') parser.add_option( '--destdir', metavar='PATH', help='write output to the specified directory. An HTML ' 'report is generated here along with supporting files; ' 'any existing report will be overwritten.') parser.add_option( '--library', metavar='PATH', help='if specified, process symbols in the library at ' 'the specified path. Mutually exclusive with --nm-in.') parser.add_option( '--pak', metavar='PATH', help='if specified, includes the contents of the ' 'specified *.pak file in the output.') parser.add_option( '--nm-binary', help='use the specified nm binary to analyze library. ' 'This is to be used when the nm in the path is not for ' 'the right architecture or of the right version.') parser.add_option( '--addr2line-binary', help='use the specified addr2line binary to analyze ' 'library. This is to be used when the addr2line in ' 'the path is not for the right architecture or ' 'of the right version.') parser.add_option( '--jobs', type='int', help='number of jobs to use for the parallel ' 'addr2line processing pool; defaults to 1. More ' 'jobs greatly improve throughput but eat RAM like ' 'popcorn, and take several gigabytes each. Start low ' 'and ramp this number up until your machine begins to ' 'struggle with RAM. 
' 'This argument is only valid when using --library.') parser.add_option( '-v', '--verbose', dest='verbose', action='store_true', help='be verbose, printing lots of status information.') parser.add_option( '--nm-out', metavar='PATH', help='(deprecated) No-op. nm.out is stored in --destdir.') parser.add_option( '--no-nm-out', action='store_true', help='do not keep the nm output file. This file is useful ' 'if you want to see the fully processed nm output after ' 'the symbols have been mapped to source locations, or if ' 'you plan to run explain_binary_size_delta.py. By default ' 'the file \'nm.out\' is placed alongside the generated ' 'report. The nm.out file is only created when using ' '--library.') parser.add_option( '--disable-disambiguation', action='store_true', help='disables the disambiguation process altogether,' ' NOTE: this may, depending on your toolchain, produce' ' output with some symbols at the top layer if addr2line' ' could not get the entire source path.') parser.add_option( '--source-path', default='./', help='the path to the source code of the output binary, ' 'default set to current directory. Used in the' ' disambiguation process.') opts, _args = parser.parse_args() if ((not opts.library) and (not opts.nm_in)) or (opts.library and opts.nm_in): parser.error('exactly one of --library or --nm-in is required') if opts.nm_out: print('WARNING: --nm-out is deprecated and has no effect.', file=sys.stderr) if (opts.nm_in): if opts.jobs: print('WARNING: --jobs has no effect when used with --nm-in', file=sys.stderr) if not opts.destdir: parser.error('--destdir is a required argument') if not opts.jobs: # Use the number of processors but cap between 2 and 4 since raw # CPU power isn't the limiting factor. It's I/O limited, memory # bus limited and available-memory-limited. Too many processes and # the computer will run out of memory and it will be slow. 
opts.jobs = max(2, min(4, multiprocessing.cpu_count())) if opts.addr2line_binary: assert os.path.isfile(opts.addr2line_binary) addr2line_binary = opts.addr2line_binary else: addr2line_binary = _find_in_system_path('addr2line') assert addr2line_binary, 'Unable to find addr2line in the path. '\ 'Use --addr2line-binary to specify location.' if opts.nm_binary: assert os.path.isfile(opts.nm_binary) nm_binary = opts.nm_binary else: nm_binary = _find_in_system_path('nm') assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\ 'to specify location.' if opts.pak: assert os.path.isfile(opts.pak), 'Could not find ' % opts.pak print('addr2line: %s' % addr2line_binary) print('nm: %s' % nm_binary) if opts.library: CheckDebugFormatSupport(opts.library, addr2line_binary) # Prepare output directory and report guts if not os.path.exists(opts.destdir): os.makedirs(opts.destdir, 0o755) nm_out = os.path.join(opts.destdir, 'nm.out') if opts.no_nm_out: nm_out = None # Copy report boilerplate into output directory. This also proves that the # output directory is safe for writing, so there should be no problems writing # the nm.out file later. 
data_js_file_name = os.path.join(opts.destdir, 'data.js') d3_out = os.path.join(opts.destdir, 'd3') if not os.path.exists(d3_out): os.makedirs(d3_out, 0o755) d3_src = os.path.join(os.path.dirname(__file__), '..', '..', 'd3', 'src') template_src = os.path.join(os.path.dirname(__file__), 'template') shutil.copy(os.path.join(d3_src, 'LICENSE'), d3_out) shutil.copy(os.path.join(d3_src, 'd3.js'), d3_out) shutil.copy(os.path.join(template_src, 'index.html'), opts.destdir) shutil.copy(os.path.join(template_src, 'D3SymbolTreeMap.js'), opts.destdir) # Run nm and/or addr2line to gather the data symbols = GetNmSymbols(opts.nm_in, nm_out, opts.library, opts.jobs, opts.verbose is True, addr2line_binary, nm_binary, opts.disable_disambiguation is None, opts.source_path) # Post-processing if opts.pak: AddPakData(symbols, opts.pak) if opts.library: symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library)) else: # Just a guess. Hopefully all paths in the input file are absolute. symbol_path_origin_dir = os.path.abspath(os.getcwd()) # Dump JSON for the HTML report. DumpCompactTree(symbols, symbol_path_origin_dir, data_js_file_name) print('Report saved to ' + opts.destdir + '/index.html') if __name__ == '__main__': sys.exit(main())
unknown
codeparrot/codeparrot-clean
# # The Python Imaging Library. # $Id$ # # MSP file handling # # This is the format used by the Paint program in Windows 1 and 2. # # History: # 95-09-05 fl Created # 97-01-03 fl Read/write MSP images # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1995-97. # # See the README file for information on usage and redistribution. # __version__ = "0.1" from PIL import Image, ImageFile, _binary # # read MSP files i16 = _binary.i16le def _accept(prefix): return prefix[:4] in [b"DanM", b"LinS"] ## # Image plugin for Windows MSP images. This plugin supports both # uncompressed (Windows 1.0). class MspImageFile(ImageFile.ImageFile): format = "MSP" format_description = "Windows Paint" def _open(self): # Header s = self.fp.read(32) if s[:4] not in [b"DanM", b"LinS"]: raise SyntaxError("not an MSP file") # Header checksum sum = 0 for i in range(0, 32, 2): sum = sum ^ i16(s[i:i+2]) if sum != 0: raise SyntaxError("bad MSP checksum") self.mode = "1" self.size = i16(s[4:]), i16(s[6:]) if s[:4] == b"DanM": self.tile = [("raw", (0,0)+self.size, 32, ("1", 0, 1))] else: self.tile = [("msp", (0,0)+self.size, 32+2*self.size[1], None)] # # write MSP files (uncompressed only) o16 = _binary.o16le def _save(im, fp, filename): if im.mode != "1": raise IOError("cannot write mode %s as MSP" % im.mode) # create MSP header header = [0] * 16 header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 header[2], header[3] = im.size header[4], header[5] = 1, 1 header[6], header[7] = 1, 1 header[8], header[9] = im.size sum = 0 for h in header: sum = sum ^ h header[12] = sum # FIXME: is this the right field? # header for h in header: fp.write(o16(h)) # image body ImageFile._save(im, fp, [("raw", (0,0)+im.size, 32, ("1", 0, 1))]) # # registry Image.register_open("MSP", MspImageFile, _accept) Image.register_save("MSP", _save) Image.register_extension("MSP", ".msp")
unknown
codeparrot/codeparrot-clean
from urllib2 import urlopen import requests import sys import random import json import time green = '\033[01;32m' red = '\033[01;31m' native = '\033[m' my_ip=0 def check_website_access(): sys.stdout.write("Checking if website accessible to the Internet......") r = requests.get('http://'+my_ip) if r.status_code == 200: sys.stdout.write(green+"\t Success\n"+native) else: sys.stdout.write(red+"\t Fail\n"+native) def check_rest_functionality(): print "Checking REST Framework functionality......" sys.stdout.write("\t POST /api/device-api/......") url = 'http://'+my_ip+'/api/device-api/' serial = random.randrange(1,65535) payload = {'serial':serial} headers = {'content-type': 'application/json'} r = requests.post(url, data=json.dumps(payload), headers=headers) if r.status_code == 201: sys.stdout.write(green+"\t Success\n"+native) else: sys.stdout.write(red+"\t Fail\n"+native) sys.stdout.write("\t GET /api/device-api/......") url = 'http://'+my_ip+'/api/device-api/' r = requests.get(url) if r.status_code == 200: sys.stdout.write(green+"\t Success\n"+native) else: sys.stdout.write(red+"\t Fail\n"+native) sys.stdout.write("\t GET /api/settings-api/"+str(serial)+"/") url = 'http://'+my_ip+'/api/settings-api/'+str(serial)+'/' r = requests.get(url) if r.status_code == 200: sys.stdout.write(green+"\t Success\n"+native) else: sys.stdout.write(red+"\t Fail\n"+native) sys.stdout.write("\t POST /api/event-api/") url = 'http://'+my_ip+'/api/event-api/' payload = { "device": "/api/device-api/"+str(serial)+"/", "time": [str(hex(int(time.time()*1000))), "1"], "dataPoints": [{"wattage": random.random()}], } r = requests.post(url, data=json.dumps(payload), headers=headers) if r.status_code == 201: sys.stdout.write(green+"\t\t Success\n"+native) else: sys.stdout.write(red+"\t\t Fail\n"+native) print r.text sys.stdout.write("\t DELETE /api/device-api/"+str(serial)+"/") url = 'http://'+my_ip+'/api/device-api/'+str(serial)+'/' r = requests.delete(url) if r.status_code == 204: 
sys.stdout.write(green+"\t Success\n"+native) else: sys.stdout.write(red+"\t Fail\n"+native) def main(): check_website_access() check_rest_functionality() if __name__ == "__main__": my_ip = urlopen('http://ip.42.pl/raw').read() main()
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.exception import JSONResponseError class LimitExceededException(JSONResponseError): pass class ResourceInUseException(JSONResponseError): pass class AccessDeniedException(JSONResponseError): pass class ResourceNotFoundException(JSONResponseError): pass class InternalServiceException(JSONResponseError): pass class ValidationException(JSONResponseError): pass class IncompatibleVersionException(JSONResponseError): pass
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python """ @file route_departOffset.py @author Daniel Krajzewicz @author Michael Behrisch @date 11.09.2009 @version $Id: route_departOffset.py 11671 2012-01-07 20:14:30Z behrisch $ Applies a given offset to the given route's departure time SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/ Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors All rights reserved """ import sys, optparse, array from xml.sax import make_parser, handler class RouteReader(handler.ContentHandler): def __init__(self, offset, out): self._offset = offset self._out = out def startElement(self, name, attrs): self._out.write('<' + name) for a in attrs.keys(): val = attrs[a] if a=="depart": val = str(int(val) + self._offset) if a=="id": val = val + "_" + str(self._offset) self._out.write(' ' + a + '="' + val + '"') self._out.write('>') def endElement(self, name): self._out.write('</' + name + '>') def characters(self, content): self._out.write(content) def main(infile, outfile, offset): out = open(outfile, "w") parser = make_parser() parser.setContentHandler(RouteReader(offset, out)) parser.parse(infile) if __name__ == "__main__": if len(sys.argv) < 4: print "Usage: route_departOffset.py <INPUT_FILE> <OUTPUT_FILE> <OFFSET>" sys.exit() main(sys.argv[1], sys.argv[2], int(sys.argv[3]))
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- #------------------------------------------------------------ # pelisalacarta - XBMC Plugin # Conector para videos externos de videobam # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os from core import scrapertools from core import logger from core import config def test_video_exists( page_url ): logger.info("[videobam.py] test_video_exists(page_url='%s')" % page_url) data = scrapertools.cache_page(page_url) if "Video is processing" in data: return False,"El fichero está en proceso" return True,"" def get_video_url( page_url , premium = False , user="" , password="", video_password="" ): logger.info("[videobam.py] get_video_url(page_url='%s')" % page_url) data = scrapertools.cache_page(page_url) video_urls = [] patronSD= " low: '([^']+)'" matches = re.compile(patronSD,re.DOTALL).findall(data) for match in matches: videourl = match video_urls.append( [ "LQ [videobam]" , videourl ] ) patronHD = " high: '([^']+)'" matches = re.compile(patronHD,re.DOTALL).findall(data) for match in matches: videourl = match video_urls.append( [ "HQ [videobam]" , videourl ] ) if len(matches)==0: # "scaling":"fit","url":"http:\/\/f10.videobam.com\/storage\/11\/videos\/a\/aa\/AaUsV\/encoded.mp4 patron = '[\W]scaling[\W]:[\W]fit[\W],[\W]url"\:"([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) for match in matches: videourl = match.replace('\/','/') videourl = urllib.unquote(videourl) video_urls.append( [ ".mp4 [videobam]" , videourl ] ) for video_url in video_urls: logger.info("[videobam.py] %s - %s" % (video_url[0],video_url[1])) return video_urls # Encuentra vídeos del servidor en el texto pasado def find_videos(data): encontrados = set() devuelve = [] # VideoBam para AnimeID src="http://videobam.com/widget/USezW" # VideoBam custom src="http://videobam.com/widget/USezW/custom/568" patronvideos = 'videobam.com/widget/([\w]+)' 
logger.info("[videobam.py] find_videos #"+patronvideos+"#") matches = re.compile(patronvideos,re.DOTALL).findall(data) for match in matches: titulo = "[videobam]" url = "http://videobam.com/"+match if url not in encontrados: logger.info(" url="+url) devuelve.append( [ titulo , url , 'videobam' ] ) encontrados.add(url) else: logger.info(" url duplicada="+url) # http://videobam.com/fsgUt patronvideos = 'videobam.com/([\w]+)' logger.info("[videobam.py] find_videos #"+patronvideos+"#") matches = re.compile(patronvideos,re.DOTALL).findall(data) for match in matches: titulo = "[videobam]" url = "http://videobam.com/"+match if not match.startswith("videos"): if url not in encontrados and url!="http://videobam.com/widget": logger.info(" url="+url) devuelve.append( [ titulo , url , 'videobam' ] ) encontrados.add(url) else: logger.info(" url duplicada="+url) return devuelve def test(): video_urls = get_video_url("http://videobam.com/enant") return len(video_urls)>0
unknown
codeparrot/codeparrot-clean
import logging import operator from datetime import datetime from itertools import chain from django.conf import settings from django.core.exceptions import FieldError from django.db.backends.ddl_references import ( Columns, Expressions, ForeignKeyName, IndexName, Statement, Table, ) from django.db.backends.utils import names_digest, split_identifier, truncate_name from django.db.models import Deferrable, Index from django.db.models.fields.composite import CompositePrimaryKey from django.db.models.sql import Query from django.db.transaction import TransactionManagementError, atomic from django.utils import timezone logger = logging.getLogger("django.db.backends.schema") def _is_relevant_relation(relation, altered_field): """ When altering the given field, must constraints on its model from the given relation be temporarily dropped? """ field = relation.field if field.many_to_many: # M2M reverse field return False if altered_field.primary_key and field.to_fields == [None]: # Foreign key constraint on the primary key, which is being altered. return True # Is the constraint targeting the field being altered? return altered_field.name in field.to_fields def _all_related_fields(model): # Related fields must be returned in a deterministic order. return sorted( model._meta._get_fields( forward=False, reverse=True, include_hidden=True, include_parents=False, ), key=operator.attrgetter("name"), ) def _related_non_m2m_objects(old_field, new_field): # Filter out m2m objects from reverse relations. # Return (old_relation, new_relation) tuples. 
related_fields = zip( ( obj for obj in _all_related_fields(old_field.model) if _is_relevant_relation(obj, old_field) ), ( obj for obj in _all_related_fields(new_field.model) if _is_relevant_relation(obj, new_field) ), ) for old_rel, new_rel in related_fields: yield old_rel, new_rel yield from _related_non_m2m_objects( old_rel.remote_field, new_rel.remote_field, ) class BaseDatabaseSchemaEditor: """ This class and its subclasses are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. """ # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s%(collation)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_alter_column_no_default_null = sql_alter_column_no_default sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s" sql_rename_column = ( "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" ) sql_update_with_default = ( "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" ) sql_unique_constraint = "UNIQUE (%(columns)s)%(deferrable)s" sql_check_constraint = "CHECK (%(check)s)" sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_constraint = "CONSTRAINT %(name)s %(constraint)s" sql_pk_constraint = "PRIMARY KEY (%(columns)s)" sql_create_check = 
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = sql_delete_constraint sql_create_unique = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s " "UNIQUE%(nulls_distinct)s (%(columns)s)%(deferrable)s" ) sql_delete_unique = sql_delete_constraint sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s)%(on_delete_db)s%(deferrable)s" ) sql_create_inline_fk = None sql_create_column_inline_fk = None sql_delete_fk = sql_delete_constraint sql_create_index = ( "CREATE INDEX %(name)s ON %(table)s " "(%(columns)s)%(include)s%(extra)s%(condition)s" ) sql_create_unique_index = ( "CREATE UNIQUE INDEX %(name)s ON %(table)s " "(%(columns)s)%(include)s%(nulls_distinct)s%(condition)s" ) sql_rename_index = "ALTER INDEX %(old_name)s RENAME TO %(new_name)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" ) sql_delete_pk = sql_delete_constraint sql_delete_procedure = "DROP PROCEDURE %(procedure)s" sql_alter_table_comment = "COMMENT ON TABLE %(table)s IS %(comment)s" sql_alter_column_comment = "COMMENT ON COLUMN %(table)s.%(column)s IS %(comment)s" def __init__(self, connection, collect_sql=False, atomic=True): self.connection = connection self.collect_sql = collect_sql if self.collect_sql: self.collected_sql = [] self.atomic_migration = self.connection.features.can_rollback_ddl and atomic # State-managing methods def __enter__(self): self.deferred_sql = [] if self.atomic_migration: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql, None) if self.atomic_migration: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, sql, params=()): """Execute the given SQL statement, with optional parameters.""" # Don't 
perform the transactional DDL check if SQL is being collected # as it's not going to be executed anyway. if ( not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl ): raise TransactionManagementError( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) # Account for non-string statement objects. sql = str(sql) # Log the command we're running, then run it logger.debug( "%s; (params %r)", sql, params, extra={"params": params, "sql": sql} ) if self.collect_sql: ending = "" if sql.rstrip().endswith(";") else ";" if params is not None: self.collected_sql.append( (sql % tuple(map(self.quote_value, params))) + ending ) else: self.collected_sql.append(sql + ending) else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) def table_sql(self, model): """Take a model and return its table definition.""" # Add any unique_togethers (always deferred, as some fields might be # created afterward, like geometry fields with some backends). for field_names in model._meta.unique_together: fields = [model._meta.get_field(field) for field in field_names] self.deferred_sql.append(self._create_unique_sql(model, fields)) # Create column SQL, add FK deferreds if needed. column_sqls = [] params = [] for field in model._meta.local_fields: # SQL. definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here. db_params = field.db_parameters(connection=self.connection) if db_params["check"]: definition += " " + self.sql_check_constraint % db_params # Autoincrement SQL (for backends with inline variant). col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += " %s" % col_type_suffix params.extend(extra_params) # FK. 
if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column if self.sql_create_inline_fk: definition += " " + self.sql_create_inline_fk % { "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), "on_delete_db": self._create_on_delete_sql(model, field), } elif self.connection.features.supports_foreign_keys: self.deferred_sql.append( self._create_fk_sql( model, field, "_fk_%(to_table)s_%(to_column)s" ) ) # Add the SQL to our big list. column_sqls.append( "%s %s" % ( self.quote_name(field.column), definition, ) ) # Autoincrement SQL (for backends with post table definition # variant). if field.get_internal_type() in ( "AutoField", "BigAutoField", "SmallAutoField", ): autoinc_sql = self.connection.ops.autoinc_sql( model._meta.db_table, field.column ) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) # The BaseConstraint DDL creation methods such as constraint_sql(), # create_sql(), and delete_sql(), were not designed in a way that # separate SQL from parameters which make their generated SQL unfit to # be used in a context where parametrization is delegated to the # backend. constraint_sqls = [] if params: # If parameters are present (e.g. a DEFAULT clause on backends that # allow parametrization) defer constraint creation so they are not # mixed with SQL meant to be parametrized. 
for constraint in model._meta.constraints: self.deferred_sql.append(constraint.create_sql(model, self)) else: constraint_sqls.extend( constraint.constraint_sql(model, self) for constraint in model._meta.constraints ) pk = model._meta.pk if isinstance(pk, CompositePrimaryKey): constraint_sqls.append(self._pk_constraint_sql(pk.columns)) sql = self.sql_create_table % { "table": self.quote_name(model._meta.db_table), "definition": ", ".join( str(statement) for statement in (*column_sqls, *constraint_sqls) if statement ), } if model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql( model._meta.db_tablespace ) if tablespace_sql: sql += " " + tablespace_sql return sql, params # Field <-> database mapping functions def _iter_column_sql( self, column_db_type, params, model, field, field_db_params, include_default ): yield column_db_type if collation := field_db_params.get("collation"): yield self._collate_sql(collation) # Work out nullability. null = field.null # Add database default. if field.has_db_default(): default_sql, default_params = self.db_default_sql(field) yield f"DEFAULT {default_sql}" params.extend(default_params) include_default = False # Include a default value, if requested. include_default = ( include_default and not self.skip_default(field) and # Don't include a default value if it's a nullable field and the # default cannot be dropped in the ALTER COLUMN statement (e.g. # MySQL longtext and longblob). not (null and self.skip_default_on_alter(field)) ) if include_default: default_value = self.effective_default(field) if default_value is not None: column_default = "DEFAULT " + self._column_default_sql(field) if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter # (Oracle, SQLite). If this is the case, the individual # schema backend should implement prepare_default(). 
yield column_default % self.prepare_default(default_value) else: yield column_default params.append(default_value) # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. if ( field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls ): null = True if field.generated: generated_sql, generated_params = self._column_generated_sql(field) params.extend(generated_params) yield generated_sql elif not null: yield "NOT NULL" elif not self.connection.features.implied_column_null: yield "NULL" if field.primary_key: yield "PRIMARY KEY" elif field.unique: yield "UNIQUE" # Optionally add the tablespace if it's an implicitly indexed column. tablespace = field.db_tablespace or model._meta.db_tablespace if ( tablespace and self.connection.features.supports_tablespaces and field.unique ): yield self.connection.ops.tablespace_sql(tablespace, inline=True) if self.connection.features.supports_comments_inline and field.db_comment: yield self._comment_sql(field.db_comment) def column_sql(self, model, field, include_default=False): """ Return the column definition for a field. The field must already have had set_attributes_from_name() called. """ # Get the column's type and use that as the basis of the SQL. field_db_params = field.db_parameters(connection=self.connection) column_db_type = field_db_params["type"] # Check for fields that aren't actually columns (e.g. M2M). if column_db_type is None: return None, None params = [] return ( " ".join( # This appends to the params being returned. self._iter_column_sql( column_db_type, params, model, field, field_db_params, include_default, ) ), params, ) def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def skip_default_on_alter(self, field): """ Some backends don't accept default values for certain columns types (i.e. 
MySQL longtext and longblob) in the ALTER COLUMN statement. """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( "subclasses of BaseDatabaseSchemaEditor for backends which have " "requires_literal_defaults must provide a prepare_default() method" ) def _column_default_sql(self, field): """ Return the SQL to use in a DEFAULT clause. The resulting string should contain a '%s' placeholder for a default value. """ return "%s" def db_default_sql(self, field): """Return the sql and params for the field's database default.""" from django.db.models.expressions import Value db_default = field._db_default_expression sql = ( self._column_default_sql(field) if isinstance(db_default, Value) else "(%s)" ) query = Query(model=field.model) compiler = query.get_compiler(connection=self.connection) default_sql, params = compiler.compile(db_default) if self.connection.features.requires_literal_defaults: # Some databases don't support parameterized defaults (Oracle, # SQLite). If this is the case, the individual schema backend # should implement prepare_default(). default_sql %= tuple(self.prepare_default(p) for p in params) params = [] return sql % default_sql, params def _column_generated_persistency_sql(self, field): """Return the SQL to define the persistency of generated fields.""" return "STORED" if field.db_persist else "VIRTUAL" def _column_generated_sql(self, field): """Return the SQL to use in a GENERATED ALWAYS clause.""" expression_sql, params = field.generated_sql(self.connection) persistency_sql = self._column_generated_persistency_sql(field) if self.connection.features.requires_literal_defaults: expression_sql = expression_sql % tuple(self.quote_value(p) for p in params) params = () return f"GENERATED ALWAYS AS ({expression_sql}) {persistency_sql}", params @staticmethod def _effective_default(field): # This method allows testing its logic without a connection. 
if field.has_default(): default = field.get_default() elif field.generated: default = None elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = b"" else: default = "" elif getattr(field, "auto_now", False) or getattr(field, "auto_now_add", False): internal_type = field.get_internal_type() if internal_type == "DateTimeField": default = timezone.now() else: default = datetime.now() if internal_type == "DateField": default = default.date() elif internal_type == "TimeField": default = default.time() else: default = None return default def effective_default(self, field): """Return a field's effective database default value.""" return field.get_db_prep_save(self._effective_default(field), self.connection) def quote_value(self, value): """ Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Create a table and any accompanying indexes or unique constraints for the given `model`. """ sql, params = self.table_sql(model) # Prevent using [] as params, in the case a literal '%' is used in the # definition on backends that don't support parametrized DDL. self.execute(sql, params or None) if self.connection.features.supports_comments: # Add table comment. if model._meta.db_table_comment: self.alter_db_table_comment(model, None, model._meta.db_table_comment) # Add column comments. 
if not self.connection.features.supports_comments_inline: for field in model._meta.local_fields: if field.db_comment: field_db_params = field.db_parameters( connection=self.connection ) field_type = field_db_params["type"] self.execute( *self._alter_column_comment_sql( model, field, field_type, field.db_comment ) ) # Add any field index (deferred as SQLite _remake_table needs it). self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """Delete a model from the database.""" # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute( self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), } ) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table( model._meta.db_table ): self.deferred_sql.remove(sql) def add_index(self, model, index): """Add an index on a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None # Index.create_sql returns interpolated SQL which makes params=None a # necessity to avoid escaping attempts on execution. 
self.execute(index.create_sql(model, self), params=None) def remove_index(self, model, index): """Remove an index from a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None self.execute(index.remove_sql(model, self)) def rename_index(self, model, old_index, new_index): if self.connection.features.can_rename_index: self.execute( self._rename_index_sql(model, old_index.name, new_index.name), params=None, ) else: self.remove_index(model, old_index) self.add_index(model, new_index) def add_constraint(self, model, constraint): """Add a constraint to a model.""" sql = constraint.create_sql(model, self) if sql: # Constraint.create_sql returns interpolated SQL which makes # params=None a necessity to avoid escaping attempts on execution. self.execute(sql, params=None) def remove_constraint(self, model, constraint): """Remove a constraint from a model.""" sql = constraint.remove_sql(model, self) if sql: self.execute(sql) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_unique_together} news = {tuple(fields) for fields in new_unique_together} # Deleted uniques for fields in olds.difference(news): self._delete_composed_index( model, fields, {"unique": True, "primary_key": False}, self.sql_delete_unique, ) # Created uniques for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_unique_sql(model, fields)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. 
""" olds = {tuple(fields) for fields in old_index_together} news = {tuple(fields) for fields in new_index_together} # Deleted indexes for fields in olds.difference(news): self._delete_composed_index( model, fields, {"index": True, "unique": False}, self.sql_delete_index, ) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields=fields, suffix="_idx")) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): meta_constraint_names = { constraint.name for constraint in model._meta.constraints } meta_index_names = {constraint.name for constraint in model._meta.indexes} columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names( model, columns, exclude=meta_constraint_names | meta_index_names, **constraint_kwargs, ) if ( constraint_kwargs.get("unique") is True and constraint_names and self.connection.features.allows_multiple_constraints_on_same_fields ): # Constraint matching the unique_together name. default_name = str( self._unique_constraint_name(model._meta.db_table, columns, quote=False) ) if default_name in constraint_names: constraint_names = [default_name] if len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), ) ) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """Rename the table a model points to.""" if old_db_table == new_db_table or ( self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower() ): return self.execute( self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), } ) # Rename all references to the old table name. 
for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_table_references(old_db_table, new_db_table) def alter_db_table_comment(self, model, old_db_table_comment, new_db_table_comment): if self.sql_alter_table_comment and self.connection.features.supports_comments: self.execute( self.sql_alter_table_comment % { "table": self.quote_name(model._meta.db_table), "comment": self.quote_value(new_db_table_comment or ""), } ) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """Move a model's table between tablespaces.""" self.execute( self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), } ) def add_field(self, model, field): """ Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields). """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return if col_type_suffix := field.db_type_suffix(connection=self.connection): definition += f" {col_type_suffix}" # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params["check"]: definition += " " + self.sql_check_constraint % db_params if ( field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint ): constraint_suffix = "_fk_%(to_table)s_%(to_column)s" # Add FK constraint inline, if supported. 
if self.sql_create_column_inline_fk: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field( field.remote_field.field_name ).column namespace, _ = split_identifier(model._meta.db_table) definition += " " + self.sql_create_column_inline_fk % { "name": self._fk_constraint_name(model, field, constraint_suffix), "namespace": ( "%s." % self.quote_name(namespace) if namespace else "" ), "column": self.quote_name(field.column), "to_table": self.quote_name(to_table), "to_column": self.quote_name(to_column), "deferrable": self.connection.ops.deferrable_sql(), "on_delete_db": self._create_on_delete_sql(model, field), } # Otherwise, add FK constraints later. else: self.deferred_sql.append( self._create_fk_sql(model, field, constraint_suffix) ) # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } # Prevent using [] as params, in the case a literal '%' is used in the # definition on backends that don't support parametrized DDL. self.execute(sql, params or None) # Drop the default if we need to if ( not field.has_db_default() and not self.skip_default_on_alter(field) and self.effective_default(field) is not None ): changes_sql, params = self._alter_column_default_sql( model, None, field, drop=True ) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Add field comment, if required. 
if ( field.db_comment and self.connection.features.supports_comments and not self.connection.features.supports_comments_inline ): field_type = db_params["type"] self.execute( *self._alter_column_comment_sql( model, field, field_type, field.db_comment ) ) # Add an index, if required self.deferred_sql.extend(self._field_indexes_sql(model, field)) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)["type"] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_fk_sql(model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() # Remove all deferred statements referencing the deleted column. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_column( model._meta.db_table, field.column ): self.deferred_sql.remove(sql) def alter_field(self, model, old_field, new_field, strict=False): """ Allow a field's type, uniqueness, nullability, default, column, constraints, etc. to be modified. `old_field` is required to compute the necessary changes. If `strict` is True, raise errors if the old column does not match `old_field` precisely. 
""" if not self._field_should_be_altered(old_field, new_field): return # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params["type"] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params["type"] modifying_generated_field = False if (old_type is None and old_field.remote_field is None) or ( new_type is None and new_field.remote_field is None ): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif ( old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created ) ): return self._alter_many_to_many(model, old_field, new_field, strict) elif ( old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created ) ): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) elif old_field.generated != new_field.generated or ( new_field.generated and old_field.db_persist != new_field.db_persist ): modifying_generated_field = True elif new_field.generated: try: old_field_sql = old_field.generated_sql(self.connection) except FieldError: # Field used in a generated field was renamed. modifying_generated_field = True else: new_field_sql = new_field.generated_sql(self.connection) modifying_generated_field = old_field_sql != new_field_sql db_features = self.connection.features # Some databases (e.g. 
Oracle) don't allow altering a data type # for generated columns. if ( not modifying_generated_field and old_type != new_type and not db_features.supports_alter_generated_column_data_type ): modifying_generated_field = True if modifying_generated_field: raise ValueError( f"Modifying GeneratedFields is not supported - the field {new_field} " "must be removed and re-added with the new definition." ) self._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) def _field_db_check(self, field, field_db_params): # Always check constraints with the same mocked column name to avoid # recreating constraints when the column is renamed. check_constraints = self.connection.data_type_check_constraints data = field.db_type_parameters(self.connection) data["column"] = "__column_name__" try: return check_constraints[field.get_internal_type()] % data except KeyError: return None def _alter_field( self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False, ): """Perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if ( self.connection.features.supports_foreign_keys and old_field.remote_field and old_field.db_constraint and self._field_should_be_altered( old_field, new_field, ignore={"db_comment"}, ) ): fk_names = self._constraint_names( model, [old_field.column], foreign_key=True ) if strict and len(fk_names) != 1: raise ValueError( "Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, ) ) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_fk_sql(model, fk_name)) # Has unique been removed? 
if old_field.unique and ( not new_field.unique or self._field_became_primary_key(old_field, new_field) ): # Find the unique constraint for this field meta_constraint_names = { constraint.name for constraint in model._meta.constraints } constraint_names = self._constraint_names( model, [old_field.column], unique=True, primary_key=False, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, ) ) for constraint_name in constraint_names: self.execute(self._delete_unique_sql(model, constraint_name)) # Drop incoming FK constraints if the field is a primary key or unique, # which might be a to_field target, and things are going to change. old_collation = old_db_params.get("collation") new_collation = new_db_params.get("collation") drop_foreign_keys = ( self.connection.features.supports_foreign_keys and ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and ((old_type != new_type) or (old_collation != new_collation)) ) if drop_foreign_keys: # '_meta.related_field' also contains M2M reverse fields, these # will be filtered out for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): rel_fk_names = self._constraint_names( new_rel.related_model, [new_rel.field.column], foreign_key=True ) for fk_name in rel_fk_names: self.execute(self._delete_fk_sql(new_rel.related_model, fk_name)) # Removed an index? (no strict check, as multiple indexes are possible) # Remove indexes if db_index switched to False or a unique constraint # will now be used in lieu of an index. 
The following lines from the # truth table show all True cases; the rest are False: # # old_field | new_field # db_index | unique | db_index | unique # ------------------------------------- # True | False | False | False # True | False | False | True # True | False | True | True if ( old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique) ): # Find the index for this field meta_index_names = {index.name for index in model._meta.indexes} # Retrieve only BTREE indexes since this is what's created with # db_index=True. index_names = self._constraint_names( model, [old_field.column], index=True, type_=Index.suffix, exclude=meta_index_names, ) for index_name in index_names: # The only way to check if an index was created with # db_index=True or with Index(['field'], name='foo') # is to look at its name (refs #28053). self.execute(self._delete_index_sql(model, index_name)) # Change check constraints? old_db_check = self._field_db_check(old_field, old_db_params) new_db_check = self._field_db_check(new_field, new_db_params) if old_db_check != new_db_check and old_db_check: meta_constraint_names = { constraint.name for constraint in model._meta.constraints } constraint_names = self._constraint_names( model, [old_field.column], check=True, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, ) ) for constraint_name in constraint_names: self.execute(self._delete_check_sql(model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute( self._rename_field_sql( model._meta.db_table, old_field, new_field, new_type ) ) # Rename all references to the renamed column. 
for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_column_references( model._meta.db_table, old_field.column, new_field.column ) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Type suffix change? (e.g. auto increment). old_type_suffix = old_field.db_type_suffix(connection=self.connection) new_type_suffix = new_field.db_type_suffix(connection=self.connection) # Type, collation, or comment change? if ( old_type != new_type or old_type_suffix != new_type_suffix or old_collation != new_collation or ( self.connection.features.supports_comments and old_field.db_comment != new_field.db_comment ) ): fragment, other_actions = self._alter_column_type_sql( model, old_field, new_field, new_type, old_collation, new_collation ) actions.append(fragment) post_actions.extend(other_actions) if new_field.has_db_default(): if ( not old_field.has_db_default() or new_field.db_default != old_field.db_default ): actions.append( self._alter_column_database_default_sql(model, old_field, new_field) ) elif old_field.has_db_default(): actions.append( self._alter_column_database_default_sql( model, old_field, new_field, drop=True ) ) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. Drop the default again. # Default change? needs_database_default = False if old_field.null and not new_field.null and not new_field.has_db_default(): old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) if ( not self.skip_default_on_alter(new_field) and old_default != new_default and new_default is not None ): needs_database_default = True actions.append( self._alter_column_default_sql(model, old_field, new_field) ) # Nullability change? 
if old_field.null != new_field.null: fragment = self._alter_column_null_sql(model, old_field, new_field) if fragment: null_actions.append(fragment) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = ( new_field.has_default() or new_field.has_db_default() ) and (old_field.null and not new_field.null) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions += null_actions # Combine actions together if we can (e.g. postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), tuple(chain(*params)))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: if not new_field.has_db_default(): default_sql = "%s" params = [new_default] else: default_sql, params = self.db_default_sql(new_field) # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": default_sql, }, params, ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # If primary_key changed to False, delete the primary key constraint. if old_field.primary_key and not new_field.primary_key: self._delete_primary_key(model, strict) # Added a unique? if self._unique_should_be_added(old_field, new_field): self.execute(self._create_unique_sql(model, [new_field])) # Added an index? 
Add an index if db_index switched to True or a unique # constraint will no longer be used in lieu of an index. The following # lines from the truth table show all True cases; the rest are False: # # old_field | new_field # db_index | unique | db_index | unique # ------------------------------------- # False | False | True | False # False | True | True | False # True | True | True | False if ( (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique ): self.execute(self._create_index_sql(model, fields=[new_field])) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if drop_foreign_keys: rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Changed to become primary key? if self._field_became_primary_key(old_field, new_field): # Make the new one self.execute(self._create_primary_key_sql(model, new_field)) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Handle our type alters on the other end of rels from the PK stuff # above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params["type"] rel_collation = rel_db_params.get("collation") old_rel_db_params = old_rel.field.db_parameters(connection=self.connection) old_rel_collation = old_rel_db_params.get("collation") fragment, other_actions = self._alter_column_type_sql( new_rel.related_model, old_rel.field, new_rel.field, rel_type, old_rel_collation, rel_collation, ) self.execute( self.sql_alter_column % { "table": self.quote_name(new_rel.related_model._meta.db_table), "changes": fragment[0], }, fragment[1], ) for sql, params in other_actions: self.execute(sql, params) # Does it have a foreign key? 
if ( self.connection.features.supports_foreign_keys and new_field.remote_field and ( fks_dropped or not old_field.remote_field or not old_field.db_constraint ) and new_field.db_constraint ): self.execute( self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s") ) # Rebuild FKs that pointed to us if we previously had to drop them if drop_foreign_keys: for _, rel in rels_to_update: if rel.field.db_constraint: self.execute( self._create_fk_sql(rel.related_model, rel.field, "_fk") ) # Does it have check constraints we need to add? if old_db_check != new_db_check and new_db_check: constraint_name = self._create_index_name( model._meta.db_table, [new_field.column], suffix="_check" ) self.execute( self._create_check_sql(model, constraint_name, new_db_params["check"]) ) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: changes_sql, params = self._alter_column_default_sql( model, old_field, new_field, drop=True ) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_null_sql(self, model, old_field, new_field): """ Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required. """ if ( self.connection.features.interprets_empty_strings_as_nulls and new_field.empty_strings_allowed ): # The field is nullable in the database anyway, leave it alone. 
return else: new_db_params = new_field.db_parameters(connection=self.connection) sql = ( self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null ) return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], }, [], ) def _alter_column_default_sql(self, model, old_field, new_field, drop=False): """ Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. """ new_default = self.effective_default(new_field) default = self._column_default_sql(new_field) params = [new_default] if drop: params = [] elif self.connection.features.requires_literal_defaults: # Some databases (Oracle) can't take defaults as a parameter # If this is the case, the SchemaEditor for that database should # implement prepare_default(). default = self.prepare_default(new_default) params = [] new_db_params = new_field.db_parameters(connection=self.connection) if drop: if new_field.null: sql = self.sql_alter_column_no_default_null else: sql = self.sql_alter_column_no_default else: sql = self.sql_alter_column_default return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], "default": default, }, params, ) def _alter_column_database_default_sql( self, model, old_field, new_field, drop=False ): """ Hook to specialize column database default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. 
""" if drop: sql = self.sql_alter_column_no_default default_sql = "" params = [] else: sql = self.sql_alter_column_default default_sql, params = self.db_default_sql(new_field) new_db_params = new_field.db_parameters(connection=self.connection) return ( sql % { "column": self.quote_name(new_field.column), "type": new_db_params["type"], "default": default_sql, }, params, ) def _alter_column_type_sql( self, model, old_field, new_field, new_type, old_collation, new_collation ): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a 2-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered. """ other_actions = [] if collate_sql := self._collate_sql( new_collation, old_collation, model._meta.db_table ): collate_sql = f" {collate_sql}" else: collate_sql = "" # Comment change? comment_sql = "" if self.connection.features.supports_comments and not new_field.many_to_many: if old_field.db_comment != new_field.db_comment: # PostgreSQL and Oracle can't execute 'ALTER COLUMN ...' and # 'COMMENT ON ...' at the same time. 
sql, params = self._alter_column_comment_sql( model, new_field, new_type, new_field.db_comment ) if sql: other_actions.append((sql, params)) if new_field.db_comment: comment_sql = self._comment_sql(new_field.db_comment) return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, "collation": collate_sql, "comment": comment_sql, }, [], ), other_actions, ) def _alter_column_comment_sql(self, model, new_field, new_type, new_db_comment): return ( self.sql_alter_column_comment % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "comment": self._comment_sql(new_db_comment), }, [], ) def _comment_sql(self, comment): return self.quote_value(comment or "") def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" # Rename the through table if ( old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table ): self.alter_db_table( old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # The field that points to the target model is needed, so we can # tell alter_field to change it - this is m2m_reverse_field_name() # (as opposed to m2m_field_name(), which points to our model). 
old_field.remote_field.through._meta.get_field( old_field.m2m_reverse_field_name() ), new_field.remote_field.through._meta.get_field( new_field.m2m_reverse_field_name() ), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other # end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, table_name, column_names, suffix=""): """ Generate a unique name for an index/unique constraint. The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = "%s%s" % ( names_digest(table_name, *column_names, length=8), suffix, ) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[: max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = "%s_%s_%s" % ( table_name[:other_length], "_".join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). 
if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): if db_tablespace is None: if len(fields) == 1 and fields[0].db_tablespace: db_tablespace = fields[0].db_tablespace elif settings.DEFAULT_INDEX_TABLESPACE: db_tablespace = settings.DEFAULT_INDEX_TABLESPACE elif model._meta.db_tablespace: db_tablespace = model._meta.db_tablespace if db_tablespace is not None: return " " + self.connection.ops.tablespace_sql(db_tablespace) return "" def _index_condition_sql(self, condition): if condition: return " WHERE " + condition return "" def _index_include_sql(self, model, columns): if not columns or not self.connection.features.supports_covering_indexes: return "" return Statement( " INCLUDE (%(columns)s)", columns=Columns(model._meta.db_table, columns, self.quote_name), ) def _create_index_sql( self, model, *, fields=None, name=None, suffix="", using="", db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, include=None, expressions=None, ): """ Return the SQL statement to create the index for one or several fields or expressions. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). 
""" fields = fields or [] expressions = expressions or [] compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection, ) tablespace_sql = self._get_index_tablespace_sql( model, fields, db_tablespace=db_tablespace ) columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index table = model._meta.db_table def create_index_name(*args, **kwargs): nonlocal name if name is None: name = self._create_index_name(*args, **kwargs) return self.quote_name(name) return Statement( sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=( self._index_columns(table, columns, col_suffixes, opclasses) if columns else Expressions(table, expressions, compiler, self.quote_value) ), extra=tablespace_sql, condition=self._index_condition_sql(condition), include=self._index_include_sql(model, include), ) def _delete_index_sql(self, model, name, sql=None): statement = Statement( sql or self.sql_delete_index, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) # Remove all deferred statements referencing the deleted index. 
table_name = statement.parts["table"].table index_name = statement.parts["name"] for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_index( table_name, index_name ): self.deferred_sql.remove(sql) return statement def _rename_index_sql(self, model, old_name, new_name): return Statement( self.sql_rename_index, table=Table(model._meta.db_table, self.quote_name), old_name=self.quote_name(old_name), new_name=self.quote_name(new_name), ) def _create_on_delete_sql(self, model, field): remote_field = field.remote_field try: return remote_field.on_delete.on_delete_sql(self) except AttributeError: return "" def _index_columns(self, table, columns, col_suffixes, opclasses): return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes) def _model_indexes_sql(self, model): """ Return a list of all index SQL statements (field indexes, Meta.indexes) for the specified model. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: output.extend(self._field_indexes_sql(model, field)) for index in model._meta.indexes: if ( not index.contains_expressions or self.connection.features.supports_expression_indexes ): output.append(index.create_sql(model, self)) return output def _field_indexes_sql(self, model, field): """ Return a list of all index SQL statements for the specified field. 
""" output = [] if self._field_should_be_indexed(model, field): output.append(self._create_index_sql(model, fields=[field])) return output def _field_should_be_altered(self, old_field, new_field, ignore=None): if (not (old_field.concrete or old_field.many_to_many)) and ( not (new_field.concrete or new_field.many_to_many) ): return False ignore = ignore or set() _, old_path, old_args, old_kwargs = old_field.deconstruct() _, new_path, new_args, new_kwargs = new_field.deconstruct() # Don't alter when: # - changing only a field name (unless it's a many-to-many) # - changing an attribute that doesn't affect the schema # - changing an attribute in the provided set of ignored attributes # - adding only a db_column and the column name is not changed # - db_table does not change for model referenced by foreign keys for attr in ignore.union(old_field.non_db_attrs): old_kwargs.pop(attr, None) for attr in ignore.union(new_field.non_db_attrs): new_kwargs.pop(attr, None) if ( not new_field.many_to_many and old_field.remote_field and new_field.remote_field and old_field.remote_field.model._meta.db_table == new_field.remote_field.model._meta.db_table ): old_kwargs.pop("to", None) new_kwargs.pop("to", None) # db_default can take many forms but result in the same SQL. 
if ( old_kwargs.get("db_default") and new_kwargs.get("db_default") and self.db_default_sql(old_field) == self.db_default_sql(new_field) ): old_kwargs.pop("db_default") new_kwargs.pop("db_default") if ( old_field.concrete and new_field.concrete and (self.quote_name(old_field.column) != self.quote_name(new_field.column)) ): return True if ( old_field.many_to_many and new_field.many_to_many and old_field.name != new_field.name ): return True return (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs) def _field_should_be_indexed(self, model, field): return field.db_index and not field.unique def _field_became_primary_key(self, old_field, new_field): return not old_field.primary_key and new_field.primary_key def _unique_should_be_added(self, old_field, new_field): return ( not new_field.primary_key and new_field.unique and (not old_field.unique or old_field.primary_key) ) def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): table = Table(model._meta.db_table, self.quote_name) name = self._fk_constraint_name(model, field, suffix) column = Columns(model._meta.db_table, [field.column], self.quote_name) to_table = Table(field.target_field.model._meta.db_table, self.quote_name) to_column = Columns( field.target_field.model._meta.db_table, [field.target_field.column], self.quote_name, ) deferrable = self.connection.ops.deferrable_sql() return Statement( self.sql_create_fk, table=table, name=name, column=column, to_table=to_table, to_column=to_column, deferrable=deferrable, on_delete_db=self._create_on_delete_sql(model, field), ) def _fk_constraint_name(self, model, field, suffix): def create_fk_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) return ForeignKeyName( 
model._meta.db_table, [field.column], split_identifier(field.target_field.model._meta.db_table)[1], [field.target_field.column], suffix, create_fk_name, ) def _delete_fk_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_fk, model, name) def _deferrable_constraint_sql(self, deferrable): if deferrable is None: return "" if deferrable == Deferrable.DEFERRED: return " DEFERRABLE INITIALLY DEFERRED" if deferrable == Deferrable.IMMEDIATE: return " DEFERRABLE INITIALLY IMMEDIATE" def _unique_index_nulls_distinct_sql(self, nulls_distinct): if nulls_distinct is False: return " NULLS NOT DISTINCT" elif nulls_distinct is True: return " NULLS DISTINCT" return "" def _unique_supported( self, condition=None, deferrable=None, include=None, expressions=None, nulls_distinct=None, ): return ( (not condition or self.connection.features.supports_partial_indexes) and ( not deferrable or self.connection.features.supports_deferrable_unique_constraints ) and (not include or self.connection.features.supports_covering_indexes) and ( not expressions or self.connection.features.supports_expression_indexes ) and ( nulls_distinct is None or self.connection.features.supports_nulls_distinct_unique_constraints ) ) def _unique_sql( self, model, fields, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, nulls_distinct=None, ): if not self._unique_supported( condition=condition, deferrable=deferrable, include=include, expressions=expressions, nulls_distinct=nulls_distinct, ): return None if ( condition or include or opclasses or expressions or nulls_distinct is not None ): # Databases support conditional, covering, functional unique, # and nulls distinct constraints via a unique index. 
sql = self._create_unique_sql( model, fields, name=name, condition=condition, include=include, opclasses=opclasses, expressions=expressions, nulls_distinct=nulls_distinct, ) if sql: self.deferred_sql.append(sql) return None constraint = self.sql_unique_constraint % { "columns": ", ".join([self.quote_name(field.column) for field in fields]), "deferrable": self._deferrable_constraint_sql(deferrable), } return self.sql_constraint % { "name": self.quote_name(name), "constraint": constraint, } def _create_unique_sql( self, model, fields, name=None, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, nulls_distinct=None, ): if not self._unique_supported( condition=condition, deferrable=deferrable, include=include, expressions=expressions, nulls_distinct=nulls_distinct, ): return None compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection ) table = model._meta.db_table columns = [field.column for field in fields] if name is None: name = self._unique_constraint_name(table, columns, quote=True) else: name = self.quote_name(name) if condition or include or opclasses or expressions: sql = self.sql_create_unique_index else: sql = self.sql_create_unique if columns: columns = self._index_columns( table, columns, col_suffixes=(), opclasses=opclasses ) else: columns = Expressions(table, expressions, compiler, self.quote_value) return Statement( sql, table=Table(table, self.quote_name), name=name, columns=columns, condition=self._index_condition_sql(condition), deferrable=self._deferrable_constraint_sql(deferrable), include=self._index_include_sql(model, include), nulls_distinct=self._unique_index_nulls_distinct_sql(nulls_distinct), ) def _unique_constraint_name(self, table, columns, quote=True): if quote: def create_unique_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) else: create_unique_name = self._create_index_name return IndexName(table, columns, "_uniq", create_unique_name) def 
_delete_unique_sql( self, model, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, nulls_distinct=None, ): if not self._unique_supported( condition=condition, deferrable=deferrable, include=include, expressions=expressions, nulls_distinct=nulls_distinct, ): return None if condition or include or opclasses or expressions: sql = self.sql_delete_index else: sql = self.sql_delete_unique return self._delete_constraint_sql(sql, model, name) def _check_sql(self, name, check): return self.sql_constraint % { "name": self.quote_name(name), "constraint": self.sql_check_constraint % {"check": check}, } def _create_check_sql(self, model, name, check): if not self.connection.features.supports_table_check_constraints: return None return Statement( self.sql_create_check, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), check=check, ) def _delete_check_sql(self, model, name): if not self.connection.features.supports_table_check_constraints: return None return self._delete_constraint_sql(self.sql_delete_check, model, name) def _delete_constraint_sql(self, template, model, name): return Statement( template, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _constraint_names( self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None, exclude=None, ): """Return all constraint names matching the columns and conditions.""" if column_names is not None: column_names = [ ( self.connection.introspection.identifier_converter( truncate_name(name, self.connection.ops.max_name_length()) ) if self.connection.features.truncates_names else self.connection.introspection.identifier_converter(name) ) for name in column_names ] with self.connection.cursor() as cursor: constraints = self.connection.introspection.get_constraints( cursor, model._meta.db_table ) result = [] for name, infodict in constraints.items(): if column_names is None or 
column_names == infodict["columns"]: if unique is not None and infodict["unique"] != unique: continue if primary_key is not None and infodict["primary_key"] != primary_key: continue if index is not None and infodict["index"] != index: continue if check is not None and infodict["check"] != check: continue if foreign_key is not None and not infodict["foreign_key"]: continue if type_ is not None and infodict["type"] != type_: continue if not exclude or name not in exclude: result.append(name) return result def _pk_constraint_sql(self, columns): return self.sql_pk_constraint % { "columns": ", ".join(self.quote_name(column) for column in columns) } def _delete_primary_key(self, model, strict=False): constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError( "Found wrong number (%s) of PK constraints for %s" % ( len(constraint_names), model._meta.db_table, ) ) for constraint_name in constraint_names: self.execute(self._delete_primary_key_sql(model, constraint_name)) def _create_primary_key_sql(self, model, field): return Statement( self.sql_create_pk, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name( self._create_index_name( model._meta.db_table, [field.column], suffix="_pk" ) ), columns=Columns(model._meta.db_table, [field.column], self.quote_name), ) def _delete_primary_key_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_pk, model, name) def _collate_sql(self, collation, old_collation=None, table_name=None): return "COLLATE " + self.quote_name(collation) if collation else "" def remove_procedure(self, procedure_name, param_types=()): sql = self.sql_delete_procedure % { "procedure": self.quote_name(procedure_name), "param_types": ",".join(param_types), } self.execute(sql)
python
github
https://github.com/django/django
django/db/backends/base/schema.py
# -*- coding: utf-8 -*-
"""
Parse, stream, create, sign and verify Bitcoin transactions as Tx structures.

The MIT License (MIT)

Copyright (c) 2013 by Richard Kiss

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

import io
import warnings

from ..encoding import double_sha256, from_bytes_32
from ..serialize import b2h, b2h_rev, h2b, h2b_rev
from ..serialize.bitcoin_streamer import parse_struct, stream_struct
from ..intbytes import byte_to_int, int_to_bytes

from .TxIn import TxIn
from .TxOut import TxOut
from .Spendable import Spendable

from .pay_to import script_obj_from_script, SolvingError, ScriptPayToScript

from .script import opcodes
from .script import tools

# Bitcoin signature hash flags. The low five bits select the mode;
# SIGHASH_ANYONECANPAY is a modifier bit OR'ed on top.
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80


class ValidationFailureError(Exception):
    """Raised when a freshly signed input fails to verify."""
    pass


class BadSpendableError(Exception):
    """Raised when an "unspent" does not match the authenticated source Tx."""
    pass


class Tx(object):

    @classmethod
    def coinbase_tx(class_, public_key_sec, coin_value, coinbase_bytes=b'',
                    version=1, lock_time=0):
        """
        Create the special "first in block" transaction that includes the
        mining fees.

        public_key_sec: SEC-encoded public key paid by the coinbase output
        coin_value: value of the coinbase output (subsidy plus fees)
        coinbase_bytes: arbitrary data placed in the coinbase input script
        """
        tx_in = TxIn.coinbase_tx_in(script=coinbase_bytes)
        COINBASE_SCRIPT_OUT = "%s OP_CHECKSIG"
        script_text = COINBASE_SCRIPT_OUT % b2h(public_key_sec)
        script_bin = tools.compile(script_text)
        tx_out = TxOut(coin_value, script_bin)
        return class_(version, [tx_in], [tx_out], lock_time)

    @classmethod
    def parse(class_, f):
        """Parse a Bitcoin transaction Tx from the file-like object f."""
        version, count = parse_struct("LI", f)
        txs_in = []
        for i in range(count):
            txs_in.append(TxIn.parse(f))
        count, = parse_struct("I", f)
        txs_out = []
        for i in range(count):
            txs_out.append(TxOut.parse(f))
        lock_time, = parse_struct("L", f)
        return class_(version, txs_in, txs_out, lock_time)

    @classmethod
    def from_hex(class_, hex_string):
        """Return the Tx for the given hex string.

        If trailing unspents data is present (see stream_unspents) it is
        parsed too; a parse failure there is ignored and unspents is left
        empty.
        """
        f = io.BytesIO(h2b(hex_string))
        tx = class_.parse(f)
        try:
            tx.parse_unspents(f)
        except Exception:
            # parsing unspents failed; the trailing data is optional
            tx.unspents = []
        return tx

    @classmethod
    def tx_from_hex(class_, hex_string):
        """Deprecated alias for from_hex."""
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn("Call to deprecated function tx_from_hex, use from_hex instead",
                      category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)
        return class_.from_hex(hex_string)

    def __init__(self, version, txs_in, txs_out, lock_time=0, unspents=None):
        """
        version: transaction version number
        txs_in: list of TxIn objects
        txs_out: list of TxOut objects
        lock_time: transaction lock time
        unspents: optional list of TxOut objects referenced by txs_in
        """
        self.version = version
        self.txs_in = txs_in
        self.txs_out = txs_out
        self.lock_time = lock_time
        # BUG FIX: was a mutable default argument (unspents=[]), which
        # shared one list across every Tx created without unspents.
        self.unspents = unspents if unspents is not None else []

    def stream(self, f, blank_solutions=False):
        """Stream a Bitcoin transaction Tx to the file-like object f.

        blank_solutions: if True, input scripts are streamed as empty
        (used by blanked_hash).
        """
        stream_struct("LI", f, self.version, len(self.txs_in))
        for t in self.txs_in:
            t.stream(f, blank_solutions=blank_solutions)
        stream_struct("I", f, len(self.txs_out))
        for t in self.txs_out:
            t.stream(f)
        stream_struct("L", f, self.lock_time)

    def as_bin(self, include_unspents=False):
        """Return the transaction as binary."""
        f = io.BytesIO()
        self.stream(f)
        if include_unspents and not self.missing_unspents():
            self.stream_unspents(f)
        return f.getvalue()

    def as_hex(self, include_unspents=False):
        """Return the transaction as hex."""
        return b2h(self.as_bin(include_unspents=include_unspents))

    def hash(self, hash_type=None):
        """Return the double-SHA256 hash for this Tx object.

        hash_type: if set, it is appended (as a 4-byte little-endian value)
        before hashing, as required by the signature-hash algorithm.
        """
        s = io.BytesIO()
        self.stream(s)
        if hash_type:
            stream_struct("L", s, hash_type)
        return double_sha256(s.getvalue())

    def blanked_hash(self):
        """
        Return the hash for this Tx object with solution scripts blanked.
        Useful for determining if two Txs might be equivalent modulo
        malleability. (That is, even if tx1 is morphed into tx2 using the
        malleability weakness, they will still have the same blanked hash.)
        """
        s = io.BytesIO()
        self.stream(s, blank_solutions=True)
        return double_sha256(s.getvalue())

    def id(self):
        """Return the human-readable hash for this Tx object."""
        return b2h_rev(self.hash())

    def signature_hash(self, tx_out_script, unsigned_txs_out_idx, hash_type):
        """
        Return the canonical hash for a transaction. We need to
        remove references to the signature, since it's a signature
        of the hash before the signature is applied.

        tx_out_script: the script the coins for unsigned_txs_out_idx are
            coming from
        unsigned_txs_out_idx: where to put the tx_out_script
        hash_type: one of SIGHASH_NONE, SIGHASH_SINGLE, SIGHASH_ALL,
            optionally bitwise or'ed with SIGHASH_ANYONECANPAY
        """
        # In case concatenating two scripts ends up with two codeseparators,
        # or an extra one at the end, this prevents all those possible
        # incompatibilities.
        tx_out_script = tools.delete_subscript(
            tx_out_script, int_to_bytes(opcodes.OP_CODESEPARATOR))

        # blank out other inputs' signatures
        def tx_in_for_idx(idx, tx_in):
            if idx == unsigned_txs_out_idx:
                return TxIn(tx_in.previous_hash, tx_in.previous_index,
                            tx_out_script, tx_in.sequence)
            return TxIn(tx_in.previous_hash, tx_in.previous_index, b'',
                        tx_in.sequence)

        txs_in = [tx_in_for_idx(i, tx_in) for i, tx_in in enumerate(self.txs_in)]
        txs_out = self.txs_out

        # Blank out some of the outputs
        if (hash_type & 0x1f) == SIGHASH_NONE:
            # Wildcard payee
            txs_out = []
            # Let the others update at will
            for i in range(len(txs_in)):
                if i != unsigned_txs_out_idx:
                    txs_in[i].sequence = 0
        elif (hash_type & 0x1f) == SIGHASH_SINGLE:
            # This preserves the ability to validate existing legacy
            # transactions which followed a buggy path in Satoshi's
            # original code; note that higher level functions for signing
            # new transactions (e.g., is_signature_ok and sign_tx_in)
            # check to make sure we never get here (or at least they
            # should)
            if unsigned_txs_out_idx >= len(txs_out):
                # This should probably be moved to a constant, but the
                # likelihood of ever getting here is already really small
                # and getting smaller
                return (1 << 248)

            # Only lock in the txout payee at same index as txin; delete
            # any outputs after this one and set all outputs before this
            # one to "null" (where "null" means an empty script and a
            # value of -1)
            txs_out = [TxOut(0xffffffffffffffff, b'')] * unsigned_txs_out_idx
            txs_out.append(self.txs_out[unsigned_txs_out_idx])

            # Let the others update at will
            for i in range(len(self.txs_in)):
                if i != unsigned_txs_out_idx:
                    txs_in[i].sequence = 0

        # Blank out other inputs completely, not recommended for open
        # transactions
        if hash_type & SIGHASH_ANYONECANPAY:
            txs_in = [txs_in[unsigned_txs_out_idx]]

        tmp_tx = Tx(self.version, txs_in, txs_out, self.lock_time)
        return from_bytes_32(tmp_tx.hash(hash_type=hash_type))

    def solve(self, hash160_lookup, tx_in_idx, tx_out_script,
              hash_type=SIGHASH_ALL, **kwargs):
        """
        Sign a standard transaction.

        hash160_lookup: An object with a get method that accepts a hash160
            and returns the corresponding (secret exponent, public_pair,
            is_compressed) tuple or None if it's unknown (in which case the
            script will obviously not be signed). A standard dictionary will
            do nicely here.
        tx_in_idx: the index of the tx_in we are currently signing
        tx_out_script: the script of the tx_out referenced by the given tx_in

        Returns the solution script, or None if the input already verifies.
        """
        tx_in = self.txs_in[tx_in_idx]

        # pay-to-script-hash scripts are exactly 23 bytes:
        # OP_HASH160 <20-byte hash> OP_EQUAL
        is_p2h = (len(tx_out_script) == 23 and
                  byte_to_int(tx_out_script[0]) == opcodes.OP_HASH160 and
                  byte_to_int(tx_out_script[-1]) == opcodes.OP_EQUAL)
        if is_p2h:
            hash160 = ScriptPayToScript.from_script(tx_out_script).hash160
            p2sh_lookup = kwargs.get("p2sh_lookup")
            if p2sh_lookup is None:
                raise ValueError("p2sh_lookup not set")
            if hash160 not in p2sh_lookup:
                raise ValueError("hash160=%s not found in p2sh_lookup" %
                                 b2h(hash160))
            script_to_hash = p2sh_lookup[hash160]
        else:
            script_to_hash = tx_out_script

        # Leave out the signature from the hash, since a signature can't
        # sign itself. The checksig op will also drop the signatures from
        # its hash.
        def signature_for_hash_type_f(hash_type, script):
            return self.signature_hash(script, tx_in_idx, hash_type)

        if tx_in.verify(tx_out_script, signature_for_hash_type_f):
            return
        sign_value = self.signature_hash(script_to_hash, tx_in_idx,
                                         hash_type=hash_type)
        the_script = script_obj_from_script(tx_out_script)
        solution = the_script.solve(
            hash160_lookup=hash160_lookup, sign_value=sign_value,
            signature_type=hash_type,
            existing_script=self.txs_in[tx_in_idx].script, **kwargs)
        return solution

    def sign_tx_in(self, hash160_lookup, tx_in_idx, tx_out_script,
                   hash_type=SIGHASH_ALL, **kwargs):
        """Sign input tx_in_idx in place. See solve for parameters."""
        # BUG FIX: previously passed hash_type=SIGHASH_ALL to solve,
        # silently ignoring the caller's hash_type argument.
        solution = self.solve(hash160_lookup, tx_in_idx, tx_out_script,
                              hash_type=hash_type, **kwargs)
        # BUG FIX: solve returns None when the input already verifies;
        # previously the None was assigned, clobbering a valid script.
        if solution is not None:
            self.txs_in[tx_in_idx].script = solution

    def verify_tx_in(self, tx_in_idx, tx_out_script, expected_hash_type=None):
        """Raise ValidationFailureError unless input tx_in_idx verifies
        against tx_out_script."""
        tx_in = self.txs_in[tx_in_idx]

        def signature_for_hash_type_f(hash_type, script):
            return self.signature_hash(script, tx_in_idx, hash_type)

        if not tx_in.verify(tx_out_script, signature_for_hash_type_f,
                            expected_hash_type):
            raise ValidationFailureError(
                "just signed script Tx %s TxIn index %d did not verify" % (
                    b2h_rev(tx_in.previous_hash), tx_in_idx))

    def total_out(self):
        """Return the sum of all output values."""
        return sum(tx_out.coin_value for tx_out in self.txs_out)

    def tx_outs_as_spendable(self, block_index_available=0):
        """Return this Tx's outputs wrapped as Spendable objects."""
        h = self.hash()
        return [
            Spendable(tx_out.coin_value, tx_out.script, h, tx_out_index,
                      block_index_available)
            for tx_out_index, tx_out in enumerate(self.txs_out)]

    def is_coinbase(self):
        """Return True if this is a coinbase transaction."""
        return len(self.txs_in) == 1 and self.txs_in[0].is_coinbase()

    def __str__(self):
        return "Tx [%s]" % self.id()

    def __repr__(self):
        return "Tx [%s] (v:%d) [%s] [%s]" % (
            self.id(), self.version,
            ", ".join(str(t) for t in self.txs_in),
            ", ".join(str(t) for t in self.txs_out))

    # The methods below here deal with an optional additional attribute:
    # "unspents". This is a list of tx_out objects that are referenced by
    # the list of self.txs_in objects.

    def unspents_from_db(self, tx_db, ignore_missing=False):
        """Populate self.unspents by looking up each input's source Tx in
        tx_db. Raises KeyError on a missing source unless ignore_missing."""
        unspents = []
        for tx_in in self.txs_in:
            if tx_in.is_coinbase():
                unspents.append(None)
                continue
            tx = tx_db.get(tx_in.previous_hash)
            if tx and tx.hash() == tx_in.previous_hash:
                unspents.append(tx.txs_out[tx_in.previous_index])
            elif ignore_missing:
                unspents.append(None)
            else:
                raise KeyError(
                    "can't find tx_out for %s:%d" % (
                        b2h_rev(tx_in.previous_hash), tx_in.previous_index))
        self.unspents = unspents

    def set_unspents(self, unspents):
        """Set self.unspents; must be one entry per input."""
        if len(unspents) != len(self.txs_in):
            raise ValueError("wrong number of unspents")
        self.unspents = unspents

    def missing_unspent(self, idx):
        """Return True if the unspent for input idx is unavailable.
        A coinbase Tx has no source outputs, so everything is "missing"."""
        if self.is_coinbase():
            return True
        if len(self.unspents) <= idx:
            return True
        return self.unspents[idx] is None

    def missing_unspents(self):
        """Return True if any input lacks its unspent (coinbase excepted)."""
        if self.is_coinbase():
            return False
        return (len(self.unspents) != len(self.txs_in) or
                any(self.missing_unspent(idx)
                    for idx, tx_in in enumerate(self.txs_in)))

    def check_unspents(self):
        """Raise ValueError unless all unspents are available."""
        if self.missing_unspents():
            raise ValueError(
                "wrong number of unspents. Call unspents_from_db or set_unspents.")

    def txs_in_as_spendable(self):
        """Return this Tx's inputs (paired with their unspents) as
        Spendable objects."""
        return [
            Spendable(tx_out.coin_value, tx_out.script,
                      tx_in.previous_hash, tx_in.previous_index)
            for tx_in, tx_out in zip(self.txs_in, self.unspents)]

    def stream_unspents(self, f):
        """Stream the unspents after the Tx proper; a missing entry is
        encoded as a zero-value TxOut with an empty script."""
        self.check_unspents()
        for tx_out in self.unspents:
            if tx_out is None:
                tx_out = TxOut(0, b'')
            tx_out.stream(f)

    def parse_unspents(self, f):
        """Parse one unspent per input from f (inverse of stream_unspents)."""
        unspents = []
        for _ in self.txs_in:
            tx_out = TxOut.parse(f)
            if tx_out.coin_value == 0:
                # zero value marks a placeholder written by stream_unspents
                tx_out = None
            unspents.append(tx_out)
        self.set_unspents(unspents)

    def is_signature_ok(self, tx_in_idx):
        """Return True if input tx_in_idx verifies against its unspent."""
        tx_in = self.txs_in[tx_in_idx]
        if tx_in.is_coinbase():
            return True
        if len(self.unspents) <= tx_in_idx:
            return False
        unspent = self.unspents[tx_in_idx]
        if unspent is None:
            return False
        tx_out_script = unspent.script

        def signature_for_hash_type_f(hash_type, script):
            return self.signature_hash(script, tx_in_idx, hash_type)

        return tx_in.verify(tx_out_script, signature_for_hash_type_f)

    def sign(self, hash160_lookup, hash_type=SIGHASH_ALL, **kwargs):
        """
        Sign a standard transaction.

        hash160_lookup: A dictionary (or another object with .get) where
            keys are hash160 and values are tuples (secret exponent,
            public_pair, is_compressed) or None (in which case the script
            will obviously not be signed).

        Returns self (for chaining). Inputs that cannot be solved are
        left unsigned.
        """
        self.check_unspents()
        for idx, tx_in in enumerate(self.txs_in):
            if self.is_signature_ok(idx) or tx_in.is_coinbase():
                continue
            try:
                if self.unspents[idx]:
                    self.sign_tx_in(
                        hash160_lookup, idx, self.unspents[idx].script,
                        hash_type=hash_type, **kwargs)
            except SolvingError:
                # deliberately best-effort: leave this input unsigned
                pass
        return self

    def bad_signature_count(self):
        """Return the number of inputs that do not verify."""
        count = 0
        for idx, tx_in in enumerate(self.txs_in):
            if not self.is_signature_ok(idx):
                count += 1
        return count

    def total_in(self):
        """Return the sum of all input values (requires unspents, except
        for a coinbase Tx, whose input equals its output)."""
        if self.is_coinbase():
            return self.txs_out[0].coin_value
        self.check_unspents()
        return sum(tx_out.coin_value for tx_out in self.unspents)

    def fee(self):
        """Return total_in minus total_out."""
        return self.total_in() - self.total_out()

    def validate_unspents(self, tx_db):
        """
        Spendable objects returned from blockchain.info or
        similar services contain coin_value information that must be trusted
        on faith. Mistaken coin_value data can result in coins being wasted
        to fees.

        This function solves this problem by iterating over the incoming
        transactions, fetching them from the tx_db in full, and verifying
        that the coin_values are as expected.

        Returns the fee for this transaction. If any of the spendables set by
        tx.set_unspents do not match the authenticated transactions, a
        ValidationFailureError is raised.
        """
        ZERO = b'\0' * 32
        tx_hashes = set((tx_in.previous_hash for tx_in in self.txs_in))

        # build a local copy of the DB
        tx_lookup = {}
        for h in tx_hashes:
            if h == ZERO:
                continue
            the_tx = tx_db.get(h)
            if the_tx is None:
                raise KeyError("hash id %s not in tx_db" % b2h_rev(h))
            if the_tx.hash() != h:
                # BUG FIX: was h2b_rev(h), which expects a hex string but
                # h is binary; b2h_rev renders the hash human-readable.
                raise KeyError("attempt to load Tx %s yielded a Tx with id %s" % (
                    b2h_rev(h), the_tx.id()))
            tx_lookup[h] = the_tx

        for idx, tx_in in enumerate(self.txs_in):
            if tx_in.previous_hash == ZERO:
                continue
            if tx_in.previous_hash not in tx_lookup:
                raise KeyError("hash id %s not in tx_lookup" %
                               b2h_rev(tx_in.previous_hash))
            txs_out = tx_lookup[tx_in.previous_hash].txs_out
            # BUG FIX: was ">", which let previous_index == len(txs_out)
            # through to an IndexError below.
            if tx_in.previous_index >= len(txs_out):
                raise BadSpendableError("tx_out index %d is too big for Tx %s" % (
                    tx_in.previous_index, b2h_rev(tx_in.previous_hash)))
            tx_out1 = txs_out[tx_in.previous_index]
            tx_out2 = self.unspents[idx]
            if tx_out1.coin_value != tx_out2.coin_value:
                raise BadSpendableError(
                    "unspents[%d] coin value mismatch (%d vs %d)" % (
                        idx, tx_out1.coin_value, tx_out2.coin_value))
            if tx_out1.script != tx_out2.script:
                raise BadSpendableError("unspents[%d] script mismatch!" % idx)

        return self.fee()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # daemon/version/__init__.py # Part of python-daemon, an implementation of PEP 3143. # # Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au> # This is free software: you may copy, modify, and/or distribute this work # under the terms of the Python Software Foundation License, version 2 or # later as published by the Python Software Foundation. # No warranty expressed or implied. See the file LICENSE.PSF-2 for details. """ Version information for the python-daemon distribution. """ from version_info import version_info version_info['version_string'] = u"1.5.1" version_short = u"%(version_string)s" % version_info version_full = u"%(version_string)s.r%(revno)s" % version_info version = version_short author_name = u"Ben Finney" author_email = u"ben+python@benfinney.id.au" author = u"%(author_name)s <%(author_email)s>" % vars() copyright_year_begin = u"2001" date = version_info['date'].split(' ', 1)[0] copyright_year = date.split('-')[0] copyright_year_range = copyright_year_begin if copyright_year > copyright_year_begin: copyright_year_range += u"–%(copyright_year)s" % vars() copyright = ( u"Copyright © %(copyright_year_range)s %(author)s and others" ) % vars() license = u"PSF-2+"
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-

# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""WSGI middleware that can convert HTML responses to PDF via xhtml2pdf."""

import xhtml2pdf.pisa as pisa
import StringIO

import logging

log = logging.getLogger("xhtml2pdf.wsgi")


class Filter(object):
    """Buffering WSGI middleware base class.

    Subclasses decide which responses to intercept (should_filter) and
    how to transform a buffered response (filter).
    """

    def __init__(self, app):
        # app: the wrapped WSGI application
        self.app = app

    def __call__(self, environ, start_response):
        script_name = environ.get('SCRIPT_NAME', '')
        path_info = environ.get('PATH_INFO', '')
        # 'sent' captures (status, headers, exc_info) when we intercept;
        # it stays empty when the response is passed straight through.
        sent = []
        written_response = StringIO.StringIO()

        def replacement_start_response(status, headers, exc_info=None):
            if not self.should_filter(status, headers):
                return start_response(status, headers, exc_info)
            else:
                sent[:] = [status, headers, exc_info]
                return written_response.write

        app_iter = self.app(environ, replacement_start_response)
        if not sent:
            # Not intercepted: hand the iterable through untouched.
            return app_iter
        status, headers, exc_info = sent
        try:
            # Buffer the whole body so filter() can transform it at once.
            for chunk in app_iter:
                written_response.write(chunk)
        finally:
            if hasattr(app_iter, 'close'):
                app_iter.close()
        body = written_response.getvalue()
        status, headers, body = self.filter(
            script_name, path_info, environ, status, headers, body)
        start_response(status, headers, exc_info)
        return [body]

    def should_filter(self, status, headers):
        """Return True if this response should be buffered and filtered.

        Abstract hook; the base implementation filters nothing.
        """
        # BUG FIX: removed a leftover debug 'print headers' statement and
        # made the implicit None return explicit.
        return False

    def filter(self, script_name, path_info, environ, status, headers, body):
        """Transform the buffered response; return (status, headers, body).

        Abstract hook; must be overridden by subclasses.
        """
        # BUG FIX: the stub previously took only (status, headers, body),
        # so a subclass implementing that signature would get a TypeError
        # from the 6-argument call in __call__ instead of NotImplementedError.
        raise NotImplementedError


class HTMLFilter(Filter):
    """Filter that intercepts only successful text/html responses."""

    def should_filter(self, status, headers):
        if not status.startswith('200'):
            return False
        for name, value in headers:
            if name.lower() == 'content-type':
                return value.startswith('text/html')
        return False


class PisaMiddleware(HTMLFilter):
    """Convert an HTML response to a PDF attachment when the WSGI
    environ key 'pisa.topdf' names the download filename."""

    def filter(self, script_name, path_info, environ, status, headers, body):
        topdf = environ.get("pisa.topdf", "")
        if topdf:
            dst = StringIO.StringIO()
            pisa.CreatePDF(body, dst, show_error_as_pdf=True)
            headers = [
                ("content-type", "application/pdf"),
                ("content-disposition", "attachment; filename=" + topdf)
            ]
            body = dst.getvalue()
        return status, headers, body
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python

# James Laska (jlaska@redhat.com)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
description:
    - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
version_added: "1.2"
author: "Barnaby Court (@barnabycourt)"
notes:
    - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
    - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl), I(server_proxy_hostname),
      I(server_proxy_port), I(server_proxy_user) and I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
      config file and default to None.
requirements:
    - subscription-manager
options:
    state:
        description:
            - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
        choices: [ "present", "absent" ]
        default: "present"
    username:
        description:
            - access.redhat.com or Sat6 username
    password:
        description:
            - access.redhat.com or Sat6 password
    server_hostname:
        description:
            - Specify an alternative Red Hat Subscription Management or Sat6 server
    server_insecure:
        description:
            - Enable or disable https server certificate verification when connecting to C(server_hostname)
    rhsm_baseurl:
        description:
            - Specify CDN baseurl
    server_proxy_hostname:
        description:
            - Specify a HTTP proxy hostname
        version_added: "2.4"
    server_proxy_port:
        description:
            - Specify a HTTP proxy port
        version_added: "2.4"
    server_proxy_user:
        description:
            - Specify a user for HTTP proxy with basic authentication
        version_added: "2.4"
    server_proxy_password:
        description:
            - Specify a password for HTTP proxy with basic authentication
        version_added: "2.4"
    auto_attach:
        description:
            - Upon successful registration, auto-consume available subscriptions
            - Added in favor of deprecated autosubscribe in 2.5.
        type: bool
        default: 'no'
        version_added: "2.5"
        aliases: [autosubscribe]
    activationkey:
        description:
            - supply an activation key for use with registration
    org_id:
        description:
            - Organization ID to use in conjunction with activationkey
        version_added: "2.0"
    environment:
        description:
            - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
        version_added: "2.2"
    pool:
        description:
            - |
              Specify a subscription pool name to consume.  Regular expressions accepted. Use
              I(pool_ids) instead if possible, as it is much faster. Mutually exclusive with I(pool_ids).
        default: '^$'
    pool_ids:
        description:
            - |
              Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
              A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
              or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
              C(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple
              entitlements from a pool (the pool must support this).
              Mutually exclusive with I(pool).
        default: []
        version_added: "2.4"
    consumer_type:
        description:
            - The type of unit to register, defaults to system
        version_added: "2.1"
    consumer_name:
        description:
            - Name of the system to register, defaults to the hostname
        version_added: "2.1"
    consumer_id:
        description:
            - |
              References an existing consumer ID to resume using a previous registration
              for this system. If the  system's identity certificate is lost or corrupted,
              this option allows it to resume using its previous identity and subscriptions.
              The default is to not specify a consumer ID so a new ID is created.
        version_added: "2.1"
    force_register:
        description:
            - Register the system even if it is already registered
        type: bool
        default: 'no'
        version_added: "2.2"
'''

EXAMPLES = '''
- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
  redhat_subscription:
    state: present
    username: joe_user
    password: somepass
    auto_attach: true

- name: Same as above but subscribe to a specific pool by ID.
  redhat_subscription:
    state: present
    username: joe_user
    password: somepass
    pool_ids: 0123456789abcdef0123456789abcdef

- name: Register and subscribe to multiple pools.
  redhat_subscription:
    state: present
    username: joe_user
    password: somepass
    pool_ids:
      - 0123456789abcdef0123456789abcdef
      - 1123456789abcdef0123456789abcdef

- name: Same as above but consume multiple entitlements.
  redhat_subscription:
    state: present
    username: joe_user
    password: somepass
    pool_ids:
      - 0123456789abcdef0123456789abcdef: 2
      - 1123456789abcdef0123456789abcdef: 4

- name: Register and pull existing system data.
  redhat_subscription:
    state: present
    username: joe_user
    password: somepass
    consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx

- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
  redhat_subscription:
    state: present
    activationkey: 1-222333444
    org_id: 222333444
    pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'

- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
  redhat_subscription:
    state: present
    activationkey: 1-222333444
    org_id: 222333444
    pool: '^Red Hat Enterprise Server$'

- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
  redhat_subscription:
    state: present
    username: joe_user
    password: somepass
    environment: Library
    auto_attach: true
'''

RETURN = '''
subscribed_pool_ids:
    description: List of pool IDs to which system is now subscribed
    returned: success
    type: complex
    contains: {
        "8a85f9815ab905d3015ab928c7005de4": "1"
    }
'''

import os
import re
import shutil
import tempfile

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves import configparser


# Absolute path to the subscription-manager binary; resolved in main().
SUBMAN_CMD = None


class RegistrationBase(object):
    """Abstract base for subscription/registration backends.

    Subclasses implement configure/register/unregister/unsubscribe/subscribe.
    """

    def __init__(self, module, username=None, password=None):
        self.module = module
        self.username = username
        self.password = password

    def configure(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def enable(self):
        # Remove any existing redhat.repo
        redhat_repo = '/etc/yum.repos.d/redhat.repo'
        if os.path.isfile(redhat_repo):
            os.unlink(redhat_repo)

    def register(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unregister(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def unsubscribe(self):
        raise NotImplementedError("Must be implemented by a sub-class")

    def update_plugin_conf(self, plugin, enabled=True):
        # Toggle the [main] enabled flag of a yum plugin config file,
        # editing a temp copy and moving it into place atomically.
        plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin

        if os.path.isfile(plugin_conf):
            tmpfd, tmpfile = tempfile.mkstemp()
            shutil.copy2(plugin_conf, tmpfile)
            cfg = configparser.ConfigParser()
            cfg.read([tmpfile])

            # NOTE(review): py3 configparser.set requires string values;
            # passing int 1/0 here may raise TypeError — confirm against
            # the six.moves configparser in use.
            if enabled:
                cfg.set('main', 'enabled', 1)
            else:
                cfg.set('main', 'enabled', 0)

            fd = open(tmpfile, 'w+')
            cfg.write(fd)
            fd.close()
            self.module.atomic_move(tmpfile, plugin_conf)

    def subscribe(self, **kwargs):
        raise NotImplementedError("Must be implemented by a sub-class")


class Rhsm(RegistrationBase):
    """RHSM backend driving the subscription-manager CLI."""

    def __init__(self, module, username=None, password=None):
        RegistrationBase.__init__(self, module, username, password)
        self.module = module

    def enable(self):
        '''
            Enable the system to receive updates from subscription-manager.
            This involves updating affected yum plugins and removing any
            conflicting yum repositories.
        '''
        RegistrationBase.enable(self)
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', True)

    def configure(self, **kwargs):
        '''
            Configure the system as directed for registration with RHSM
            Raises:
              * Exception - if error occurs while running command
        '''
        args = [SUBMAN_CMD, 'config']

        # Pass supplied **kwargs as parameters to subscription-manager. Ignore
        # non-configuration parameters and replace '_' with '.'. For example,
        # 'server_hostname' becomes '--server.hostname'.
        for k, v in kwargs.items():
            if re.search(r'^(server|rhsm)_', k) and v is not None:
                args.append('--%s=%s' % (k.replace('_', '.', 1), v))

        self.module.run_command(args, check_rc=True)

    @property
    def is_registered(self):
        '''
            Determine whether the current system
            Returns:
              * Boolean - whether the current system is currently registered to
                          RHSM.
        '''
        # 'subscription-manager identity' exits 0 only when registered.
        args = [SUBMAN_CMD, 'identity']
        rc, stdout, stderr = self.module.run_command(args, check_rc=False)
        if rc == 0:
            return True
        else:
            return False

    def register(self, username, password, auto_attach, activationkey, org_id,
                 consumer_type, consumer_name, consumer_id, force_register, environment,
                 rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
                 server_proxy_port, server_proxy_user, server_proxy_password):
        '''
            Register the current system to the provided RHSM or Sat6 server
            Raises:
              * Exception - if error occurs while running command
        '''
        args = [SUBMAN_CMD, 'register']

        # Generate command arguments
        if force_register:
            args.extend(['--force'])

        if rhsm_baseurl:
            args.extend(['--baseurl', rhsm_baseurl])

        if server_insecure:
            args.extend(['--insecure'])

        if server_hostname:
            args.extend(['--serverurl', server_hostname])

        if org_id:
            args.extend(['--org', org_id])

        # An activation key replaces username/password/auto-attach.
        if activationkey:
            args.extend(['--activationkey', activationkey])
        else:
            if auto_attach:
                args.append('--auto-attach')
            if username:
                args.extend(['--username', username])
            if password:
                args.extend(['--password', password])
            if consumer_type:
                args.extend(['--type', consumer_type])
            if consumer_name:
                args.extend(['--name', consumer_name])
            if consumer_id:
                args.extend(['--consumerid', consumer_id])
            if environment:
                args.extend(['--environment', environment])

        if server_proxy_hostname and server_proxy_port:
            args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
        if server_proxy_user:
            args.extend(['--proxyuser', server_proxy_user])
        if server_proxy_password:
            args.extend(['--proxypassword', server_proxy_password])

        # NOTE(review): run_command returns (rc, stdout, stderr); the names
        # here are swapped. Harmless since the values are unused.
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)

    def unsubscribe(self, serials=None):
        '''
            Unsubscribe a system from subscribed channels
            Args:
              serials(list or None): list of serials to unsubscribe. If
                                     serials is none or an empty list, then
                                     all subscribed channels will be removed.
            Raises:
              * Exception - if error occurs while running command
        '''
        items = []
        if serials is not None and serials:
            items = ["--serial=%s" % s for s in serials]
        if serials is None:
            items = ["--all"]

        # An empty serials list means there is nothing to remove.
        if items:
            args = [SUBMAN_CMD, 'unsubscribe'] + items
            rc, stderr, stdout = self.module.run_command(args, check_rc=True)
        return serials

    def unregister(self):
        '''
            Unregister a currently registered system
            Raises:
              * Exception - if error occurs while running command
        '''
        args = [SUBMAN_CMD, 'unregister']
        rc, stderr, stdout = self.module.run_command(args, check_rc=True)
        self.update_plugin_conf('rhnplugin', False)
        self.update_plugin_conf('subscription-manager', False)

    def subscribe(self, regexp):
        '''
            Subscribe current system to available pools matching the specified
            regular expression. It matches regexp against available pool ids
            first. If any pool ids match, subscribe to those pools and return.

            If no pool ids match, then match regexp against available pool
            product names. Note this can still easily match many many pools.
            Then subscribe to those pools.

            Since a pool id is a more specific match, we only fallback to
            matching against names if we didn't match pool ids.

            Raises:
              * Exception - if error occurs while running command
        '''
        # See https://github.com/ansible/ansible/issues/19466

        # subscribe to pools whose pool id matches regexp (and only the pool id)
        subscribed_pool_ids = self.subscribe_pool(regexp)

        # If we found any matches, we are done
        # Don't attempt to match pools by product name
        if subscribed_pool_ids:
            return subscribed_pool_ids

        # We didn't match any pool ids.
        # Now try subscribing to pools based on product name match
        # Note: This can match lots of product names.
        subscribed_by_product_pool_ids = self.subscribe_product(regexp)
        if subscribed_by_product_pool_ids:
            return subscribed_by_product_pool_ids

        # no matches
        return []

    def subscribe_by_pool_ids(self, pool_ids):
        # pool_ids: dict mapping pool id -> quantity (as string)
        for pool_id, quantity in pool_ids.items():
            args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity]
            rc, stderr, stdout = self.module.run_command(args, check_rc=True)
        return pool_ids

    def subscribe_pool(self, regexp):
        '''
            Subscribe current system to available pools matching the specified
            regular expression
            Raises:
              * Exception - if error occurs while running command
        '''

        # Available pools ready for subscription
        available_pools = RhsmPools(self.module)

        subscribed_pool_ids = []
        for pool in available_pools.filter_pools(regexp):
            pool.subscribe()
            subscribed_pool_ids.append(pool.get_pool_id())
        return subscribed_pool_ids

    def subscribe_product(self, regexp):
        '''
            Subscribe current system to available pools matching the specified
            regular expression
            Raises:
              * Exception - if error occurs while running command
        '''

        # Available pools ready for subscription
        available_pools = RhsmPools(self.module)

        subscribed_pool_ids = []
        for pool in available_pools.filter_products(regexp):
            pool.subscribe()
            subscribed_pool_ids.append(pool.get_pool_id())
        return subscribed_pool_ids

    def update_subscriptions(self, regexp):
        # Reconcile consumed pools against regexp: drop non-matching
        # serials, then subscribe to whatever now matches.
        changed = False
        consumed_pools = RhsmPools(self.module, consumed=True)
        pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
        pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])

        serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
        serials = self.unsubscribe(serials=serials_to_remove)

        subscribed_pool_ids = self.subscribe(regexp)

        if subscribed_pool_ids or serials:
            changed = True
        return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
                'unsubscribed_serials': serials}

    def update_subscriptions_by_pool_ids(self, pool_ids):
        # Reconcile consumed pools against an explicit id->quantity map.
        changed = False
        consumed_pools = RhsmPools(self.module, consumed=True)

        existing_pools = {}
        for p in consumed_pools:
            existing_pools[p.get_pool_id()] = p.QuantityUsed

        # Remove any entitlement whose quantity no longer matches the request.
        serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
        serials = self.unsubscribe(serials=serials_to_remove)

        missing_pools = {}
        for pool_id, quantity in pool_ids.items():
            if existing_pools.get(pool_id, 0) != quantity:
                missing_pools[pool_id] = quantity

        self.subscribe_by_pool_ids(missing_pools)

        if missing_pools or serials:
            changed = True
        return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
                'unsubscribed_serials': serials}


class RhsmPool(object):
    '''
        Convenience class for housing subscription information
    '''

    def __init__(self, module, **kwargs):
        # Attributes are set dynamically from parsed subscription-manager
        # output (e.g. _name, PoolId/PoolID, Serial, QuantityUsed).
        self.module = module
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __str__(self):
        return str(self.__getattribute__('_name'))

    def get_pool_id(self):
        # NOTE(review): the inner getattr has no default and is evaluated
        # eagerly, so this raises AttributeError whenever 'PoolID' is absent
        # — even if 'PoolId' exists. Confirm which spelling the installed
        # subscription-manager emits.
        return getattr(self, 'PoolId', getattr(self, 'PoolID'))

    def subscribe(self):
        args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
        # check_rc=True already fails the module on nonzero rc, so this
        # effectively always returns True on reaching the comparison.
        if rc == 0:
            return True
        else:
            return False


class RhsmPools(object):
    """
        This class is used for manipulating pools subscriptions with RHSM
    """

    def __init__(self, module, consumed=False):
        self.module = module
        self.products = self._load_product_list(consumed)

    def __iter__(self):
        return self.products.__iter__()

    def _load_product_list(self, consumed=False):
        """
            Loads list of all available or consumed pools for system in data structure

            Args:
                consumed(bool): if True list consumed  pools, else list available pools (default False)
        """
        args = "subscription-manager list"
        if consumed:
            args += " --consumed"
        else:
            args += " --available"
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)

        products = []
        # Parse the "Key: Value" listing; a Product/Subscription name
        # starts a new record, subsequent keys attach to the latest one.
        for line in stdout.split('\n'):
            # Remove leading+trailing whitespace
            line = line.strip()
            # An empty line implies the end of a output group
            if len(line) == 0:
                continue
            # If a colon ':' is found, parse
            elif ':' in line:
                (key, value) = line.split(':', 1)
                key = key.strip().replace(" ", "")  # To unify
                value = value.strip()
                if key in ['ProductName', 'SubscriptionName']:
                    # Remember the name for later processing
                    products.append(RhsmPool(self.module, _name=value, key=value))
                elif products:
                    # Associate value with most recently recorded product
                    products[-1].__setattr__(key, value)
                # FIXME - log some warning?
                # else:
                    # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
        return products

    def filter_pools(self, regexp='^$'):
        '''
            Return a list of RhsmPools whose pool id matches the provided regular expression
        '''
        r = re.compile(regexp)
        for product in self.products:
            if r.search(product.get_pool_id()):
                yield product

    def filter_products(self, regexp='^$'):
        '''
            Return a list of RhsmPools whose product name matches the provided regular expression
        '''
        r = re.compile(regexp)
        for product in self.products:
            if r.search(product._name):
                yield product


def main():

    # Load RHSM configuration from file
    rhsm = Rhsm(None)

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent']),
            username=dict(default=None, required=False),
            password=dict(default=None, required=False, no_log=True),
            server_hostname=dict(default=None, required=False),
            server_insecure=dict(default=None, required=False),
            rhsm_baseurl=dict(default=None, required=False),
            auto_attach=dict(aliases=['autosubscribe'], default=False, type='bool'),
            activationkey=dict(default=None, required=False, no_log=True),
            org_id=dict(default=None, required=False),
            environment=dict(default=None, required=False, type='str'),
            pool=dict(default='^$', required=False, type='str'),
            pool_ids=dict(default=[], required=False, type='list'),
            consumer_type=dict(default=None, required=False),
            consumer_name=dict(default=None, required=False),
            consumer_id=dict(default=None, required=False),
            force_register=dict(default=False, type='bool'),
            server_proxy_hostname=dict(default=None, required=False),
            server_proxy_port=dict(default=None, required=False),
            server_proxy_user=dict(default=None, required=False),
            server_proxy_password=dict(default=None, required=False, no_log=True),
        ),
        required_together=[['username', 'password'],
                           ['server_proxy_hostname', 'server_proxy_port'],
                           ['server_proxy_user', 'server_proxy_password']],
        mutually_exclusive=[['activationkey', 'username'],
                            ['activationkey', 'consumer_id'],
                            ['activationkey', 'environment'],
                            ['activationkey', 'autosubscribe'],
                            ['force', 'consumer_id'],
                            ['pool', 'pool_ids']],
        required_if=[['state', 'present', ['username', 'activationkey'], True]],
    )

    rhsm.module = module
    state = module.params['state']
    username = module.params['username']
    password = module.params['password']
    server_hostname = module.params['server_hostname']
    server_insecure = module.params['server_insecure']
    rhsm_baseurl = module.params['rhsm_baseurl']
    auto_attach = module.params['auto_attach']
    activationkey = module.params['activationkey']
    org_id = module.params['org_id']
    if activationkey and not org_id:
        module.fail_json(msg='org_id is required when using activationkey')
    environment = module.params['environment']
    pool = module.params['pool']
    pool_ids = {}
    for value in module.params['pool_ids']:
        if isinstance(value, dict):
            if len(value) != 1:
                module.fail_json(msg='Unable to parse pool_ids option.')
            pool_id, quantity = value.items()[0]
        else:
            pool_id, quantity = value, 1
        pool_ids[pool_id] = str(quantity)
    consumer_type = module.params["consumer_type"]
    consumer_name = module.params["consumer_name"]
    consumer_id = module.params["consumer_id"]
    force_register = module.params["force_register"]
    server_proxy_hostname = module.params['server_proxy_hostname']
    server_proxy_port = module.params['server_proxy_port']
    server_proxy_user = module.params['server_proxy_user']
    server_proxy_password = module.params['server_proxy_password']

    global SUBMAN_CMD
    SUBMAN_CMD = module.get_bin_path('subscription-manager', True)

    #
Ensure system is registered if state == 'present': # Register system if rhsm.is_registered and not force_register: if pool != '^$' or pool_ids: try: if pool_ids: result = rhsm.update_subscriptions_by_pool_ids(pool_ids) else: result = rhsm.update_subscriptions(pool) except Exception as e: module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e))) else: module.exit_json(**result) else: module.exit_json(changed=False, msg="System already registered.") else: try: rhsm.enable() rhsm.configure(**module.params) rhsm.register(username, password, auto_attach, activationkey, org_id, consumer_type, consumer_name, consumer_id, force_register, environment, rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password) if pool_ids: subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids) else: subscribed_pool_ids = rhsm.subscribe(pool) except Exception as e: module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e))) else: module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname, subscribed_pool_ids=subscribed_pool_ids) # Ensure system is *not* registered if state == 'absent': if not rhsm.is_registered: module.exit_json(changed=False, msg="System already unregistered.") else: try: rhsm.unsubscribe() rhsm.unregister() except Exception as e: module.fail_json(msg="Failed to unregister: %s" % to_native(e)) else: module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# Author: Idan Gutman # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import re import traceback from sickbeard import logger from sickbeard import tvcache from sickbeard.bs4_parser import BS4Parser from sickbeard.providers import generic class HoundDawgsProvider(generic.TorrentProvider): def __init__(self): generic.TorrentProvider.__init__(self, "HoundDawgs") self.username = None self.password = None self.ratio = None self.minseed = None self.minleech = None self.cache = HoundDawgsCache(self) self.urls = {'base_url': 'https://hounddawgs.org/', 'search': 'https://hounddawgs.org/torrents.php', 'login': 'https://hounddawgs.org/login.php'} self.url = self.urls['base_url'] self.search_params = { "filter_cat[85]": 1, "filter_cat[58]": 1, "filter_cat[57]": 1, "filter_cat[74]": 1, "filter_cat[92]": 1, "filter_cat[93]": 1, "order_by": "s3", "order_way": "desc", "type": '', "userid": '', "searchstr": '', "searchimdb": '', "searchtags": '' } def _doLogin(self): login_params = {'username': self.username, 'password': self.password, 'keeplogged': 'on', 'login': 'Login'} self.getURL(self.urls['base_url'], timeout=30) response = self.getURL(self.urls['login'], post_data=login_params, timeout=30) if not response: logger.log(u"Unable to connect to provider", logger.WARNING) return False if re.search('Dit brugernavn eller kodeord er forkert.', 
response) \ or re.search('<title>Login :: HoundDawgs</title>', response) \ or re.search('Dine cookies er ikke aktiveret.', response): logger.log(u"Invalid username or password. Check your settings", logger.WARNING) return False return True def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None): results = [] items = {'Season': [], 'Episode': [], 'RSS': []} if not self._doLogin(): return results for mode in search_strings.keys(): logger.log(u"Search Mode: %s" % mode, logger.DEBUG) for search_string in search_strings[mode]: if mode != 'RSS': logger.log(u"Search string: %s " % search_string, logger.DEBUG) self.search_params['searchstr'] = search_string data = self.getURL(self.urls['search'], params=self.search_params) strTableStart = "<table class=\"torrent_table" startTableIndex = data.find(strTableStart) trimmedData = data[startTableIndex:] if not trimmedData: continue try: with BS4Parser(trimmedData, features=["html5lib", "permissive"]) as html: result_table = html.find('table', {'id': 'torrent_table'}) if not result_table: logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG) continue result_tbody = result_table.find('tbody') entries = result_tbody.contents del entries[1::2] for result in entries[1:]: torrent = result.find_all('td') if len(torrent) <= 1: break allAs = (torrent[1]).find_all('a') try: # link = self.urls['base_url'] + allAs[2].attrs['href'] # url = result.find('td', attrs={'class': 'quickdownload'}).find('a') title = allAs[2].string # Trimming title so accepted by scene check(Feature has been rewuestet i forum) title = title.replace("custom.", "") title = title.replace("CUSTOM.", "") title = title.replace("Custom.", "") title = title.replace("dk", "") title = title.replace("DK", "") title = title.replace("Dk", "") title = title.replace("subs.", "") title = title.replace("SUBS.", "") title = title.replace("Subs.", "") download_url = self.urls['base_url']+allAs[0].attrs['href'] # FIXME 
size = -1 seeders = 1 leechers = 0 except (AttributeError, TypeError): continue if not title or not download_url: continue # Filter unseeded torrent # if seeders < self.minseed or leechers < self.minleech: # if mode != 'RSS': # logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG) # continue item = title, download_url, size, seeders, leechers if mode != 'RSS': logger.log(u"Found result: %s " % title, logger.DEBUG) items[mode].append(item) except Exception, e: logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR) # For each search mode sort all the items by seeders if available items[mode].sort(key=lambda tup: tup[3], reverse=True) results += items[mode] return results def seedRatio(self): return self.ratio class HoundDawgsCache(tvcache.TVCache): def __init__(self, provider_obj): tvcache.TVCache.__init__(self, provider_obj) # only poll HoundDawgs every 20 minutes max self.minTime = 20 def _getRSSData(self): search_strings = {'RSS': ['']} return {'entries': self.provider._doSearch(search_strings)} provider = HoundDawgsProvider()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_analytic_journal_report import account_analytic_balance_report import account_analytic_inverted_balance_report import account_analytic_cost_ledger_report import account_analytic_cost_ledger_for_journal_report import project_account_analytic_line import account_analytic_chart # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
/** @import { SpreadElement } from 'estree' */
/** @import { Context } from '../types' */

/**
 * Analysis visitor for spread elements (`...x`).
 * @param {SpreadElement} node
 * @param {Context} context
 */
export function SpreadElement(node, context) {
	const { expression } = context.state;

	if (expression) {
		// treat e.g. `[...x]` the same as `[...x.values()]`
		expression.has_call = true;
		expression.has_state = true;
	}

	context.next();
}
javascript
github
https://github.com/sveltejs/svelte
packages/svelte/src/compiler/phases/2-analyze/visitors/SpreadElement.js
# From Python Cookbook. Recipe 1.23 - Encoding Unicode Data for XML and HTML. # Problem: Want to encode Unicode text for output in HTML, or some other XML # application, using a limited but popular encoding such as ASCII or latin-1 def encode_for_xml(unicode_data, encoding='ascii'): return unicode_data.encode(encoding, 'xmlcharrefreplace') # If you prefer to use HTML's symbolic entity references instead. For this you # need to define and register a customized encoding error handler. import codecs from htmlentitydefs import codepoint2name def html_replace(exc): if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): s = [ u'&%s;' % codepoint2name[ord(c)] for c in exc.object[exc.start:exc.end]] return ''.join(s),exc.end else: raise TypeError("can't handle %s" % exc.__name__) codecs.register_error('html_replace',html_replace) def encode_for_html(unicode_data, encoding='ascii'): return unicode_data.encode(encoding, 'html_replace') if __name__ == '__main__': # demo data = u'''\ <html> <head> <title>Encoding Test</title> </head> <body> <p>accented characters: <ul> <li>\xe0 (a + grave) <li>\xe7 (c + cedilla) <li>\xe9 (e + acute) </ul> <p>symbols: <li>\xa3 (British pound) <li>\u20ac (Euro) <li>\u221e (Infinity) </ul> </body> </html> ''' #print encode_for_xml(data) print encode_for_html(data)
unknown
codeparrot/codeparrot-clean
#!/bin/bash # Golden tests are currently incompatible with Bazel Remote Execution unset GOLDEN_TEST_CONFIG_PATH is_ppc64le() { local -r arch="$(uname -m)" [[ "${arch}" == "ppc64le" || "${arch}" == "ppc64" || "${arch}" == "ppc" ]] && return 0 || return 1 } is_s390x() { local -r arch="$(uname -m)" [[ "${arch}" == "s390x" || "${arch}" == "s390" ]] && return 0 || return 1 } is_s390x_or_ppc64le() { if is_ppc64le || is_s390x; then return 0 else return 1 fi } ulimit -c unlimited # If RUNFILES_DIR is set, prepend "_main/" to the path to find the binary in the runfiles tree. # With --nolegacy_external_runfiles, binaries are located under $RUNFILES_DIR/_main/... test_bin="$1" if [[ -n "${RUNFILES_DIR:-}" && ! -x "$test_bin" ]]; then test_bin="${RUNFILES_DIR}/_main/${test_bin}" fi "${test_bin}" "${@:2}" & main_pid=$! echo "Process-under-test started with PID: ${main_pid}" # This is mocked out in buildscripts/bazel_testbuilds/verify_unittest_coredump_test.sh, make sure # to update the test if this is changed. timeout_seconds=600 if is_s390x_or_ppc64le; then timeout_seconds=$((timeout_seconds * 4)) fi timeout_minutes=$((timeout_seconds / 60)) ( sleep $timeout_seconds # 'kill -0' checks for process existence without sending a signal if kill -0 "$main_pid" 2>/dev/null; then echo "${timeout_minutes} minute Timer finished. Process-under-test ${main_pid} is still running. Sending a SIGABRT to trigger a coredump now." kill -ABRT "${main_pid}" fi ) & wait "${main_pid}" RET=$? CORE_FILE=$(find -L ./ -name "*.core") if [ -f "$CORE_FILE" ]; then CORE_FILENAME="dump_$(date +%s%N).core.gz" gzip -c $CORE_FILE >"$TEST_UNDECLARED_OUTPUTS_DIR/$CORE_FILENAME" echo "Writing coredump to $CORE_FILENAME..." fi exit $RET
unknown
github
https://github.com/mongodb/mongo
bazel/test_wrapper.sh
/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_JIT_TF_TO_HLO_COMPILER_H_
#define TENSORFLOW_COMPILER_JIT_TF_TO_HLO_COMPILER_H_

#include <memory>
#include <vector>

#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow {

// Abstract interface for lowering TensorFlow computations to XLA HLO.
// Implementations populate an XlaCompilationResult (which carries the
// HloModuleProto) from either a named TF function or a single op.
class TfToHloCompiler {
 public:
  TfToHloCompiler() = default;
  virtual ~TfToHloCompiler() = default;

  // Compiles a Tensorflow `function` to an HloModuleProto stored in the
  // XlaCompilationResult pointed to by `result`.
  virtual absl::Status Compile(const XlaCompiler::CompileOptions& options,
                               const NameAttrList& function,
                               absl::Span<const XlaArgument> args,
                               XlaCompilationResult* result) = 0;

  // Compiles a Tensorflow single op to an HloModuleProto stored in the
  // XlaCompilationResult pointed to by `result`.
  virtual absl::Status CompileSingleOp(
      const XlaCompiler::CompileOptions& options, const OpKernelContext* ctx,
      absl::Span<const XlaArgument> args, XlaCompilationResult* result) = 0;

 private:
  // Not copyable or assignable.
  TfToHloCompiler(const TfToHloCompiler&) = delete;
  void operator=(const TfToHloCompiler&) = delete;
};

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_JIT_TF_TO_HLO_COMPILER_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/compiler/jit/tf_to_hlo_compiler.h
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args


class TestNxosVxlanVtepVniModule(TestNxosModule):
    """Unit tests for the nxos_vxlan_vtep module (NVE interface management),
    driven entirely against a canned config fixture -- no device access."""

    module = nxos_vxlan_vtep

    def setUp(self):
        """Patch the module's load_config/get_config so no connection is made."""
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        """Undo the patches installed in setUp()."""
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, device=''):
        """Serve the static config.cfg fixture as the device's running config."""
        self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
        self.load_config.return_value = None

    def test_nxos_vxlan_vtep(self):
        """Setting a new description on an existing nve1 emits both commands."""
        set_module_args(dict(interface='nve1', description='simple description'))
        self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])

    def test_nxos_vxlan_vtep_present_no_change(self):
        """nve1 already exists in the fixture, so 'present' is a no-op."""
        set_module_args(dict(interface='nve1'))
        self.execute_module(changed=False, commands=[])

    def test_nxos_vxlan_vtep_absent(self):
        """Removing an existing interface issues 'no interface nve1'."""
        set_module_args(dict(interface='nve1', state='absent'))
        self.execute_module(changed=True, commands=['no interface nve1'])

    def test_nxos_vxlan_vtep_absent_no_change(self):
        """Removing an interface absent from the fixture is a no-op."""
        set_module_args(dict(interface='nve2', state='absent'))
        self.execute_module(changed=False, commands=[])
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
import multiscanner


def _single_report(result_value):
    """Build the one-entry report list parse_reports() expects: a single
    'file' scanned by a fake module named/typed 'Test'."""
    return [([('file', result_value)], {'Name': 'Test', 'Type': 'Test'})]


def test_valid_reports_string():
    parsed = multiscanner.parse_reports(_single_report('result'), python=False)
    assert parsed == '{"file":{"Test":"result"}}'


def test_valid_reports_python():
    parsed = multiscanner.parse_reports(_single_report('result'), python=True)
    assert parsed == {"file": {"Test": "result"}}


def test_valid_utf8_string():
    parsed = multiscanner.parse_reports(_single_report('안녕하세요'), python=False)
    assert parsed == u'{"file":{"Test":"안녕하세요"}}'


def test_valid_utf8_python():
    parsed = multiscanner.parse_reports(_single_report('안녕하세요'), python=True)
    assert parsed == {"file": {"Test": "안녕하세요"}}


def test_invalid_utf8_string():
    # The stray \x97 byte may be passed through or replaced with U+FFFD,
    # depending on how the report was sanitized; accept either rendering.
    parsed = multiscanner.parse_reports(_single_report('\x97안녕하세요'), python=False)
    assert parsed in (u'{"file":{"Test":"\x97안녕하세요"}}',
                      u'{"file":{"Test":"\ufffd안녕하세요"}}')


def test_invalid_utf8_python():
    # Same pass-through-or-replace tolerance as the string variant above.
    parsed = multiscanner.parse_reports(_single_report('\x97안녕하세요'), python=True)
    assert parsed in ({"file": {"Test": "\x97안녕하세요"}},
                      {"file": {"Test": u"\ufffd안녕하세요"}})
unknown
codeparrot/codeparrot-clean
# Useful unit conversions.
#
# Every function assumes its argument is a positive int or float and returns
# the converted value (true division, so results are floats under Python 3).
# Conversion factors: 86400 seconds/day, 365.25 days/year (Julian year),
# 1 mg/l == 0.001 kg/m^3.

_SECS_PER_DAY = 60 * 60 * 24  # 86400


def secsToDays(s):
    '''Assume s is time in seconds and a positive integer or
    float. Return time in days'''
    return s / _SECS_PER_DAY


def daysToSecs(d):
    '''Assume d is time in days and a positive integer or
    float. Return time in seconds'''
    return d * _SECS_PER_DAY


def dayToYear(d):
    '''Assume d is time in days and a positive integer or
    float. Return time in years'''
    # Bug fix: was `365.25 / d`, the inverse of the intended conversion
    # (e.g. dayToYear(730.5) returned 0.5 instead of 2.0).
    return d / 365.25


def yearToDay(y):
    '''Assume y is time in years and a positive integer or
    float. Return time in days'''
    return y * 365.25


def mPerS(mPerD):
    '''Assume mPerD is a rate in metres/day and a positive integer or
    float. Return the rate in metres/second'''
    # Bug fix: previously called an undefined helper `days()` (NameError).
    # A per-day rate shrinks by the number of seconds in a day when
    # expressed per second.
    return mPerD / _SECS_PER_DAY


def mPerD(mPerS):
    '''Assume mPerS is a rate in metres/second and a positive integer or
    float. Return the rate in metres/day'''
    # Bug fix: previously called an undefined helper `secs()` (NameError).
    return mPerS * _SECS_PER_DAY


def kgPerM3(mgl):
    '''Convert a concentration in mg/l to kg/m^3 (divide by 1000).'''
    return mgl / 1000


def mgPerL(kgPerM3):
    '''Convert a concentration in kg/m^3 to g/m^3, i.e. mg/l
    (multiply by 1000).'''
    return 1000 * kgPerM3
unknown
codeparrot/codeparrot-clean
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

use Symfony\Bundle\FrameworkBundle\FrameworkBundle;
use Symfony\Bundle\FrameworkBundle\Tests\Functional\Bundle\TestBundle\TestBundle;
use Symfony\Bundle\MercureBundle\MercureBundle;

// List of bundle instances registered by the functional-test kernel for
// this app configuration (framework + test fixtures + Mercure).
return [
    new FrameworkBundle(),
    new TestBundle(),
    new MercureBundle(),
];
php
github
https://github.com/symfony/symfony
src/Symfony/Bundle/FrameworkBundle/Tests/Functional/app/Notifier/bundles.php
<!-- Tabs demo: selectionMode="follow" means a tab is selected as soon as it
     receives keyboard focus; "movie" is the initially selected tab.  Each
     panel keeps its template content alive via [preserveContent]="true". -->
<div ngTabs>
  <div ngTabList selectionMode="follow" selectedTab="movie">
    <div ngTab value="movie">Movie</div>
    <div ngTab value="theatres">Cast</div>
    <div ngTab value="showtimes">Reviews</div>
  </div>
  <div class="sliding-window">
    <div ngTabPanel [preserveContent]="true" value="movie">
      <ng-template ngTabContent>Panel 1</ng-template>
    </div>
    <div ngTabPanel [preserveContent]="true" value="theatres">
      <ng-template ngTabContent>Panel 2</ng-template>
    </div>
    <div ngTabPanel [preserveContent]="true" value="showtimes">
      <ng-template ngTabContent>Panel 3</ng-template>
    </div>
  </div>
</div>
html
github
https://github.com/angular/angular
adev/src/content/examples/aria/tabs/src/selection-follows-focus/app/app.html
name: Find inactive TSC voting members on: schedule: # Run every Tuesday 12:05 AM UTC. - cron: 5 0 * * 2 workflow_dispatch: env: NODE_VERSION: lts/* permissions: contents: read jobs: find: if: github.repository == 'nodejs/node' runs-on: ubuntu-slim steps: - name: Checkout the repo uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 persist-credentials: false - name: Clone nodejs/TSC repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 path: .tmp persist-credentials: false repository: nodejs/TSC - name: Use Node.js ${{ env.NODE_VERSION }} uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0 with: node-version: ${{ env.NODE_VERSION }} - name: Find inactive TSC voting members run: tools/find-inactive-tsc.mjs >> $GITHUB_ENV - name: Open pull request uses: gr2m/create-or-update-pull-request-action@77596e3166f328b24613f7082ab30bf2d93079d5 # Creates a PR or update the Action's existing PR, or # no-op if the base branch is already up-to-date. env: GITHUB_TOKEN: ${{ secrets.GH_USER_TOKEN }} with: author: Node.js GitHub Bot <github-bot@iojs.org> branch: actions/inactive-tsc body: | This PR was generated by tools/find-inactive-tsc.yml. @nodejs/tsc ${{ env.INACTIVE_TSC_HANDLES }} ${{ env.DETAILS_FOR_COMMIT_BODY }} commit-message: 'meta: move TSC voting member(s) to regular member(s)' labels: meta title: 'meta: move TSC voting member(s) to regular member(s)' update-pull-request-title-and-body: true
unknown
github
https://github.com/nodejs/node
.github/workflows/find-inactive-tsc.yml
#ifndef DATE_TIME_TIME_CLOCK_HPP___ #define DATE_TIME_TIME_CLOCK_HPP___ /* Copyright (c) 2002,2003,2005 CrystalClear Software, Inc. * Use, modification and distribution is subject to the * Boost Software License, Version 1.0. (See accompanying * file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) * Author: Jeff Garland, Bart Garst * $Date$ */ /*! @file time_clock.hpp This file contains the interface for clock devices. */ #include "boost/date_time/c_time.hpp" #include "boost/shared_ptr.hpp" namespace boost { namespace date_time { //! A clock providing time level services based on C time_t capabilities /*! This clock provides resolution to the 1 second level */ template<class time_type> class second_clock { public: typedef typename time_type::date_type date_type; typedef typename time_type::time_duration_type time_duration_type; static time_type local_time() { ::std::time_t t; ::std::time(&t); ::std::tm curr, *curr_ptr; //curr_ptr = ::std::localtime(&t); curr_ptr = c_time::localtime(&t, &curr); return create_time(curr_ptr); } //! Get the current day in universal date as a ymd_type static time_type universal_time() { ::std::time_t t; ::std::time(&t); ::std::tm curr, *curr_ptr; //curr_ptr = ::std::gmtime(&t); curr_ptr = c_time::gmtime(&t, &curr); return create_time(curr_ptr); } template<class time_zone_type> static time_type local_time(boost::shared_ptr<time_zone_type> tz_ptr) { typedef typename time_type::utc_time_type utc_time_type; utc_time_type utc_time = second_clock<utc_time_type>::universal_time(); return time_type(utc_time, tz_ptr); } private: static time_type create_time(::std::tm* current) { date_type d(static_cast<unsigned short>(current->tm_year + 1900), static_cast<unsigned short>(current->tm_mon + 1), static_cast<unsigned short>(current->tm_mday)); time_duration_type td(current->tm_hour, current->tm_min, current->tm_sec); return time_type(d,td); } }; } } //namespace date_time #endif
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/date_time/time_clock.hpp
<div ngToolbar class="material-toolbar" aria-label="Text Formatting Tools"> <div class="group"> <button ngToolbarWidget value="undo" type="button" aria-label="undo" class="material-symbols-outlined" translate="no" > undo </button> <button ngToolbarWidget value="redo" type="button" aria-label="redo" class="material-symbols-outlined" translate="no" > redo </button> </div> <div class="separator" role="separator"></div> <div class="group"> <button ngToolbarWidget value="bold" type="button" aria-label="bold" #bold="ngToolbarWidget" class="material-symbols-outlined" [aria-pressed]="bold.selected()" translate="no" > format_bold </button> <button ngToolbarWidget value="italic" type="button" aria-label="italic" #italic="ngToolbarWidget" class="material-symbols-outlined" [aria-pressed]="italic.selected()" translate="no" > format_italic </button> <button ngToolbarWidget value="underlined" type="button" aria-label="underlined" #underlined="ngToolbarWidget" class="material-symbols-outlined" [aria-pressed]="underlined.selected()" translate="no" > format_underlined </button> </div> <div class="separator" role="separator"></div> <div ngToolbarWidgetGroup role="radiogroup" class="group" aria-label="Text alignment options"> <button ngToolbarWidget role="radio" type="button" value="align left" aria-label="align left" #leftAlign="ngToolbarWidget" class="material-symbols-outlined" [aria-checked]="leftAlign.selected()" translate="no" > format_align_left </button> <button ngToolbarWidget role="radio" type="button" value="align center" aria-label="align center" #centerAlign="ngToolbarWidget" class="material-symbols-outlined" [aria-checked]="centerAlign.selected()" translate="no" > format_align_center </button> <button ngToolbarWidget role="radio" type="button" value="align right" aria-label="align right" #rightAlign="ngToolbarWidget" class="material-symbols-outlined" [aria-checked]="rightAlign.selected()" translate="no" > format_align_right </button> </div> </div>
html
github
https://github.com/angular/angular
adev/src/content/examples/aria/toolbar/src/basic/material/app/app.html
"""Middleware for account-related functionality.""" from __future__ import unicode_literals import pytz from django.conf import settings from django.contrib import auth from django.utils import timezone from djblets.siteconfig.models import SiteConfiguration from reviewboard.accounts.backends import X509Backend from reviewboard.accounts.models import Profile class TimezoneMiddleware(object): """Middleware that activates the user's local timezone.""" def process_request(self, request): """Activate the user's selected timezone for this request.""" if request.user.is_authenticated(): try: user = request.user.get_profile() timezone.activate(pytz.timezone(user.timezone)) except pytz.UnknownTimeZoneError: pass class UpdateLastLoginMiddleware(object): """Middleware that updates a user's last login time more frequently. This will update the user's stored login time if it's been more than 30 minutes since they last made a request. This helps turn the login time into a recent activity time, providing a better sense of how often people are actively using Review Board. """ #: The smallest period of time between login time updates. UPDATE_PERIOD_SECS = 30 * 60 # 30 minutes def process_request(self, request): """Process the request and update the login time. Args: request (django.http.HttpRequest): The HTTP request from the client. """ user = request.user if user.is_authenticated(): now = timezone.now() delta = now - request.user.last_login if delta.total_seconds() >= self.UPDATE_PERIOD_SECS: user.last_login = now user.save(update_fields=('last_login',)) class X509AuthMiddleware(object): """Middleware that authenticates a user using X.509 certificates. If Review Board is configured to use the X.509 authentication backend, this will automatically authenticate the user using the environment variables set by mod_ssl. Apache needs to be configured with mod_ssl. 
For Review Board to be usable with X.509 client certificate authentication, the ``SSLVerifyClient`` configuration directive should be set to ``optional``. This will ensure that basic authentication will still work, allowing clients to work with a username and password. """ def process_request(self, request): """Log in users by their certificate if using X.509 authentication. This will only log in a user if the request environment (*not* the headers) are populated with a pre-verified username, and the request is being handled over HTTPS. Args: request (django.http.HttpRequest): The HTTP request from the client. """ if not request.is_secure(): return siteconfig = SiteConfiguration.objects.get_current() if siteconfig.get('auth_backend') != X509Backend.backend_id: return x509_settings_field = getattr(settings, 'X509_USERNAME_FIELD', None) if x509_settings_field == 'CUSTOM': x509_settings_field = getattr(settings, 'X509_CUSTOM_USERNAME_FIELD', None) if x509_settings_field: x509_field = request.environ.get(x509_settings_field) if x509_field: user = auth.authenticate(request=request, x509_field=x509_field) if user: request.user = user auth.login(request, user)
unknown
codeparrot/codeparrot-clean
# Shell library to run git-daemon in tests. Ends the test early if # GIT_TEST_GIT_DAEMON is not set. # # Usage: # # . ./test-lib.sh # . "$TEST_DIRECTORY"/lib-git-daemon.sh # start_git_daemon # # test_expect_success '...' ' # ... # ' # # test_expect_success ... # # test_done if ! test_bool_env GIT_TEST_GIT_DAEMON true then skip_all="git-daemon testing disabled (unset GIT_TEST_GIT_DAEMON to enable)" test_done fi if test_have_prereq !PIPE then test_skip_or_die GIT_TEST_GIT_DAEMON "file system does not support FIFOs" fi test_set_port LIB_GIT_DAEMON_PORT GIT_DAEMON_PID= GIT_DAEMON_PIDFILE="$PWD"/daemon.pid GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT registered_stop_git_daemon_atexit_handler= start_git_daemon() { if test -n "$GIT_DAEMON_PID" then error "start_git_daemon already called" fi mkdir -p "$GIT_DAEMON_DOCUMENT_ROOT_PATH" # One of the test scripts stops and then re-starts 'git daemon'. # Don't register and then run the same atexit handlers several times. if test -z "$registered_stop_git_daemon_atexit_handler" then test_atexit 'stop_git_daemon' registered_stop_git_daemon_atexit_handler=AlreadyDone fi say >&3 "Starting git daemon ..." mkfifo git_daemon_output ${LIB_GIT_DAEMON_COMMAND:-git daemon} \ --listen=127.0.0.1 --port="$LIB_GIT_DAEMON_PORT" \ --reuseaddr --verbose --pid-file="$GIT_DAEMON_PIDFILE" \ --base-path="$GIT_DAEMON_DOCUMENT_ROOT_PATH" \ "$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \ >&3 2>git_daemon_output & GIT_DAEMON_PID=$! 
{ read -r line <&7 printf "%s\n" "$line" >&4 cat <&7 >&4 & } 7<git_daemon_output && # Check expected output if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble" then kill "$GIT_DAEMON_PID" wait "$GIT_DAEMON_PID" unset GIT_DAEMON_PID test_skip_or_die GIT_TEST_GIT_DAEMON \ "git daemon failed to start" fi } stop_git_daemon() { if test -z "$GIT_DAEMON_PID" then return fi # kill git-daemon child of git say >&3 "Stopping git daemon ..." kill "$GIT_DAEMON_PID" wait "$GIT_DAEMON_PID" >&3 2>&4 ret=$? if ! test_match_signal 15 $ret then error "git daemon exited with status: $ret" fi kill "$(cat "$GIT_DAEMON_PIDFILE")" 2>/dev/null GIT_DAEMON_PID= rm -f git_daemon_output "$GIT_DAEMON_PIDFILE" } # A stripped-down version of a netcat client, that connects to a "host:port" # given in $1, sends its stdin followed by EOF, then dumps the response (until # EOF) to stdout. fake_nc() { if ! test_declared_prereq FAKENC then echo >&4 "fake_nc: need to declare FAKENC prerequisite" return 127 fi perl -Mstrict -MIO::Socket::INET -e ' my $s = IO::Socket::INET->new(shift) or die "unable to open socket: $!"; print $s <STDIN>; $s->shutdown(1); print <$s>; ' "$@" } test_lazy_prereq FAKENC ' perl -MIO::Socket::INET -e "exit 0" '
unknown
github
https://github.com/git/git
t/lib-git-daemon.sh
#!/usr/bin/env python """Get data about the lengths of the annotated sequences""" import sys, argparse, gzip, re, os, inspect from seqtools.format.gpd import GPDStream def main(args): inf = None if re.search('\.gz',args.best_gpd): inf = gzip.open(args.best_gpd) else: inf = open(args.best_gpd) gs = GPDStream(inf) z = 0 data = {} for gpd in gs: z += 1 data[z] = [gpd.length,gpd.get_exon_count()] #gpd.length inf.close() inf = None if re.search('\.gz',args.best_annotation): inf = gzip.open(args.best_annotation) else: inf = open(args.best_annotation) done_reads = set() of = sys.stdout if args.output: if re.search('\.gz$',args.output): of = gzip.open(args.output,'w') else: of = open(args.output,'w') for line in inf: f = line.rstrip().split("\t") read_id = int(f[0]) type = f[4] done_reads.add(read_id) of.write(type+"\t"+str(data[read_id][0])+"\t"+str(data[read_id][1])+"\n") for i in [x for x in range(1,z+1) if x not in done_reads]: of.write('unannotated'+"\t"+str(data[i][0])+"\t"+str(data[i][1])+"\n") of.close() def do_inputs(): parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('best_gpd',help="Best alignments") parser.add_argument('best_annotation',help="Best annotations") parser.add_argument('-o','--output',help="output file") args = parser.parse_args() return args def external_cmd(cmd): cache_argv = sys.argv sys.argv = cmd args = do_inputs() main(args) sys.argv = cache_argv if __name__=="__main__": args = do_inputs() main(args)
unknown
codeparrot/codeparrot-clean
# stdlib import tempfile import time # project from tests.checks.common import AgentCheckTest, Fixtures class NagiosTestCase(AgentCheckTest): CHECK_NAME = 'nagios' NAGIOS_TEST_LOG = Fixtures.file('nagios.log') NAGIOS_TEST_HOST = Fixtures.file('host-perfdata') NAGIOS_TEST_SVC = Fixtures.file('service-perfdata') NAGIOS_TEST_HOST_TEMPLATE = "[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$" NAGIOS_TEST_SVC_TEMPLATE = "[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$" def get_config(self, nagios_conf, events=False, service_perf=False, host_perf=False): """ Helper to generate a valid Nagios configuration """ self.nagios_cfg = tempfile.NamedTemporaryFile(mode="a+b") self.nagios_cfg.write(nagios_conf) self.nagios_cfg.flush() return { 'instances': [{ 'nagios_conf': self.nagios_cfg.name, 'collect_events': events, 'collect_service_performance_data': service_perf, 'collect_host_performance_data': host_perf }] } class EventLogTailerTestCase(NagiosTestCase): def test_line_parser(self): """ Parse lines """ config = self.get_config( '\n'.join(["log_file={0}".format(self.NAGIOS_TEST_LOG)]), events=True ) self.run_check(config) nagios_tailer = self.check.nagios_tails[self.nagios_cfg.name][0] counters = {} for line in open(self.NAGIOS_TEST_LOG).readlines(): parsed = nagios_tailer._parse_line(line) if parsed: event = self.check.get_events()[-1] t = event["event_type"] assert t in line assert int(event["timestamp"]) > 0, line assert event["host"] is not None, line counters[t] = counters.get(t, 0) + 1 if t == "SERVICE ALERT": assert event["event_soft_hard"] in ("SOFT", "HARD"), line assert event["event_state"] in ("CRITICAL", "WARNING", "UNKNOWN", "OK"), line assert event["check_name"] is not None elif t == "SERVICE NOTIFICATION": assert event["event_state"] in ("ACKNOWLEDGEMENT", "OK", "CRITICAL", "WARNING", "ACKNOWLEDGEMENT (CRITICAL)"), line elif t == "SERVICE 
FLAPPING ALERT": assert event["flap_start_stop"] in ("STARTED", "STOPPED"), line assert event["check_name"] is not None elif t == "ACKNOWLEDGE_SVC_PROBLEM": assert event["check_name"] is not None assert event["ack_author"] is not None assert int(event["sticky_ack"]) >= 0 assert int(event["notify_ack"]) >= 0 elif t == "ACKNOWLEDGE_HOST_PROBLEM": assert event["ack_author"] is not None assert int(event["sticky_ack"]) >= 0 assert int(event["notify_ack"]) >= 0 elif t == "HOST DOWNTIME ALERT": assert event["host"] is not None assert event["downtime_start_stop"] in ("STARTED", "STOPPED") self.assertEquals(counters["SERVICE ALERT"], 301) self.assertEquals(counters["SERVICE NOTIFICATION"], 120) self.assertEquals(counters["HOST ALERT"], 3) self.assertEquals(counters["SERVICE FLAPPING ALERT"], 7) self.assertEquals(counters["CURRENT HOST STATE"], 8) self.assertEquals(counters["CURRENT SERVICE STATE"], 52) self.assertEquals(counters["SERVICE DOWNTIME ALERT"], 3) self.assertEquals(counters["HOST DOWNTIME ALERT"], 5) self.assertEquals(counters["ACKNOWLEDGE_SVC_PROBLEM"], 4) assert "ACKNOWLEDGE_HOST_PROBLEM" not in counters def test_continuous_bulk_parsing(self): """ Make sure the tailer continues to parse nagios as the file grows """ x = open(self.NAGIOS_TEST_LOG).read() events = [] ITERATIONS = 10 f = tempfile.NamedTemporaryFile(mode="a+b") f.write(x) f.flush() config = self.get_config('\n'.join(["log_file={0}".format(f.name)]), events=True) self.run_check(config) for i in range(ITERATIONS): f.write(x) f.flush() self.run_check(config) events.extend(self.events) f.close() self.assertEquals(len(events), ITERATIONS * 503) class PerfDataTailerTestCase(NagiosTestCase): POINT_TIME = (int(time.time()) / 15) * 15 DB_LOG_DATA = [( "DATATYPE::SERVICEPERFDATA", "TIMET::%s" % POINT_TIME, "HOSTNAME::myhost0", "SERVICEDESC::Pgsql Backends", "SERVICEPERFDATA::" + " ".join([ "time=0.06", "db0=33;180;190;0;200", "db1=1;150;190;0;200", "db2=0;120;290;1;200", "db3=0;110;195;5;100" ]), 
"SERVICECHECKCOMMAND::check_nrpe_1arg!check_postgres_backends", "HOSTSTATE::UP", "HOSTSTATETYPE::HARD", "SERVICESTATE::OK", "SERVICESTATETYPE::HARD", )] DISK_LOG_DATA = [( "DATATYPE::SERVICEPERFDATA", "TIMET::%s" % POINT_TIME, "HOSTNAME::myhost2", "SERVICEDESC::Disk Space", "SERVICEPERFDATA::" + " ".join([ "/=5477MB;6450;7256;0;8063", "/dev=0MB;2970;3341;0;3713", "/dev/shm=0MB;3080;3465;0;3851", "/var/run=0MB;3080;3465;0;3851", "/var/lock=0MB;3080;3465;0;3851", "/lib/init/rw=0MB;3080;3465;0;3851", "/mnt=290MB;338636;380966;0;423296", "/data=39812MB;40940;46057;0;51175", ]), "SERVICECHECKCOMMAND::check_all_disks!20%!10%", "HOSTSTATE::UP", "HOSTSTATETYPE::HARD", "SERVICESTATE::OK", "SERVICESTATETYPE::HARD", )] HOST_LOG_DATA = [( "DATATYPE::HOSTPERFDATA", "TIMET::%s" % POINT_TIME, "HOSTNAME::myhost1", "HOSTPERFDATA::" + " ".join([ "rta=0.978000ms;5000.000000;5000.000000;0.000000", "pl=0%;100;100;0", ]), "HOSTCHECKCOMMAND::check-host-alive", "HOSTSTATE::UP", "HOSTSTATETYPE::HARD", )] def _write_log(self, log_data): """ Write log data to log file """ for data in log_data: self.log_file.write(data + "\n") self.log_file.flush() def compare_metric(self, actual, expected): """ Return true when `actual` metic == `expected` metric """ self.assertEquals(actual[0], expected[0], "Metrics name actual:{0} vs expected:{1}" .format(actual[0], expected[0])) self.assertEquals(actual[1], expected[1], "Timestamp actual:{0} vs expected:{1}" .format(actual[1], expected[1])) self.assertEquals(actual[2], expected[2], "Value actual:{0} vs expected:{1}" .format(actual[2], expected[2])) self.assertEqual(actual[3], expected[3], "Context actual:{0} vs expected:{1}" .format(actual[3], expected[3])) def test_service_perfdata(self): """ Collect Nagios Service PerfData metrics """ self.log_file = tempfile.NamedTemporaryFile() config = self.get_config( '\n'.join(["service_perfdata_file=%s" % self.log_file.name, 
"service_perfdata_file_template=DATATYPE::SERVICEPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$\tSERVICECHECKCOMMAND::$SERVICECHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$\tSERVICESTATE::$SERVICESTATE$\tSERVICESTATETYPE::$SERVICESTATETYPE$"]), service_perf=True) self.run_check(config) # Write content to log file and run check self._write_log(['\t'.join(data) for data in self.DB_LOG_DATA]) self.run_check(config) # Test metrics service_perf_data = self.DB_LOG_DATA[0][4][17:] # 'time=0.06 db0=33;180;190;0;200 db1=1;150;190;0;200 db2=0;120;290;1;200 db3=0;110;195;5;100' for metric_data in service_perf_data.split(" "): name, info = metric_data.split("=") metric_name = "nagios.pgsql_backends." + name values = info.split(";") value = float(values[0]) expected_tags = [] if len(values) == 5: expected_tags.append('warn:' + values[1]) expected_tags.append('crit:' + values[2]) expected_tags.append('min:' + values[3]) expected_tags.append('max:' + values[4]) self.assertMetric(metric_name, value=value, tags=expected_tags, count=1) self.coverage_report() def test_service_perfdata_special_cases(self): """ Handle special cases in PerfData metrics """ self.log_file = tempfile.NamedTemporaryFile() config = self.get_config( '\n'.join(["service_perfdata_file=%s" % self.log_file.name, "service_perfdata_file_template=DATATYPE::SERVICEPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$\tSERVICECHECKCOMMAND::$SERVICECHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$\tSERVICESTATE::$SERVICESTATE$\tSERVICESTATETYPE::$SERVICESTATETYPE$",]), service_perf=True) self.run_check(config) # Write content to log file and run check self._write_log(['\t'.join(data) for data in self.DISK_LOG_DATA]) self.run_check(config) # Test metrics service_perf_data = self.DISK_LOG_DATA[0][4][17:] for metric_data in service_perf_data.split(" "): name, 
info = metric_data.split("=") values = info.split(";") value = int(values[0][:-2]) expected_tags = ['unit:' + values[0][-2:]] if len(values) == 5: expected_tags.append('warn:' + values[1]) expected_tags.append('crit:' + values[2]) expected_tags.append('min:' + values[3]) expected_tags.append('max:' + values[4]) self.assertMetric("nagios.disk_space", value=value, tags=expected_tags, device_name=name, count=1) self.coverage_report() def test_host_perfdata(self): """ Collect Nagios Host PerfData metrics """ self.log_file = tempfile.NamedTemporaryFile() config = self.get_config( '\n'.join(["host_perfdata_file=%s" % self.log_file.name, "host_perfdata_file_template=DATATYPE::HOSTPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tHOSTPERFDATA::$HOSTPERFDATA$\tHOSTCHECKCOMMAND::$HOSTCHECKCOMMAND$\tHOSTSTATE::$HOSTSTATE$\tHOSTSTATETYPE::$HOSTSTATETYPE$"]), host_perf=True) self.run_check(config) # Write content to log file and run check self._write_log(['\t'.join(data) for data in self.HOST_LOG_DATA]) self.run_check(config) # Test metric service_perf_data = self.HOST_LOG_DATA[0][3][14:] for metric_data in service_perf_data.split(" "): name, info = metric_data.split("=") metric_name = "nagios.host." 
+ name values = info.split(";") index = values[0].find("ms") if values[0].find("ms") != -1 else values[0].find("%") index = len(values[0]) - index value = float(values[0][:-index]) expected_tags = ['unit:' + values[0][-index:]] if len(values) == 4: expected_tags.append('warn:' + values[1]) expected_tags.append('crit:' + values[2]) expected_tags.append('min:' + values[3]) self.assertMetric(metric_name, value=value, tags=expected_tags, count=1) self.coverage_report() def test_alt_service_perfdata(self): """ Collect Nagios Service PerfData metrics - alternative template """ self.log_file = tempfile.NamedTemporaryFile() perfdata_file = tempfile.NamedTemporaryFile() config = self.get_config('\n'.join( ["service_perfdata_file=%s" % perfdata_file.name, "service_perfdata_file_template=%s" % self.NAGIOS_TEST_SVC_TEMPLATE]), service_perf=True ) self.run_check(config) with open(self.NAGIOS_TEST_SVC, "r") as f: nagios_perf = f.read() perfdata_file.write(nagios_perf) perfdata_file.flush() self.run_check(config) # Test metrics expected_output = [ ( 'nagios.current_users.users', 1339511440, 1.0, { 'type': 'gauge', 'hostname': 'localhost', 'tags': ['warn:20', 'crit:50', 'min:0'] } ), ( 'nagios.ping.pl', 1339511500, 0.0, { 'type': 'gauge', 'hostname': 'localhost', 'tags': ['unit:%', 'warn:20', 'crit:60', 'min:0'] } ), ( 'nagios.ping.rta', 1339511500, 0.065, { 'type': 'gauge', 'hostname': 'localhost', 'tags': ['unit:ms', 'warn:100.000000', 'crit:500.000000', 'min:0.000000'] } ), ('nagios.root_partition', 1339511560, 2470.0, { 'type': 'gauge', 'hostname': 'localhost', 'device_name': '/', 'tags': ['unit:MB', 'warn:5852', 'crit:6583', 'min:0', 'max:7315'] } ) ] for actual, expected in zip(sorted(self.metrics), sorted(expected_output)): self.compare_metric(actual, expected) self.coverage_report() def test_alt_host_perfdata(self): """ Collect Nagios Host PerfData metrics - alternative template """ self.log_file = tempfile.NamedTemporaryFile() perfdata_file = tempfile.NamedTemporaryFile() 
config = self.get_config( '\n'.join(["host_perfdata_file=%s" % perfdata_file.name, "host_perfdata_file_template=%s" % self.NAGIOS_TEST_HOST_TEMPLATE]), host_perf=True) self.run_check(config) with open(self.NAGIOS_TEST_HOST, "r") as f: nagios_perf = f.read() perfdata_file.write(nagios_perf) perfdata_file.flush() self.run_check(config) # Test metrics expected_output = [ ( 'nagios.host.pl', 1339511440, 0.0, { 'type': 'gauge', 'hostname': 'localhost', 'tags': ['unit:%', 'warn:80', 'crit:100', 'min:0'] } ), ( 'nagios.host.rta', 1339511440, 0.048, { 'type': 'gauge', 'hostname': 'localhost', 'tags': ['unit:ms', 'warn:3000.000000', 'crit:5000.000000', 'min:0.000000'] } )] for actual, expected in zip(sorted(self.metrics), sorted(expected_output)): self.compare_metric(actual, expected) self.coverage_report()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # #################################################################### # Copyright (C) 2005-2013 by the FIFE team # http://www.fifengine.net # This file is part of FIFE. # # FIFE is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # #################################################################### import os, sys, unittest fife_path = os.path.join('..','..','engine','python') if os.path.isdir(fife_path) and fife_path not in sys.path: sys.path.insert(0,fife_path) from fife import fife print "Using the FIFE python module found here: ", os.path.dirname(fife.__file__) from fife.extensions import fifelog def getEngine(minimized=False): e = fife.Engine() log = fifelog.LogManager(e, promptlog=False, filelog=True) log.setVisibleModules('all') s = e.getSettings() s.setRenderBackend('OpenGL') s.setDefaultFontPath('../data/FreeMono.ttf') s.setDefaultFontGlyphs(" abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + ".,!?-+/:();%`'*#=[]") if minimized: s.setScreenWidth(1) s.setScreenHeight(1) s.setDefaultFontSize(12) e.init() return e __all__ = [] __all__.append('unittest') __all__.append('fife') __all__.append('fifelog') __all__.append('getEngine')
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- """ Admin site configuration for third party authentication """ from config_models.admin import KeyedConfigurationModelAdmin from django import forms from django.contrib import admin from django.urls import reverse from django.db import DatabaseError, transaction from django.utils.translation import ugettext_lazy as _ from third_party_auth.provider import Registry from .models import ( _PSA_OAUTH2_BACKENDS, _PSA_SAML_BACKENDS, LTIProviderConfig, OAuth2ProviderConfig, ProviderApiPermissions, SAMLConfiguration, SAMLProviderConfig, SAMLProviderData ) from .tasks import fetch_saml_metadata from openedx.core.djangolib.markup import HTML class OAuth2ProviderConfigForm(forms.ModelForm): """ Django Admin form class for OAuth2ProviderConfig """ backend_name = forms.ChoiceField(choices=((name, name) for name in _PSA_OAUTH2_BACKENDS)) class OAuth2ProviderConfigAdmin(KeyedConfigurationModelAdmin): """ Django Admin class for OAuth2ProviderConfig """ form = OAuth2ProviderConfigForm def get_list_display(self, request): """ Don't show every single field in the admin change list """ return ( 'name', 'enabled', 'slug', 'site', 'backend_name', 'secondary', 'skip_registration_form', 'skip_email_verification', 'change_date', 'changed_by', 'edit_link', ) admin.site.register(OAuth2ProviderConfig, OAuth2ProviderConfigAdmin) class SAMLProviderConfigForm(forms.ModelForm): """ Django Admin form class for SAMLProviderConfig """ backend_name = forms.ChoiceField(choices=((name, name) for name in _PSA_SAML_BACKENDS)) class SAMLProviderConfigAdmin(KeyedConfigurationModelAdmin): """ Django Admin class for SAMLProviderConfig """ form = SAMLProviderConfigForm def get_queryset(self, request): """ Filter the queryset to exclude the archived records. """ queryset = super(SAMLProviderConfigAdmin, self).get_queryset(request).exclude(archived=True) return queryset def archive_provider_configuration(self, request, queryset): """ Archived the selected provider configurations. 
""" with transaction.atomic(): for obj in queryset: self.model.objects.filter(pk=obj.pk).update(archived=True, enabled=False) self.message_user(request, _("Deleted the selected configuration(s).")) def get_list_display(self, request): """ Don't show every single field in the admin change list """ return ( 'name_with_update_link', 'enabled', 'site', 'entity_id', 'metadata_source', 'has_data', 'mode', 'saml_configuration', 'change_date', 'changed_by', ) list_display_links = None def get_actions(self, request): """ Get the actions. """ actions = super(SAMLProviderConfigAdmin, self).get_actions(request) action_delete = { 'archive_provider_configuration': ( SAMLProviderConfigAdmin.archive_provider_configuration, 'archive_provider_configuration', _('Delete the selected configuration') ) } actions.update(action_delete) return actions def name_with_update_link(self, instance): """ Record name with link for the change view. """ if not instance.is_active: return instance.name update_url = reverse('admin:{}_{}_add'.format(self.model._meta.app_label, self.model._meta.model_name)) update_url += '?source={}'.format(instance.pk) return HTML(u'<a href="{}">{}</a>').format(update_url, instance.name) name_with_update_link.allow_tags = True name_with_update_link.short_description = u'Name' def has_data(self, inst): """ Do we have cached metadata for this SAML provider? """ if not inst.is_active: return None # N/A data = SAMLProviderData.current(inst.entity_id) return bool(data and data.is_valid()) has_data.short_description = u'Metadata Ready' has_data.boolean = True def mode(self, inst): """ Indicate if debug_mode is enabled or not""" if inst.debug_mode: return '<span style="color: red;">Debug</span>' return "Normal" mode.allow_tags = True def save_model(self, request, obj, form, change): """ Post save: Queue an asynchronous metadata fetch to update SAMLProviderData. We only want to do this for manual edits done using the admin interface. 
Note: This only works if the celery worker and the app worker are using the same 'configuration' cache. """ super(SAMLProviderConfigAdmin, self).save_model(request, obj, form, change) fetch_saml_metadata.apply_async((), countdown=2) admin.site.register(SAMLProviderConfig, SAMLProviderConfigAdmin) class SAMLConfigurationAdmin(KeyedConfigurationModelAdmin): """ Django Admin class for SAMLConfiguration """ def get_list_display(self, request): """ Shorten the public/private keys in the change view """ return ( 'site', 'slug', 'change_date', 'changed_by', 'enabled', 'entity_id', 'org_info_str', 'key_summary', 'edit_link', ) def key_summary(self, inst): """ Short summary of the key pairs configured """ public_key = inst.get_setting('SP_PUBLIC_CERT') private_key = inst.get_setting('SP_PRIVATE_KEY') if not public_key or not private_key: return HTML(u'<em>Key pair incomplete/missing</em>') pub1, pub2 = public_key[0:10], public_key[-10:] priv1, priv2 = private_key[0:10], private_key[-10:] return HTML(u'Public: {}…{}<br>Private: {}…{}').format(pub1, pub2, priv1, priv2) key_summary.allow_tags = True admin.site.register(SAMLConfiguration, SAMLConfigurationAdmin) class SAMLProviderDataAdmin(admin.ModelAdmin): """ Django Admin class for SAMLProviderData (Read Only) """ list_display = ('entity_id', 'is_valid', 'fetched_at', 'expires_at', 'sso_url') readonly_fields = ('is_valid', ) def get_readonly_fields(self, request, obj=None): if obj: # editing an existing object return [field.name for field in self.model._meta.get_fields()] return self.readonly_fields admin.site.register(SAMLProviderData, SAMLProviderDataAdmin) class LTIProviderConfigAdmin(KeyedConfigurationModelAdmin): """ Django Admin class for LTIProviderConfig """ exclude = ( 'icon_class', 'icon_image', 'secondary', ) def get_list_display(self, request): """ Don't show every single field in the admin change list """ return ( 'name', 'enabled', 'site', 'lti_consumer_key', 'lti_max_timestamp_age', 'change_date', 
'changed_by', 'edit_link', ) admin.site.register(LTIProviderConfig, LTIProviderConfigAdmin) class ApiPermissionsAdminForm(forms.ModelForm): """ Django admin form for ApiPermissions model """ class Meta(object): model = ProviderApiPermissions fields = ['client', 'provider_id'] provider_id = forms.ChoiceField(choices=[], required=True) def __init__(self, *args, **kwargs): super(ApiPermissionsAdminForm, self).__init__(*args, **kwargs) self.fields['provider_id'].choices = ( (provider.provider_id, u"{} ({})".format(provider.name, provider.provider_id)) for provider in Registry.enabled() ) class ApiPermissionsAdmin(admin.ModelAdmin): """ Django Admin class for ApiPermissions """ list_display = ('client', 'provider_id') form = ApiPermissionsAdminForm admin.site.register(ProviderApiPermissions, ApiPermissionsAdmin)
unknown
codeparrot/codeparrot-clean
# (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import annotations DOCUMENTATION = """ name: varnames author: Ansible Core Team version_added: "2.8" short_description: Lookup matching variable names description: - Retrieves a list of matching Ansible variable names. options: _terms: description: List of Python regex patterns to search for in variable names. required: True seealso: - plugin_type: lookup plugin: ansible.builtin.vars """ EXAMPLES = """ - name: List variables that start with qz_ ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '^qz_.+') }}" vars: qz_1: hello qz_2: world qa_1: "I won't show" qz_: "I won't show either" - name: Show all variables ansible.builtin.debug: msg="{{ lookup('ansible.builtin.varnames', '.+') }}" - name: Show variables with 'hosts' in their names ansible.builtin.debug: msg="{{ q('varnames', 'hosts') }}" - name: Find several related variables that end specific way ansible.builtin.debug: msg="{{ query('ansible.builtin.varnames', '.+_zone$', '.+_location$') }}" - name: display values from variables found via varnames (note "*" is used to dereference the list to a 'list of arguments') debug: msg="{{ lookup('vars', *lookup('varnames', 'ansible_play_.+')) }}" """ RETURN = """ _value: description: - List of the variable names requested. 
type: list """ import re from ansible.errors import AnsibleError from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): if variables is None: raise AnsibleError('No variables available to search') self.set_options(var_options=variables, direct=kwargs) ret = [] variable_names = list(variables.keys()) for term in terms: if not isinstance(term, str): raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term))) try: name = re.compile(term) except Exception as e: raise AnsibleError('Unable to use "%s" as a search parameter: %s' % (term, to_native(e))) for varname in variable_names: if name.search(varname): ret.append(varname) return ret
python
github
https://github.com/ansible/ansible
lib/ansible/plugins/lookup/varnames.py
from msg_handler import db, logger from sqlalchemy.orm import backref from sqlalchemy import event import datetime # Create user model. class User(db.Model): id = db.Column(db.Integer, primary_key=True) first_name = db.Column(db.String(100)) last_name = db.Column(db.String(100)) email = db.Column(db.String(120), unique=True) password = db.Column(db.String(64)) # Flask-Login integration def is_authenticated(self): return True def is_active(self): return True def is_anonymous(self): return False def get_id(self): return self.id # Required for administrative interface def __unicode__(self): return self.email class Query(db.Model): query_id = db.Column(db.Integer, primary_key=True) content = db.Column(db.String(180)) vumi_message_id = db.Column(db.String(100), unique=True) conversation_key = db.Column(db.String(100)) from_addr = db.Column(db.String(100)) datetime = db.Column(db.DateTime(), default=datetime.datetime.now) status = db.Column(db.String(20), default="pending") starred = db.Column(db.Boolean, default=False) class Response(db.Model): response_id = db.Column(db.Integer, primary_key=True) content = db.Column(db.String(180), nullable=False) datetime = db.Column(db.DateTime(), default=datetime.datetime.now) query_id = db.Column(db.Integer, db.ForeignKey('query.query_id')) query = db.relationship('Query', backref=backref("responses", order_by=datetime)) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User') class Note(db.Model): note_id = db.Column(db.Integer, primary_key=True) content = db.Column(db.String(250), nullable=False) datetime = db.Column(db.DateTime(), default=datetime.datetime.now) query_id = db.Column(db.Integer, db.ForeignKey('query.query_id')) query = db.relationship('Query', backref=backref("notes", order_by=datetime)) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User') class Update(db.Model): update_id = db.Column(db.Integer, primary_key=True) content = 
db.Column(db.String(180)) datetime = db.Column(db.DateTime(), default=datetime.datetime.now) notes = db.Column(db.Text(250)) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) user = db.relationship('User')
unknown
codeparrot/codeparrot-clean
# Copyright 2015, Pinterest, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Inspector supports traversal of the token tree hierarchy. In particular, inspector comes handy when we work with tokens hierarchies that contain name components unknown to the client. Inspector understands the hierarchical names of workflow tokens and exposes an interface to traverse those names level-by-level. E.g., we may use the inspector to find all workflow instance ids or all waiting jobs in a given workflow isntance. """ from pinball.master.thrift_lib.ttypes import GroupRequest from pinball.workflow.name import Name __author__ = 'Pawel Garbacki' __copyright__ = 'Copyright 2015, Pinterest, Inc.' 
__credits__ = [__author__] __license__ = 'Apache' __version__ = '2.0' class Inspector(object): def __init__(self, client): self._client = client def get_workflow_names(self): """Return list of workflow names.""" request = GroupRequest() request.namePrefix = Name.WORKFLOW_PREFIX request.groupSuffix = Name.DELIMITER response = self._client.group(request) workflow_names = [] if response.counts: for prefix in response.counts.keys(): name = Name.from_workflow_prefix(prefix) if name.workflow: workflow_names.append(name.workflow) return workflow_names def get_workflow_instances(self, workflow_name): """Return list of instances of a given workflow.""" request = GroupRequest() name = Name() name.workflow = workflow_name request.namePrefix = name.get_workflow_prefix() request.groupSuffix = Name.DELIMITER response = self._client.group(request) instance_names = [] if response.counts: for prefix in response.counts.keys(): name = Name.from_instance_prefix(prefix) if name.instance: instance_names.append(name.instance) return instance_names def _get_job_names(self, workflow_name, instance, state): """Return list of job names in a given workflow instance and state. E.g., assume the following tokens are stored in the master: /workflow/some_workflow/12345/waiting/some_waiting_job /workflow/some_workflow/12345/waiting/some_other_waiting_job /workflow/some_workflow/12345/runnable/some_runnable_job the method called with workflow_name=some_workflow, instance=12345, state=waiting will return [some_waiting_job, some_other_waiting_job]. 
""" request = GroupRequest() name = Name() name.workflow = workflow_name name.instance = instance name.job_state = state request.namePrefix = name.get_job_state_prefix() request.groupSuffix = Name.DELIMITER response = self._client.group(request) job_names = [] if response.counts: for job_name in response.counts.keys(): name = Name.from_job_token_name(job_name) job_names.append(name.job) return job_names def get_runnable_job_names(self, workflow_name, instance): """Return names of runnable jobs in a given workflow instance.""" return self._get_job_names(workflow_name, instance, Name.RUNNABLE_STATE) def get_waiting_job_names(self, workflow_name, instance): """Return names of waiting jobs in a given workflow instance.""" return self._get_job_names(workflow_name, instance, Name.WAITING_STATE) def get_event_names(self, workflow_name, instance, job, input_name): """Return names of events under a workflow instance, job, and input.""" request = GroupRequest() name = Name() name.workflow = workflow_name name.instance = instance name.job = job name.input = input_name request.namePrefix = name.get_input_prefix() request.groupSuffix = Name.DELIMITER response = self._client.group(request) events = [] if response.counts: for event in response.counts.keys(): name = Name.from_event_token_name(event) events.append(name.event) return events
unknown
codeparrot/codeparrot-clean
# Migrating to 3.0.0 - The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`. - Cookie handling has been offloaded to the `cookie` crate: - `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs. - Some types now require lifetime parameters. - The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects any `actix-web` method previously expecting a time v0.1 input. - Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now result in `SameSite=None` being sent with the response Set-Cookie header. To create a cookie without a SameSite attribute, remove any calls setting same_site. - actix-http support for Actors messages was moved to actix-http crate and is enabled with feature `actors` - content_length function is removed from actix-http. You can set Content-Length by normally setting the response body or calling no_chunking function. - `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a `u64` instead of a `usize`. - Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use destructuring or `.into_inner()`. For example: ```rust // Previously: async fn some_route(path: web::Path<(String, String)>) -> String { format!("Hello, {} {}", path.0, path.1) } // Now (this also worked before): async fn some_route(path: web::Path<(String, String)>) -> String { let (first_name, last_name) = path.into_inner(); format!("Hello, {} {}", first_name, last_name) } // Or (this wasn't previously supported): async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String { format!("Hello, {} {}", first_name, last_name) } ``` - `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one. 
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`, or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`. - `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. - `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
unknown
github
https://github.com/actix/actix-web
actix-web/MIGRATION-3.0.md
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_imish_config short_description: Manage BIG-IP advanced routing configuration sections description: - This module provides an implementation for working with advanced routing configuration sections in a deterministic way. version_added: 2.8 options: route_domain: description: - Route domain to manage BGP configuration on. default: 0 lines: description: - The ordered set of commands that should be configured in the section. - The commands must be the exact same commands as found in the device running-config. - Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser. aliases: ['commands'] parents: description: - The ordered set of parents that uniquely identify the section or hierarchy the commands should be checked against. - If the C(parents) argument is omitted, the commands are checked against the set of top level or global commands. src: description: - The I(src) argument provides a path to the configuration file to load into the remote system. - The path can either be a full system path to the configuration file if the value starts with / or relative to the root of the implemented role or playbook. - This argument is mutually exclusive with the I(lines) and I(parents) arguments. before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. - This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system. 
after: description: - The ordered set of commands to append to the end of the command stack if a change needs to be made. - Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. match: description: - Instructs the module on the way to perform the matching of the set of commands against the current device config. - If match is set to I(line), commands are matched line by line. - If match is set to I(strict), command lines are matched with respect to position. - If match is set to I(exact), command lines must be an equal match. - Finally, if match is set to I(none), the module will not attempt to compare the source configuration with the running configuration on the remote device. default: line choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration on the device. - If the replace argument is set to I(line) then the modified lines are pushed to the device in configuration mode. - If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any line is not correct. default: line choices: ['line', 'block'] backup: description: - This argument will cause the module to create a full backup of the current C(running-config) from the remote device before any changes are made. - The backup file is written to the C(backup) folder in the playbook root directory or role root directory, if playbook is part of an ansible role. If the directory does not exist, it is created. type: bool default: 'no' running_config: description: - The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. - There are times when it is not desirable to have the task get the current running-config for every task in a playbook. 
- The I(running_config) argument allows the implementer to pass in the configuration to use as the base config for comparison. aliases: ['config'] save_when: description: - When changes are made to the device running-configuration, the changes are not copied to non-volatile storage by default. - If the argument is set to I(always), then the running-config will always be copied to the startup-config and the I(modified) flag will always be set to C(True). - If the argument is set to I(modified), then the running-config will only be copied to the startup-config if it has changed since the last save to startup-config. - If the argument is set to I(never), the running-config will never be copied to the startup-config. - If the argument is set to I(changed), then the running-config will only be copied to the startup-config if the task has made a change. default: never choices: ['always', 'never', 'modified', 'changed'] diff_against: description: - When using the C(ansible-playbook --diff) command line argument the module can generate diffs against different sources. - When this option is configure as I(startup), the module will return the diff of the running-config against the startup-config. - When this option is configured as I(intended), the module will return the diff of the running-config against the configuration provided in the C(intended_config) argument. - When this option is configured as I(running), the module will return the before and after diff of the running-config with respect to any changes made to the device configuration. default: startup choices: ['startup', 'intended', 'running'] diff_ignore_lines: description: - Use this argument to specify one or more lines that should be ignored during the diff. - This is used for lines in the configuration that are automatically updated by the system. - This argument takes a list of regular expressions or exact line matches. 
intended_config: description: - The C(intended_config) provides the master configuration that the node should conform to and is used to check the final running-config against. - This argument will not modify any settings on the remote device and is strictly used to check the compliance of the current device's configuration against. - When specifying this argument, the task should also modify the C(diff_against) value and set it to I(intended). notes: - Abbreviated commands are NOT idempotent, see L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands). extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: configure top level configuration and save it bigip_imish_config: lines: bfd slow-timer 2000 save_when: modified provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost - name: diff the running-config against a provided config bigip_imish_config: diff_against: intended intended_config: "{{ lookup('file', 'master.cfg') }}" provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost - name: Add config to a parent block bigip_imish_config: lines: - bgp graceful-restart restart-time 120 - redistribute kernel route-map rhi - neighbor 10.10.10.11 remote-as 65000 - neighbor 10.10.10.11 fall-over bfd - neighbor 10.10.10.11 remote-as 65000 - neighbor 10.10.10.11 fall-over bfd parents: router bgp 64664 match: exact provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost - name: Remove an existing acl before writing it bigip_imish_config: lines: - access-list 10 permit 20.20.20.20 - access-list 10 permit 20.20.20.21 - access-list 10 deny any before: no access-list 10 provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost - name: for idempotency, use full-form commands bigip_imish_config: lines: # - desc My interface - description My Interface # 
parents: int ANYCAST-P2P-2 parents: interface ANYCAST-P2P-2 provider: user: admin password: secret server: lb.mydomain.com delegate_to: localhost ''' RETURN = r''' commands: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['interface ANYCAST-P2P-2', 'neighbor 20.20.20.21 remote-as 65000', 'neighbor 20.20.20.21 fall-over bfd'] updates: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['interface ANYCAST-P2P-2', 'neighbor 20.20.20.21 remote-as 65000', 'neighbor 20.20.20.21 fall-over bfd'] backup_path: description: The full path to the backup file returned: when backup is yes type: str sample: /playbooks/ansible/backup/bigip_imish_config.2016-07-16@22:28:34 ''' try: from StringIO import StringIO except ImportError: from io import StringIO import os import tempfile from ansible.module_utils.network.common.config import NetworkConfig, dumps from ansible.module_utils.network.common.utils import to_list from ansible.module_utils.basic import AnsibleModule try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import exit_json from library.module_utils.network.f5.common import fail_json from library.module_utils.network.f5.icontrol import upload_file except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common 
import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import exit_json from ansible.module_utils.network.f5.common import fail_json from ansible.module_utils.network.f5.icontrol import upload_file class Parameters(AnsibleF5Parameters): api_map = { } api_attributes = [ ] returnables = [ '__backup__', 'commands', 'updates' ] updatables = [ ] class ApiParameters(Parameters): pass class ModuleParameters(Parameters): pass class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): pass class ReportableChanges(Changes): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = 
UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): result = dict() changed = self.present() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def present(self): result = dict(changed=False) config = None contents = None if self.want.backup or (self.module._diff and self.want.diff_against == 'running'): contents = self.read_current_from_device() config = NetworkConfig(indent=1, contents=contents) if self.want.backup: # The backup file is created in the bigip_imish_config action plugin. Refer # to that if you have questions. The key below is removed by the action plugin. result['__backup__'] = contents if any((self.want.src, self.want.lines)): match = self.want.match replace = self.want.replace candidate = self.get_candidate() running = self.get_running_config(contents) response = self.get_diff( candidate=candidate, running=running, diff_match=match, diff_ignore_lines=self.want.diff_ignore_lines, path=self.want.parents, diff_replace=replace ) config_diff = response['config_diff'] if config_diff: commands = config_diff.split('\n') if self.want.before: commands[:0] = self.want.before if self.want.after: commands.extend(self.want.after) result['commands'] = commands result['updates'] = commands if not self.module.check_mode: self.load_config(commands) result['changed'] = True running_config = self.want.running_config startup_config = None if self.want.save_when == 'always': self.save_config(result) elif self.want.save_when == 'modified': output = self.execute_show_commands(['show running-config', 'show startup-config']) running_config = NetworkConfig(indent=1, contents=output[0], ignore_lines=self.want.diff_ignore_lines) startup_config = NetworkConfig(indent=1, contents=output[1], ignore_lines=self.want.diff_ignore_lines) 
if running_config.sha1 != startup_config.sha1: self.save_config(result) elif self.want.save_when == 'changed' and result['changed']: self.save_on_device() if self.module._diff: if not running_config: output = self.execute_show_commands('show running-config') contents = output[0] else: contents = running_config # recreate the object in order to process diff_ignore_lines running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=self.want.diff_ignore_lines) if self.want.diff_against == 'running': if self.module.check_mode: self.module.warn("unable to perform diff against running-config due to check mode") contents = None else: contents = config.config_text elif self.want.diff_against == 'startup': if not startup_config: output = self.execute_show_commands('show startup-config') contents = output[0] else: contents = startup_config.config_text elif self.want.diff_against == 'intended': contents = self.want.intended_config if contents is not None: base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=self.want.diff_ignore_lines) if running_config.sha1 != base_config.sha1: if self.want.diff_against == 'intended': before = running_config after = base_config elif self.want.diff_against in ('startup', 'running'): before = base_config after = running_config result.update({ 'changed': True, 'diff': {'before': str(before), 'after': str(after)} }) self.changes.update(result) return result['changed'] def load_config(self, commands): content = StringIO("\n".join(commands)) file = tempfile.NamedTemporaryFile() name = os.path.basename(file.name) self.upload_file_to_device(content, name) self.load_config_on_device(name) self.remove_uploaded_file_from_device(name) def remove_uploaded_file_from_device(self, name): filepath = '/var/config/rest/downloads/{0}'.format(name) params = { "command": "run", "utilCmdArgs": filepath } uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = 
self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def upload_file_to_device(self, content, name): url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format( self.client.provider['server'], self.client.provider['server_port'] ) try: upload_file(self.client, url, content, name) except F5ModuleError: raise F5ModuleError( "Failed to upload the file." ) def load_config_on_device(self, name): filepath = '/var/config/rest/downloads/{0}'.format(name) command = 'imish -r {0} -f {1}'.format(self.want.route_domain, filepath) params = { "command": "run", "utilCmdArgs": '-c "{0}"'.format(command) } uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() if 'commandResult' in response: if 'Dynamic routing is not enabled' in response['commandResult']: raise F5ModuleError(response['commandResult']) except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def read_current_from_device(self): command = 'imish -r {0} -e \\\"show running-config\\\"'.format(self.want.route_domain) params = { "command": "run", "utilCmdArgs": '-c "{0}"'.format(command) } uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() if 'commandResult' in response: if 'Dynamic routing is not enabled' in response['commandResult']: raise F5ModuleError(response['commandResult']) except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in 
response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return response['commandResult'] def save_on_device(self): command = 'imish -e write' params = { "command": "run", "utilCmdArgs": '-c "{0}"'.format(command) } uri = "https://{0}:{1}/mgmt/tm/util/bash".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'): diff = {} # prepare candidate configuration candidate_obj = NetworkConfig(indent=1) candidate_obj.load(candidate) if running and diff_match != 'none' and diff_replace != 'config': # running configuration running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines) configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace) else: configdiffobjs = candidate_obj.items diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else '' return diff def get_running_config(self, config=None): contents = self.want.running_config if not contents: if config: contents = config else: contents = self.read_current_from_device() return contents def get_candidate(self): candidate = '' if self.want.src: candidate = self.want.src elif self.want.lines: candidate_obj = NetworkConfig(indent=1) parents = self.want.parents or list() candidate_obj.add(self.want.lines, parents=parents) candidate = dumps(candidate_obj, 'raw') return candidate def execute_show_commands(self, commands): body = [] uri = "https://{0}:{1}/mgmt/tm/util/bash".format( 
self.client.provider['server'], self.client.provider['server_port'] ) for command in to_list(commands): command = 'imish -r {0} -e \\\"{1}\\\"'.format(self.want.route_domain, command) params = { "command": "run", "utilCmdArgs": '-c "{0}"'.format(command) } resp = self.client.api.post(uri, json=params) try: response = resp.json() if 'commandResult' in response: if 'Dynamic routing is not enabled' in response['commandResult']: raise F5ModuleError(response['commandResult']) except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) body.append(response['commandResult']) return body def save_config(self, result): result['changed'] = True if self.module.check_mode: self.module.warn( 'Skipping command `copy running-config startup-config` ' 'due to check_mode. Configuration not copied to ' 'non-volatile storage' ) return self.save_on_device() class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( route_domain=dict(default=0), src=dict(type='path'), lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block']), running_config=dict(aliases=['config']), intended_config=dict(), backup=dict(type='bool', default=False), save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'), diff_against=dict(choices=['running', 'startup', 'intended'], default='startup'), diff_ignore_lines=dict(type='list'), ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ('lines', 'src'), ('parents', 'src'), ] self.required_if = [ ('match', 'strict', ['lines']), ('match', 'exact', ['lines']), 
('replace', 'block', ['lines']), ('diff_against', 'intended', ['intended_config']) ] self.add_file_common_args = True def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive, required_if=spec.required_if, add_file_common_args=spec.add_file_common_args, ) client = F5RestClient(**module.params) try: mm = ModuleManager(module=module, client=client) results = mm.exec_module() exit_json(module, results, client) except F5ModuleError as ex: fail_json(module, ex, client) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.tests.virt.hyperv import test_vmutils from nova.virt.hyperv import vmutilsv2 class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase): """Unit tests for the Hyper-V VMUtilsV2 class.""" _DEFINE_SYSTEM = 'DefineSystem' _DESTROY_SYSTEM = 'DestroySystem' _DESTROY_SNAPSHOT = 'DestroySnapshot' _ADD_RESOURCE = 'AddResourceSettings' _REMOVE_RESOURCE = 'RemoveResourceSettings' _SETTING_TYPE = 'VirtualSystemType' _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized' def setUp(self): super(VMUtilsV2TestCase, self).setUp() self._vmutils = vmutilsv2.VMUtilsV2() self._vmutils._conn = mock.MagicMock() def test_modify_virt_resource(self): mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0] mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL) mock_res_setting_data = mock.MagicMock() mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA self._vmutils._modify_virt_resource(mock_res_setting_data, self._FAKE_VM_PATH) mock_svc.ModifyResourceSettings.assert_called_with( ResourceSettings=[self._FAKE_RES_DATA]) @mock.patch.object(vmutilsv2, 'wmi', create=True) @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val') def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi): self._lookup_vm() mock_svc = self._get_snapshot_service() mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH, 
mock.MagicMock(), self._FAKE_RET_VAL) self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME) mock_svc.CreateSnapshot.assert_called_with( AffectedSystem=self._FAKE_VM_PATH, SnapshotType=self._vmutils._SNAPSHOT_FULL) mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL, self._FAKE_JOB_PATH) @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource') @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data') @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name') def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd, mock_add_virt_res): self._lookup_vm() fake_eth_port = mock_get_new_sd.return_value self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None) mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH) @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks') def test_enable_vm_metrics_collection(self, mock_get_vm_disks): self._lookup_vm() mock_svc = self._vmutils._conn.Msvm_MetricService()[0] metric_def = mock.MagicMock() mock_disk = mock.MagicMock() mock_disk.path_.return_value = self._FAKE_RES_PATH mock_get_vm_disks.return_value = ([mock_disk], [mock_disk]) fake_metric_def_paths = ["fake_0", None] fake_metric_resource_paths = [self._FAKE_VM_PATH, self._FAKE_RES_PATH] metric_def.path_.side_effect = fake_metric_def_paths self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [ metric_def] self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME) calls = [] for i in range(len(fake_metric_def_paths)): calls.append(mock.call( Subject=fake_metric_resource_paths[i], Definition=fake_metric_def_paths[i], MetricCollectionEnabled=self._vmutils._METRIC_ENABLED)) mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True) def _get_snapshot_service(self): return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0] def _assert_add_resources(self, mock_svc): getattr(mock_svc, self._ADD_RESOURCE).assert_called_with( self._FAKE_VM_PATH, [self._FAKE_RES_DATA]) def 
_assert_remove_resources(self, mock_svc): getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with( [self._FAKE_RES_PATH]) def test_list_instance_notes(self): vs = mock.MagicMock() attrs = {'ElementName': 'fake_name', 'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']} vs.configure_mock(**attrs) self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs] response = self._vmutils.list_instance_notes() self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response) self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with( ['ElementName', 'Notes'], VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED) @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val') @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj') def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val, vm_path): mock_vs_man_svc = mock.MagicMock() mock_vs_data = mock.MagicMock() mock_job = mock.MagicMock() fake_job_path = 'fake job path' fake_ret_val = 'fake return value' _conn = self._vmutils._conn.Msvm_VirtualSystemSettingData mock_check_ret_val.return_value = mock_job _conn.new.return_value = mock_vs_data mock_vs_man_svc.DefineSystem.return_value = (fake_job_path, vm_path, fake_ret_val) mock_job.associators.return_value = ['fake vm path'] response = self._vmutils._create_vm_obj(vs_man_svc=mock_vs_man_svc, vm_name='fake vm', notes='fake notes') if not vm_path: mock_job.associators.assert_called_once_with( self._vmutils._AFFECTED_JOB_ELEMENT_CLASS) _conn.new.assert_called_once_with() self.assertEqual(mock_vs_data.ElementName, 'fake vm') mock_vs_man_svc.DefineSystem.assert_called_once_with( ResourceSettings=[], ReferenceConfiguration=None, SystemSettings=mock_vs_data.GetText_(1)) mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path) mock_get_wmi_obj.assert_called_with('fake vm path') self.assertEqual(mock_vs_data.Notes, 'fake notes') self.assertEqual(response, mock_get_wmi_obj()) def test_create_vm_obj(self): 
self._test_create_vm_obj(vm_path='fake vm path') def test_create_vm_obj_no_vm_path(self): self._test_create_vm_obj(vm_path=None)
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import annotations from datetime import datetime from typing import TYPE_CHECKING from sqlalchemy import Index, Integer, String, Text from sqlalchemy.orm import Mapped, mapped_column, relationship from airflow._shared.timezones import timezone from airflow.models.base import Base, StringID from airflow.utils.sqlalchemy import UtcDateTime if TYPE_CHECKING: from airflow.models.dag import DagModel from airflow.models.taskinstance import TaskInstance from airflow.models.taskinstancekey import TaskInstanceKey class Log(Base): """Used to actively log events to the database.""" __tablename__ = "log" id: Mapped[int] = mapped_column(Integer, primary_key=True) dttm: Mapped[datetime] = mapped_column(UtcDateTime) dag_id: Mapped[str | None] = mapped_column(StringID(), nullable=True) task_id: Mapped[str | None] = mapped_column(StringID(), nullable=True) map_index: Mapped[int | None] = mapped_column(Integer, nullable=True) event: Mapped[str] = mapped_column(String(60), nullable=False) logical_date: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True) run_id: Mapped[str | None] = mapped_column(StringID(), nullable=True) owner: Mapped[str | None] = mapped_column(String(500), 
nullable=True) owner_display_name: Mapped[str | None] = mapped_column(String(500), nullable=True) extra: Mapped[str | None] = mapped_column(Text, nullable=True) try_number: Mapped[int | None] = mapped_column(Integer, nullable=True) dag_model: Mapped[DagModel | None] = relationship( "DagModel", viewonly=True, foreign_keys=[dag_id], primaryjoin="Log.dag_id == DagModel.dag_id", ) task_instance: Mapped[TaskInstance | None] = relationship( "TaskInstance", viewonly=True, foreign_keys=[dag_id, task_id, run_id, map_index], primaryjoin="and_(Log.dag_id == TaskInstance.dag_id, Log.task_id == TaskInstance.task_id, Log.run_id == TaskInstance.run_id, Log.map_index == TaskInstance.map_index)", lazy="noload", ) __table_args__ = ( Index("idx_log_dttm", dttm), Index("idx_log_event", event), Index("idx_log_task_instance", dag_id, task_id, run_id, map_index, try_number), ) def __init__( self, event, task_instance: TaskInstance | TaskInstanceKey | None = None, owner=None, owner_display_name=None, extra=None, **kwargs, ): self.dttm = timezone.utcnow() self.event = event self.extra = extra task_owner = None self.logical_date = None if task_instance: self.dag_id = task_instance.dag_id self.task_id = task_instance.task_id self.run_id = task_instance.run_id if logical_date := getattr(task_instance, "logical_date", None): self.logical_date = logical_date self.try_number = task_instance.try_number self.map_index = task_instance.map_index if task := getattr(task_instance, "task", None): task_owner = task.owner if "task_id" in kwargs: self.task_id = kwargs["task_id"] if "dag_id" in kwargs: self.dag_id = kwargs["dag_id"] if kwargs.get("logical_date"): self.logical_date = kwargs["logical_date"] if kwargs.get("run_id"): self.run_id = kwargs["run_id"] if "map_index" in kwargs: self.map_index = kwargs["map_index"] if "try_number" in kwargs: self.try_number = kwargs["try_number"] self.owner = owner or task_owner self.owner_display_name = owner_display_name or None def __str__(self) -> str: return 
f"Log({self.event}, {self.task_id}, {self.owner}, {self.owner_display_name}, {self.extra})"
python
github
https://github.com/apache/airflow
airflow-core/src/airflow/models/log.py
# (c) 2013-2016, Michael DeHaan <michael.dehaan@gmail.com> # Stephen Fromm <sfromm@gmail.com> # Brian Coca <briancoca+dev@gmail.com> # Toshio Kuratomi <tkuratomi@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License from __future__ import (absolute_import, division, print_function) __metaclass__ = type import codecs import os import os.path import re import tempfile from ansible import constants as C from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail from ansible.module_utils._text import to_native, to_text from ansible.module_utils.parsing.convert_bool import boolean from ansible.plugins.action import ActionBase from ansible.utils.hashing import checksum_s class ActionModule(ActionBase): TRANSFERS_FILES = True def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP) tmp = os.fdopen(tmpfd, 'wb') delimit_me = False add_newline = False for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))): if compiled_regexp and not compiled_regexp.search(f): continue fragment = u"%s/%s" % (src_path, f) if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = open(self._loader.get_real_file(fragment, decrypt=decrypt), 
'rb').read() # always put a newline between fragments if the previous fragment didn't end with a newline. if add_newline: tmp.write(b'\n') # delimiters should only appear between fragments if delimit_me: if delimiter: # un-escape anything like newlines delimiter = codecs.escape_decode(delimiter)[0] tmp.write(delimiter) # always make sure there's a newline after the # delimiter, so lines don't run together if delimiter[-1] != b'\n': tmp.write(b'\n') tmp.write(fragment_content) delimit_me = True if fragment_content.endswith(b'\n'): add_newline = False else: add_newline = True tmp.close() return temp_path def run(self, tmp=None, task_vars=None): self._supports_check_mode = False result = super(ActionModule, self).run(tmp, task_vars) del tmp # tmp no longer has any effect if task_vars is None: task_vars = dict() src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) delimiter = self._task.args.get('delimiter', None) remote_src = self._task.args.get('remote_src', 'yes') regexp = self._task.args.get('regexp', None) follow = self._task.args.get('follow', False) ignore_hidden = self._task.args.get('ignore_hidden', False) decrypt = self._task.args.get('decrypt', True) try: if src is None or dest is None: raise AnsibleActionFail("src and dest are required") if boolean(remote_src, strict=False): result.update(self._execute_module(task_vars=task_vars)) raise _AnsibleActionDone() else: try: src = self._find_needle('files', src) except AnsibleError as e: raise AnsibleActionFail(to_native(e)) if not os.path.isdir(src): raise AnsibleActionFail(u"Source (%s) is not a directory" % src) _re = None if regexp is not None: _re = re.compile(regexp) # Does all work assembling the file path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt) path_checksum = checksum_s(path) dest = self._remote_expand_user(dest) dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow) diff = {} # setup args for running modules 
new_module_args = self._task.args.copy() # clean assemble specific options for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']: if opt in new_module_args: del new_module_args[opt] new_module_args.update( dict( dest=dest, original_basename=os.path.basename(src), ) ) if path_checksum != dest_stat['checksum']: if self._play_context.diff: diff = self._get_diff_data(dest, path, task_vars) remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src') xfered = self._transfer_file(path, remote_path) # fix file permissions when the copy is done as a different user self._fixup_perms2((self._connection._shell.tmpdir, remote_path)) new_module_args.update(dict(src=xfered,)) res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars) if diff: res['diff'] = diff result.update(res) else: result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)) except AnsibleAction as e: result.update(e.result) finally: self._remove_tmp_path(self._connection._shell.tmpdir) return result
unknown
codeparrot/codeparrot-clean
import olsr_1d_utils


def solveLinearRegression(X, Y):
    """Fit a 1-D OLS model to (X, Y), plot the fit, and print a, b and r^2."""
    (a, b) = olsr_1d_utils.calculateCoefficients(X, Y)
    Yhat = olsr_1d_utils.calculateYhat(a, b, X)
    olsr_1d_utils.plotDataAndPrediction(X, Y, Yhat)
    r2 = olsr_1d_utils.calculateRSquared(Y, Yhat)
    print("a:", a, "b:", b)
    print("the r-squared is:", r2)


def solveMooreProblem():
    """Regress the Moore's-law data set (transistor count vs. year)."""
    (X, Y) = olsr_1d_utils.loadDataMooreProblem()
    solveLinearRegression(X, Y)


def solveBrainVsBodyWeightProblem():
    """Regress brain weight against body weight."""
    (X, Y) = olsr_1d_utils.loadDataWeightsProblem()
    solveLinearRegression(X, Y)


def solveFoodTruckProblem():
    """Regress food-truck profits against city population."""
    (X, Y) = olsr_1d_utils.loadDataFoodTruckProfitsProblem()
    solveLinearRegression(X, Y)


def run():
    """Solve every demo problem, in the listed order."""
    # Dispatch by problem name instead of positional index: the original
    # enumerate()/if-chain ignored the key name entirely, so reordering the
    # list would silently run the wrong solver for each key.
    solvers = {
        "moore": solveMooreProblem,
        "weights": solveBrainVsBodyWeightProblem,
        "foodTruck": solveFoodTruckProblem,
    }
    problemsToSolve = ["moore", "weights", "foodTruck"]
    for problemKeyname in problemsToSolve:
        solvers[problemKeyname]()


if __name__ == '__main__':
    run()
unknown
codeparrot/codeparrot-clean
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ha; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.client.ZKClientConfig; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.Watcher; import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.Watcher.Event; import org.apache.zookeeper.ZKUtil; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.AsyncCallback.*; import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.KeeperException.Code; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.naming.ConfigurationException; import org.apache.hadoop.security.SecurityUtil.TruststoreKeystore; /** * * This class implements a simple library to perform leader election on top of * Apache Zookeeper. Using Zookeeper as a coordination service, leader election * can be performed by atomically creating an ephemeral lock file (znode) on * Zookeeper. The service instance that successfully creates the znode becomes * active and the rest become standbys. <br> * This election mechanism is only efficient for small number of election * candidates (order of 10's) because contention on single znode by a large * number of candidates can result in Zookeeper overload. <br> * The elector does not guarantee fencing (protection of shared resources) among * service instances. After it has notified an instance about becoming a leader, * then that instance must ensure that it meets the service consistency * requirements. If it cannot do so, then it is recommended to quit the * election. The application implements the {@link ActiveStandbyElectorCallback} * to interact with the elector */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ActiveStandbyElector implements StatCallback, StringCallback { /** * Callback interface to interact with the ActiveStandbyElector object. <br> * The application will be notified with a callback only on state changes * (i.e. there will never be successive calls to becomeActive without an * intermediate call to enterNeutralMode). <br> * The callbacks will be running on Zookeeper client library threads. The * application should return from these callbacks quickly so as not to impede * Zookeeper client library performance and notifications. The app will * typically remember the state change and return from the callback. It will * then proceed with implementing actions around that state change. 
It is * possible to be called back again while these actions are in flight and the * app should handle this scenario. */ public interface ActiveStandbyElectorCallback { /** * This method is called when the app becomes the active leader. * If the service fails to become active, it should throw * ServiceFailedException. This will cause the elector to * sleep for a short period, then re-join the election. * * Callback implementations are expected to manage their own * timeouts (e.g. when making an RPC to a remote node). * * @throws ServiceFailedException Service Failed Exception. */ void becomeActive() throws ServiceFailedException; /** * This method is called when the app becomes a standby */ void becomeStandby(); /** * If the elector gets disconnected from Zookeeper and does not know about * the lock state, then it will notify the service via the enterNeutralMode * interface. The service may choose to ignore this or stop doing state * changing operations. Upon reconnection, the elector verifies the leader * status and calls back on the becomeActive and becomeStandby app * interfaces. <br> * Zookeeper disconnects can happen due to network issues or loss of * Zookeeper quorum. Thus enterNeutralMode can be used to guard against * split-brain issues. In such situations it might be prudent to call * becomeStandby too. However, such state change operations might be * expensive and enterNeutralMode can help guard against doing that for * transient issues. */ void enterNeutralMode(); /** * If there is any fatal error (e.g. wrong ACL's, unexpected Zookeeper * errors or Zookeeper persistent unavailability) then notifyFatalError is * called to notify the app about it. * * @param errorMessage error message. */ void notifyFatalError(String errorMessage); /** * If an old active has failed, rather than exited gracefully, then * the new active may need to take some fencing actions against it * before proceeding with failover. 
* * @param oldActiveData the application data provided by the prior active */ void fenceOldActive(byte[] oldActiveData); } /** * Name of the lock znode used by the library. Protected for access in test * classes */ @VisibleForTesting protected static final String LOCK_FILENAME = "ActiveStandbyElectorLock"; @VisibleForTesting protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb"; public static final Logger LOG = LoggerFactory.getLogger(ActiveStandbyElector.class); private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000; private enum ConnectionState { DISCONNECTED, CONNECTED, TERMINATED }; enum State { INIT, ACTIVE, STANDBY, NEUTRAL }; private State state = State.INIT; private int createRetryCount = 0; private int statRetryCount = 0; private ZooKeeper zkClient; private WatcherWithClientRef watcher; private ConnectionState zkConnectionState = ConnectionState.TERMINATED; private final ActiveStandbyElectorCallback appClient; private final String zkHostPort; private final int zkSessionTimeout; private final List<ACL> zkAcl; private final List<ZKAuthInfo> zkAuthInfo; private TruststoreKeystore truststoreKeystore; private byte[] appData; private final String zkLockFilePath; private final String zkBreadCrumbPath; private final String znodeWorkingDir; private final int maxRetryNum; private Lock sessionReestablishLockForTests = new ReentrantLock(); private boolean wantToBeInElection; private boolean monitorLockNodePending = false; private ZooKeeper monitorLockNodeClient; /** * Create a new ActiveStandbyElector object <br> * The elector is created by providing to it the Zookeeper configuration, the * parent znode under which to create the znode and a reference to the * callback interface. <br> * The parent znode name must be the same for all service instances and * different across services. <br> * After the leader has been lost, a new leader will be elected after the * session timeout expires. 
Hence, the app must set this parameter based on * its needs for failure response time. The session timeout must be greater * than the Zookeeper disconnect timeout and is recommended to be 3X that * value to enable Zookeeper to retry transient disconnections. Setting a very * short session timeout may result in frequent transitions between active and * standby states during issues like network outages/GS pauses. * * @param zookeeperHostPorts * ZooKeeper hostPort for all ZooKeeper servers * @param zookeeperSessionTimeout * ZooKeeper session timeout * @param parentZnodeName * znode under which to create the lock * @param acl * ZooKeeper ACL's * @param authInfo a list of authentication credentials to add to the * ZK connection * @param app * reference to callback interface object * @param maxRetryNum maxRetryNum. * @param truststoreKeystore truststore keystore, that we will use for ZK if SSL/TLS is enabled * @throws IOException raised on errors performing I/O. * @throws HadoopIllegalArgumentException * if valid data is not supplied. * @throws KeeperException * other zookeeper operation errors. */ public ActiveStandbyElector(String zookeeperHostPorts, int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl, List<ZKAuthInfo> authInfo, ActiveStandbyElectorCallback app, int maxRetryNum, TruststoreKeystore truststoreKeystore) throws IOException, HadoopIllegalArgumentException, KeeperException { this(zookeeperHostPorts, zookeeperSessionTimeout, parentZnodeName, acl, authInfo, app, maxRetryNum, true, truststoreKeystore); } /** * Create a new ActiveStandbyElector object <br> * The elector is created by providing to it the Zookeeper configuration, the * parent znode under which to create the znode and a reference to the * callback interface. <br> * The parent znode name must be the same for all service instances and * different across services. <br> * After the leader has been lost, a new leader will be elected after the * session timeout expires. 
Hence, the app must set this parameter based on * its needs for failure response time. The session timeout must be greater * than the Zookeeper disconnect timeout and is recommended to be 3X that * value to enable Zookeeper to retry transient disconnections. Setting a very * short session timeout may result in frequent transitions between active and * standby states during issues like network outages/GS pauses. * * @param zookeeperHostPorts * ZooKeeper hostPort for all ZooKeeper servers * @param zookeeperSessionTimeout * ZooKeeper session timeout * @param parentZnodeName * znode under which to create the lock * @param acl * ZooKeeper ACL's * @param authInfo a list of authentication credentials to add to the * ZK connection * @param app * reference to callback interface object * @param failFast * whether need to add the retry when establishing ZK connection. * @param maxRetryNum max Retry Num * @param truststoreKeystore truststore keystore, that we will use for ZK if SSL/TLS is enabled * @throws IOException * raised on errors performing I/O. * @throws HadoopIllegalArgumentException * if valid data is not supplied. * @throws KeeperException * other zookeeper operation errors. 
*/ public ActiveStandbyElector(String zookeeperHostPorts, int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl, List<ZKAuthInfo> authInfo, ActiveStandbyElectorCallback app, int maxRetryNum, boolean failFast, TruststoreKeystore truststoreKeystore) throws IOException, HadoopIllegalArgumentException, KeeperException { if (app == null || acl == null || parentZnodeName == null || zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) { throw new HadoopIllegalArgumentException("Invalid argument"); } zkHostPort = zookeeperHostPorts; zkSessionTimeout = zookeeperSessionTimeout; zkAcl = acl; zkAuthInfo = authInfo; appClient = app; znodeWorkingDir = parentZnodeName; zkLockFilePath = znodeWorkingDir + "/" + LOCK_FILENAME; zkBreadCrumbPath = znodeWorkingDir + "/" + BREADCRUMB_FILENAME; this.maxRetryNum = maxRetryNum; this.truststoreKeystore = truststoreKeystore; // establish the ZK Connection for future API calls if (failFast) { createConnection(); } else { reEstablishSession(); } } /** * To participate in election, the app will call joinElection. The result will * be notified by a callback on either the becomeActive or becomeStandby app * interfaces. <br> * After this the elector will automatically monitor the leader status and * perform re-election if necessary<br> * The app could potentially start off in standby mode and ignore the * becomeStandby call. * * @param data * to be set by the app. non-null data must be set. * @throws HadoopIllegalArgumentException * if valid data is not supplied */ public synchronized void joinElection(byte[] data) throws HadoopIllegalArgumentException { if (data == null) { throw new HadoopIllegalArgumentException("data cannot be null"); } if (wantToBeInElection) { LOG.info("Already in election. 
Not re-connecting."); return; } appData = new byte[data.length]; System.arraycopy(data, 0, appData, 0, data.length); if (LOG.isDebugEnabled()) { LOG.debug("Attempting active election for " + this); } joinElectionInternal(); } /** * @return true if the configured parent znode exists * @throws IOException raised on errors performing I/O. * @throws InterruptedException interrupted exception. */ public synchronized boolean parentZNodeExists() throws IOException, InterruptedException { Preconditions.checkState(zkClient != null); try { return zkClient.exists(znodeWorkingDir, false) != null; } catch (KeeperException e) { throw new IOException("Couldn't determine existence of znode '" + znodeWorkingDir + "'", e); } } /** * Utility function to ensure that the configured base znode exists. * This recursively creates the znode as well as all of its parents. * * @throws IOException raised on errors performing I/O. * @throws InterruptedException interrupted exception. * @throws KeeperException other zookeeper operation errors. 
*/ public synchronized void ensureParentZNode() throws IOException, InterruptedException, KeeperException { Preconditions.checkState(!wantToBeInElection, "ensureParentZNode() may not be called while in the election"); if (zkClient == null) { createConnection(); } String pathParts[] = znodeWorkingDir.split("/"); Preconditions.checkArgument(pathParts.length >= 1 && pathParts[0].isEmpty(), "Invalid path: %s", znodeWorkingDir); StringBuilder sb = new StringBuilder(); for (int i = 1; i < pathParts.length; i++) { sb.append("/").append(pathParts[i]); String prefixPath = sb.toString(); LOG.debug("Ensuring existence of " + prefixPath); try { createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT); } catch (KeeperException e) { if (isNodeExists(e.code())) { // Set ACLs for parent node, if they do not exist or are different try { setAclsWithRetries(prefixPath); } catch (KeeperException e1) { throw new IOException("Couldn't set ACLs on parent ZNode: " + prefixPath, e1); } } else { throw new IOException("Couldn't create " + prefixPath, e); } } } LOG.info("Successfully created " + znodeWorkingDir + " in ZK."); } /** * Clear all of the state held within the parent ZNode. * This recursively deletes everything within the znode as well as the * parent znode itself. It should only be used when it's certain that * no electors are currently participating in the election. * * @throws IOException raised on errors performing I/O. * @throws InterruptedException interrupted exception. 
*/ public synchronized void clearParentZNode() throws IOException, InterruptedException { Preconditions.checkState(!wantToBeInElection, "clearParentZNode() may not be called while in the election"); try { LOG.info("Recursively deleting " + znodeWorkingDir + " from ZK..."); zkDoWithRetries(new ZKAction<Void>() { @Override public Void run() throws KeeperException, InterruptedException { ZKUtil.deleteRecursive(zkClient, znodeWorkingDir); return null; } }); } catch (KeeperException e) { throw new IOException("Couldn't clear parent znode " + znodeWorkingDir, e); } LOG.info("Successfully deleted " + znodeWorkingDir + " from ZK."); } /** * Any service instance can drop out of the election by calling quitElection. * <br> * This will lose any leader status, if held, and stop monitoring of the lock * node. <br> * If the instance wants to participate in election again, then it needs to * call joinElection(). <br> * This allows service instances to take themselves out of rotation for known * impending unavailable states (e.g. long GC pause or software upgrade). * * @param needFence true if the underlying daemon may need to be fenced * if a failover occurs due to dropping out of the election. */ public synchronized void quitElection(boolean needFence) { LOG.info("Yielding from election"); if (!needFence && state == State.ACTIVE) { // If active is gracefully going back to standby mode, remove // our permanent znode so no one fences us. tryDeleteOwnBreadCrumbNode(); } reset(); wantToBeInElection = false; } /** * Exception thrown when there is no active leader */ public static class ActiveNotFoundException extends Exception { private static final long serialVersionUID = 3505396722342846462L; } /** * get data set by the active leader * * @return data set by the active instance * @throws ActiveNotFoundException * when there is no active leader * @throws KeeperException * other zookeeper operation errors * @throws InterruptedException * interrupted exception. 
* @throws IOException * when ZooKeeper connection could not be established */ public synchronized byte[] getActiveData() throws ActiveNotFoundException, KeeperException, InterruptedException, IOException { try { if (zkClient == null) { createConnection(); } Stat stat = new Stat(); return getDataWithRetries(zkLockFilePath, false, stat); } catch(KeeperException e) { Code code = e.code(); if (isNodeDoesNotExist(code)) { // handle the commonly expected cases that make sense for us throw new ActiveNotFoundException(); } else { throw e; } } } /** * interface implementation of Zookeeper callback for create */ @Override public synchronized void processResult(int rc, String path, Object ctx, String name) { if (isStaleClient(ctx)) return; if (LOG.isDebugEnabled()) { LOG.debug("CreateNode result: " + rc + " for path: " + path + " connectionState: " + zkConnectionState + " for " + this); } Code code = Code.get(rc); if (isSuccess(code)) { // we successfully created the znode. we are the leader. start monitoring if (becomeActive()) { monitorActiveStatus(); } else { reJoinElectionAfterFailureToBecomeActive(); } return; } if (isNodeExists(code)) { if (createRetryCount == 0) { // znode exists and we did not retry the operation. so a different // instance has created it. become standby and monitor lock. becomeStandby(); } // if we had retried then the znode could have been created by our first // attempt to the server (that we lost) and this node exists response is // for the second attempt. verify this case via ephemeral node owner. this // will happen on the callback for monitoring the lock. monitorActiveStatus(); return; } String errorMessage = "Received create error from Zookeeper. code:" + code.toString() + " for path " + path; LOG.debug(errorMessage); if (shouldRetry(code)) { if (createRetryCount < maxRetryNum) { LOG.debug("Retrying createNode createRetryCount: " + createRetryCount); ++createRetryCount; createLockNodeAsync(); return; } errorMessage = errorMessage + ". 
Not retrying further znode create connection errors."; } else if (isSessionExpired(code)) { // This isn't fatal - the client Watcher will re-join the election LOG.warn("Lock acquisition failed because session was lost"); return; } fatalError(errorMessage); } /** * interface implementation of Zookeeper callback for monitor (exists) */ @Override public synchronized void processResult(int rc, String path, Object ctx, Stat stat) { if (isStaleClient(ctx)) return; monitorLockNodePending = false; assert wantToBeInElection : "Got a StatNode result after quitting election"; if (LOG.isDebugEnabled()) { LOG.debug("StatNode result: " + rc + " for path: " + path + " connectionState: " + zkConnectionState + " for " + this); } Code code = Code.get(rc); if (isSuccess(code)) { // the following owner check completes verification in case the lock znode // creation was retried if (stat.getEphemeralOwner() == zkClient.getSessionId()) { // we own the lock znode. so we are the leader if (!becomeActive()) { reJoinElectionAfterFailureToBecomeActive(); } } else { // we dont own the lock znode. so we are a standby. becomeStandby(); } // the watch set by us will notify about changes return; } if (isNodeDoesNotExist(code)) { // the lock znode disappeared before we started monitoring it enterNeutralMode(); joinElectionInternal(); return; } String errorMessage = "Received stat error from Zookeeper. code:" + code.toString(); LOG.debug(errorMessage); if (shouldRetry(code)) { if (statRetryCount < maxRetryNum) { ++statRetryCount; monitorLockNodeAsync(); return; } errorMessage = errorMessage + ". Not retrying further znode monitoring connection errors."; } else if (isSessionExpired(code)) { // This isn't fatal - the client Watcher will re-join the election LOG.warn("Lock monitoring failed because session was lost"); return; } fatalError(errorMessage); } @VisibleForTesting public boolean getWantToBeInElection() { return wantToBeInElection; } /** * We failed to become active. 
Re-join the election, but * sleep for a few seconds after terminating our existing * session, so that other nodes have a chance to become active. * The failure to become active is already logged inside * becomeActive(). */ private void reJoinElectionAfterFailureToBecomeActive() { reJoinElection(SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE); } /** * interface implementation of Zookeeper watch events (connection and node), * proxied by {@link WatcherWithClientRef}. */ synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) { Event.EventType eventType = event.getType(); if (isStaleClient(zk)) return; if (LOG.isDebugEnabled()) { LOG.debug("Watcher event type: " + eventType + " with state:" + event.getState() + " for path:" + event.getPath() + " connectionState: " + zkConnectionState + " for " + this); } if (eventType == Event.EventType.None) { // the connection state has changed switch (event.getState()) { case SyncConnected: LOG.info("Session connected."); // if the listener was asked to move to safe state then it needs to // be undone ConnectionState prevConnectionState = zkConnectionState; zkConnectionState = ConnectionState.CONNECTED; if (prevConnectionState == ConnectionState.DISCONNECTED && wantToBeInElection) { monitorActiveStatus(); } break; case Disconnected: LOG.info("Session disconnected. Entering neutral mode..."); // ask the app to move to safe state because zookeeper connection // is not active and we dont know our state zkConnectionState = ConnectionState.DISCONNECTED; enterNeutralMode(); break; case Expired: // the connection got terminated because of session timeout // call listener to reconnect LOG.info("Session expired. 
Entering neutral mode and rejoining..."); enterNeutralMode(); reJoinElection(0); break; case SaslAuthenticated: LOG.info("Successfully authenticated to ZooKeeper using SASL."); break; default: fatalError("Unexpected Zookeeper watch event state: " + event.getState()); break; } return; } // a watch on lock path in zookeeper has fired. so something has changed on // the lock. ideally we should check that the path is the same as the lock // path but trusting zookeeper for now String path = event.getPath(); if (path != null) { switch (eventType) { case NodeDeleted: if (state == State.ACTIVE) { enterNeutralMode(); } joinElectionInternal(); break; case NodeDataChanged: monitorActiveStatus(); break; default: if (LOG.isDebugEnabled()) { LOG.debug("Unexpected node event: " + eventType + " for path: " + path); } monitorActiveStatus(); } return; } // some unexpected error has occurred fatalError("Unexpected watch error from Zookeeper"); } /** * Get a new zookeeper client instance. protected so that test class can * inherit and mock out the zookeeper instance * * @return new zookeeper client instance * @throws IOException raised on errors performing I/O. * @throws KeeperException zookeeper connectionloss exception */ protected synchronized ZooKeeper connectToZooKeeper() throws IOException, KeeperException { // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and // may trigger the Connected event immediately. So, if we register the // watcher after constructing ZooKeeper, we may miss that event. Instead, // we construct the watcher first, and have it block any events it receives // before we can set its ZooKeeper reference. watcher = new WatcherWithClientRef(); ZooKeeper zk = createZooKeeper(); watcher.setZooKeeperRef(zk); // Wait for the asynchronous success/failure. This may throw an exception // if we don't connect within the session timeout. 
watcher.waitForZKConnectionEvent(zkSessionTimeout); for (ZKAuthInfo auth : zkAuthInfo) { zk.addAuthInfo(auth.getScheme(), auth.getAuth()); } return zk; } /** * Get a new zookeeper client instance. protected so that test class can * inherit and pass in a mock object for zookeeper * * @return new zookeeper client instance * @throws IOException raised on errors performing I/O. */ protected ZooKeeper createZooKeeper() throws IOException { ZKClientConfig zkClientConfig = new ZKClientConfig(); if (truststoreKeystore != null) { try { SecurityUtil.setSslConfiguration(zkClientConfig, truststoreKeystore); } catch (ConfigurationException ce) { throw new IOException(ce); } } return initiateZookeeper(zkClientConfig); } protected ZooKeeper initiateZookeeper(ZKClientConfig zkClientConfig) throws IOException { return new ZooKeeper(zkHostPort, zkSessionTimeout, watcher, zkClientConfig); } private void fatalError(String errorMessage) { LOG.error(errorMessage); reset(); appClient.notifyFatalError(errorMessage); } private void monitorActiveStatus() { assert wantToBeInElection; if (LOG.isDebugEnabled()) { LOG.debug("Monitoring active leader for " + this); } statRetryCount = 0; monitorLockNodeAsync(); } private void joinElectionInternal() { Preconditions.checkState(appData != null, "trying to join election without any app data"); if (zkClient == null) { if (!reEstablishSession()) { fatalError("Failed to reEstablish connection with ZooKeeper"); return; } } createRetryCount = 0; wantToBeInElection = true; createLockNodeAsync(); } private void reJoinElection(int sleepTime) { LOG.info("Trying to re-establish ZK session"); // Some of the test cases rely on expiring the ZK sessions and // ensuring that the other node takes over. But, there's a race // where the original lease holder could reconnect faster than the other // thread manages to take the lock itself. This lock allows the // tests to block the reconnection. 
It's a shame that this leaked // into non-test code, but the lock is only acquired here so will never // be contended. sessionReestablishLockForTests.lock(); try { terminateConnection(); sleepFor(sleepTime); // Should not join election even before the SERVICE is reported // as HEALTHY from ZKFC monitoring. if (appData != null) { joinElectionInternal(); } else { LOG.info("Not joining election since service has not yet been " + "reported as healthy."); } } finally { sessionReestablishLockForTests.unlock(); } } /** * Sleep for the given number of milliseconds. * This is non-static, and separated out, so that unit tests * can override the behavior not to sleep. * * @param sleepMs sleep ms. */ @VisibleForTesting protected void sleepFor(int sleepMs) { if (sleepMs > 0) { try { Thread.sleep(sleepMs); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } } @VisibleForTesting void preventSessionReestablishmentForTests() { sessionReestablishLockForTests.lock(); } @VisibleForTesting void allowSessionReestablishmentForTests() { sessionReestablishLockForTests.unlock(); } @VisibleForTesting synchronized long getZKSessionIdForTests() { if (zkClient != null) { return zkClient.getSessionId(); } else { return -1; } } @VisibleForTesting synchronized State getStateForTests() { return state; } @VisibleForTesting synchronized boolean isMonitorLockNodePending() { return monitorLockNodePending; } private boolean reEstablishSession() { int connectionRetryCount = 0; boolean success = false; while(!success && connectionRetryCount < maxRetryNum) { if (LOG.isDebugEnabled()) { LOG.debug("Establishing zookeeper connection for " + this); } try { createConnection(); success = true; } catch(IOException e) { LOG.warn(e.toString()); sleepFor(5000); } catch(KeeperException e) { LOG.warn(e.toString()); sleepFor(5000); } ++connectionRetryCount; } return success; } private void createConnection() throws IOException, KeeperException { if (zkClient != null) { try { zkClient.close(); } 
catch (InterruptedException e) { throw new IOException("Interrupted while closing ZK", e); } zkClient = null; watcher = null; } zkClient = connectToZooKeeper(); if (LOG.isDebugEnabled()) { LOG.debug("Created new connection for " + this); } } @InterfaceAudience.Private public synchronized void terminateConnection() { if (zkClient == null) { return; } if (LOG.isDebugEnabled()) { LOG.debug("Terminating ZK connection for " + this); } ZooKeeper tempZk = zkClient; zkClient = null; watcher = null; try { tempZk.close(); } catch(InterruptedException e) { LOG.warn(e.toString()); } zkConnectionState = ConnectionState.TERMINATED; wantToBeInElection = false; } private void reset() { state = State.INIT; terminateConnection(); } private boolean becomeActive() { assert wantToBeInElection; if (state == State.ACTIVE) { // already active return true; } try { Stat oldBreadcrumbStat = fenceOldActive(); writeBreadCrumbNode(oldBreadcrumbStat); LOG.debug("Becoming active for {}", this); appClient.becomeActive(); state = State.ACTIVE; return true; } catch (Exception e) { LOG.warn("Exception handling the winning of election", e); // Caller will handle quitting and rejoining the election. return false; } } /** * Write the "ActiveBreadCrumb" node, indicating that this node may need * to be fenced on failover. * @param oldBreadcrumbStat */ private void writeBreadCrumbNode(Stat oldBreadcrumbStat) throws KeeperException, InterruptedException { Preconditions.checkState(appData != null, "no appdata"); LOG.info("Writing znode {} to indicate that the local " + "node is the most recent active...", zkBreadCrumbPath); if (oldBreadcrumbStat == null) { // No previous active, just create the node createWithRetries(zkBreadCrumbPath, appData, zkAcl, CreateMode.PERSISTENT); } else { // There was a previous active, update the node setDataWithRetries(zkBreadCrumbPath, appData, oldBreadcrumbStat.getVersion()); } } /** * Try to delete the "ActiveBreadCrumb" node when gracefully giving up * active status. 
* If this fails, it will simply warn, since the graceful release behavior * is only an optimization. */ private void tryDeleteOwnBreadCrumbNode() { assert state == State.ACTIVE; LOG.info("Deleting bread-crumb of active node..."); // Sanity check the data. This shouldn't be strictly necessary, // but better to play it safe. Stat stat = new Stat(); byte[] data = null; try { data = zkClient.getData(zkBreadCrumbPath, false, stat); if (!Arrays.equals(data, appData)) { throw new IllegalStateException( "We thought we were active, but in fact " + "the active znode had the wrong data: " + StringUtils.byteToHexString(data) + " (stat=" + stat + ")"); } deleteWithRetries(zkBreadCrumbPath, stat.getVersion()); } catch (Exception e) { LOG.warn("Unable to delete our own bread-crumb of being active at {}." + ". Expecting to be fenced by the next active.", zkBreadCrumbPath, e); } } /** * If there is a breadcrumb node indicating that another node may need * fencing, try to fence that node. * @return the Stat of the breadcrumb node that was read, or null * if no breadcrumb node existed */ private Stat fenceOldActive() throws InterruptedException, KeeperException { final Stat stat = new Stat(); byte[] data; LOG.info("Checking for any old active which needs to be fenced..."); try { data = zkDoWithRetries(new ZKAction<byte[]>() { @Override public byte[] run() throws KeeperException, InterruptedException { return zkClient.getData(zkBreadCrumbPath, false, stat); } }); } catch (KeeperException ke) { if (isNodeDoesNotExist(ke.code())) { LOG.info("No old node to fence"); return null; } // If we failed to read for any other reason, then likely we lost // our session, or we don't have permissions, etc. In any case, // we probably shouldn't become active, and failing the whole // thing is the best bet. 
throw ke; } LOG.info("Old node exists: {}", StringUtils.byteToHexString(data)); if (Arrays.equals(data, appData)) { LOG.info("But old node has our own data, so don't need to fence it."); } else { appClient.fenceOldActive(data); } return stat; } private void becomeStandby() { if (state != State.STANDBY) { LOG.debug("Becoming standby for {}", this); state = State.STANDBY; appClient.becomeStandby(); } } private void enterNeutralMode() { if (state != State.NEUTRAL) { LOG.debug("Entering neutral mode for {}", this); state = State.NEUTRAL; appClient.enterNeutralMode(); } } private void createLockNodeAsync() { zkClient.create(zkLockFilePath, appData, zkAcl, CreateMode.EPHEMERAL, this, zkClient); } private void monitorLockNodeAsync() { if (monitorLockNodePending && monitorLockNodeClient == zkClient) { LOG.info("Ignore duplicate monitor lock-node request."); return; } monitorLockNodePending = true; monitorLockNodeClient = zkClient; zkClient.exists(zkLockFilePath, watcher, this, zkClient); } private String createWithRetries(final String path, final byte[] data, final List<ACL> acl, final CreateMode mode) throws InterruptedException, KeeperException { return zkDoWithRetries(new ZKAction<String>() { @Override public String run() throws KeeperException, InterruptedException { return zkClient.create(path, data, acl, mode); } }); } private byte[] getDataWithRetries(final String path, final boolean watch, final Stat stat) throws InterruptedException, KeeperException { return zkDoWithRetries(new ZKAction<byte[]>() { @Override public byte[] run() throws KeeperException, InterruptedException { return zkClient.getData(path, watch, stat); } }); } private Stat setDataWithRetries(final String path, final byte[] data, final int version) throws InterruptedException, KeeperException { return zkDoWithRetries(new ZKAction<Stat>() { @Override public Stat run() throws KeeperException, InterruptedException { return zkClient.setData(path, data, version); } }); } private void 
deleteWithRetries(final String path, final int version) throws KeeperException, InterruptedException { zkDoWithRetries(new ZKAction<Void>() { @Override public Void run() throws KeeperException, InterruptedException { zkClient.delete(path, version); return null; } }); } private void setAclsWithRetries(final String path) throws KeeperException, InterruptedException { Stat stat = new Stat(); zkDoWithRetries(new ZKAction<Void>() { @Override public Void run() throws KeeperException, InterruptedException { List<ACL> acl = zkClient.getACL(path, stat); if (acl == null || !acl.containsAll(zkAcl) || !zkAcl.containsAll(acl)) { zkClient.setACL(path, zkAcl, stat.getAversion()); } return null; } }, Code.BADVERSION); } private <T> T zkDoWithRetries(ZKAction<T> action) throws KeeperException, InterruptedException { return zkDoWithRetries(action, null); } private <T> T zkDoWithRetries(ZKAction<T> action, Code retryCode) throws KeeperException, InterruptedException { int retry = 0; while (true) { try { return action.run(); } catch (KeeperException ke) { if ((shouldRetry(ke.code()) || shouldRetry(ke.code(), retryCode)) && ++retry < maxRetryNum) { continue; } throw ke; } } } private interface ZKAction<T> { T run() throws KeeperException, InterruptedException; } /** * The callbacks and watchers pass a reference to the ZK client * which made the original call. We don't want to take action * based on any callbacks from prior clients after we quit * the election. * @param ctx the ZK client passed into the watcher * @return true if it matches the current client */ private synchronized boolean isStaleClient(Object ctx) { Preconditions.checkNotNull(ctx); if (zkClient != (ZooKeeper)ctx) { LOG.warn("Ignoring stale result from old client with sessionId {}", String.format("0x%08x", ((ZooKeeper)ctx).getSessionId())); return true; } return false; } /** * Watcher implementation which keeps a reference around to the * original ZK connection, and passes it back along with any * events. 
*/ private final class WatcherWithClientRef implements Watcher { private ZooKeeper zk; /** * Latch fired whenever any event arrives. This is used in order * to wait for the Connected event when the client is first created. */ private CountDownLatch hasReceivedEvent = new CountDownLatch(1); /** * Latch used to wait until the reference to ZooKeeper is set. */ private CountDownLatch hasSetZooKeeper = new CountDownLatch(1); /** * Waits for the next event from ZooKeeper to arrive. * * @param connectionTimeoutMs zookeeper connection timeout in milliseconds * @throws KeeperException if the connection attempt times out. This will * be a ZooKeeper ConnectionLoss exception code. * @throws IOException if interrupted while connecting to ZooKeeper */ private void waitForZKConnectionEvent(int connectionTimeoutMs) throws KeeperException, IOException { try { if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) { LOG.error("Connection timed out: couldn't connect to ZooKeeper in " + "{} milliseconds", connectionTimeoutMs); zk.close(); throw KeeperException.create(Code.CONNECTIONLOSS); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException( "Interrupted when connecting to zookeeper server", e); } } private void setZooKeeperRef(ZooKeeper zk) { Preconditions.checkState(this.zk == null, "zk already set -- must be set exactly once"); this.zk = zk; hasSetZooKeeper.countDown(); } @Override public void process(WatchedEvent event) { hasReceivedEvent.countDown(); try { if (!hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS)) { LOG.debug("Event received with stale zk"); } ActiveStandbyElector.this.processWatchEvent( zk, event); } catch (Throwable t) { fatalError( "Failed to process watcher event " + event + ": " + StringUtils.stringifyException(t)); } } } private static boolean isSuccess(Code code) { return (code == Code.OK); } private static boolean isNodeExists(Code code) { return (code == Code.NODEEXISTS); } private 
static boolean isNodeDoesNotExist(Code code) { return (code == Code.NONODE); } private static boolean isSessionExpired(Code code) { return (code == Code.SESSIONEXPIRED); } private static boolean shouldRetry(Code code) { return code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT; } private static boolean shouldRetry(Code code, Code retryIfCode) { return (retryIfCode == null ? false : retryIfCode == code); } @Override public String toString() { return "elector id=" + System.identityHashCode(this) + " appData=" + ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + " cb=" + appClient; } public String getHAZookeeperConnectionState() { return this.zkConnectionState.name(); } }
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
"""Command-line support for Coverage.""" import optparse, os, sys, time, traceback from coverage.backward import sorted # pylint: disable=W0622 from coverage.execfile import run_python_file, run_python_module from coverage.misc import CoverageException, ExceptionDuringRun, NoSource from coverage.debug import info_formatter class Opts(object): """A namespace class for individual options we'll build parsers from.""" append = optparse.make_option( '-a', '--append', action='store_false', dest="erase_first", help="Append coverage data to .coverage, otherwise it is started " "clean with each run." ) branch = optparse.make_option( '', '--branch', action='store_true', help="Measure branch coverage in addition to statement coverage." ) debug = optparse.make_option( '', '--debug', action='store', metavar="OPTS", help="Debug options, separated by commas" ) directory = optparse.make_option( '-d', '--directory', action='store', metavar="DIR", help="Write the output files to DIR." ) fail_under = optparse.make_option( '', '--fail-under', action='store', metavar="MIN", type="int", help="Exit with a status of 2 if the total coverage is less than MIN." ) help = optparse.make_option( '-h', '--help', action='store_true', help="Get help on this command." ) ignore_errors = optparse.make_option( '-i', '--ignore-errors', action='store_true', help="Ignore errors while reading source files." ) include = optparse.make_option( '', '--include', action='store', metavar="PAT1,PAT2,...", help="Include files only when their filename path matches one of " "these patterns. Usually needs quoting on the command line." ) pylib = optparse.make_option( '-L', '--pylib', action='store_true', help="Measure coverage even inside the Python installed library, " "which isn't done by default." ) show_missing = optparse.make_option( '-m', '--show-missing', action='store_true', help="Show line numbers of statements in each module that weren't " "executed." 
) old_omit = optparse.make_option( '-o', '--omit', action='store', metavar="PAT1,PAT2,...", help="Omit files when their filename matches one of these patterns. " "Usually needs quoting on the command line." ) omit = optparse.make_option( '', '--omit', action='store', metavar="PAT1,PAT2,...", help="Omit files when their filename matches one of these patterns. " "Usually needs quoting on the command line." ) output_xml = optparse.make_option( '-o', '', action='store', dest="outfile", metavar="OUTFILE", help="Write the XML report to this file. Defaults to 'coverage.xml'" ) parallel_mode = optparse.make_option( '-p', '--parallel-mode', action='store_true', help="Append the machine name, process id and random number to the " ".coverage data file name to simplify collecting data from " "many processes." ) module = optparse.make_option( '-m', '--module', action='store_true', help="<pyfile> is an importable Python module, not a script path, " "to be run as 'python -m' would run it." ) rcfile = optparse.make_option( '', '--rcfile', action='store', help="Specify configuration file. Defaults to '.coveragerc'" ) source = optparse.make_option( '', '--source', action='store', metavar="SRC1,SRC2,...", help="A list of packages or directories of code to be measured." ) timid = optparse.make_option( '', '--timid', action='store_true', help="Use a simpler but slower trace method. Try this if you get " "seemingly impossible results!" ) title = optparse.make_option( '', '--title', action='store', metavar="TITLE", help="A text string to use as the title on the HTML." ) version = optparse.make_option( '', '--version', action='store_true', help="Display version information and exit." ) class CoverageOptionParser(optparse.OptionParser, object): """Base OptionParser for coverage. Problems don't exit the program. Defaults are initialized for all options. 
""" def __init__(self, *args, **kwargs): super(CoverageOptionParser, self).__init__( add_help_option=False, *args, **kwargs ) self.set_defaults( actions=[], branch=None, debug=None, directory=None, fail_under=None, help=None, ignore_errors=None, include=None, omit=None, parallel_mode=None, module=None, pylib=None, rcfile=True, show_missing=None, source=None, timid=None, title=None, erase_first=None, version=None, ) self.disable_interspersed_args() self.help_fn = self.help_noop def help_noop(self, error=None, topic=None, parser=None): """No-op help function.""" pass class OptionParserError(Exception): """Used to stop the optparse error handler ending the process.""" pass def parse_args(self, args=None, options=None): """Call optparse.parse_args, but return a triple: (ok, options, args) """ try: options, args = \ super(CoverageOptionParser, self).parse_args(args, options) except self.OptionParserError: return False, None, None return True, options, args def error(self, msg): """Override optparse.error so sys.exit doesn't get called.""" self.help_fn(msg) raise self.OptionParserError class ClassicOptionParser(CoverageOptionParser): """Command-line parser for coverage.py classic arguments.""" def __init__(self): super(ClassicOptionParser, self).__init__() self.add_action('-a', '--annotate', 'annotate') self.add_action('-b', '--html', 'html') self.add_action('-c', '--combine', 'combine') self.add_action('-e', '--erase', 'erase') self.add_action('-r', '--report', 'report') self.add_action('-x', '--execute', 'execute') self.add_options([ Opts.directory, Opts.help, Opts.ignore_errors, Opts.pylib, Opts.show_missing, Opts.old_omit, Opts.parallel_mode, Opts.timid, Opts.version, ]) def add_action(self, dash, dashdash, action_code): """Add a specialized option that is the action to execute.""" option = self.add_option(dash, dashdash, action='callback', callback=self._append_action ) option.action_code = action_code def _append_action(self, option, opt_unused, value_unused, 
parser): """Callback for an option that adds to the `actions` list.""" parser.values.actions.append(option.action_code) class CmdOptionParser(CoverageOptionParser): """Parse one of the new-style commands for coverage.py.""" def __init__(self, action, options=None, defaults=None, usage=None, cmd=None, description=None ): """Create an OptionParser for a coverage command. `action` is the slug to put into `options.actions`. `options` is a list of Option's for the command. `defaults` is a dict of default value for options. `usage` is the usage string to display in help. `cmd` is the command name, if different than `action`. `description` is the description of the command, for the help text. """ if usage: usage = "%prog " + usage super(CmdOptionParser, self).__init__( prog="coverage %s" % (cmd or action), usage=usage, description=description, ) self.set_defaults(actions=[action], **(defaults or {})) if options: self.add_options(options) self.cmd = cmd or action def __eq__(self, other): # A convenience equality, so that I can put strings in unit test # results, and they will compare equal to objects. return (other == "<CmdOptionParser:%s>" % self.cmd) GLOBAL_ARGS = [ Opts.rcfile, Opts.help, ] CMDS = { 'annotate': CmdOptionParser("annotate", [ Opts.directory, Opts.ignore_errors, Opts.omit, Opts.include, ] + GLOBAL_ARGS, usage = "[options] [modules]", description = "Make annotated copies of the given files, marking " "statements that are executed with > and statements that are " "missed with !." ), 'combine': CmdOptionParser("combine", GLOBAL_ARGS, usage = " ", description = "Combine data from multiple coverage files collected " "with 'run -p'. The combined results are written to a single " "file representing the union of the data." ), 'debug': CmdOptionParser("debug", GLOBAL_ARGS, usage = "<topic>", description = "Display information on the internals of coverage.py, " "for diagnosing problems. 
" "Topics are 'data' to show a summary of the collected data, " "or 'sys' to show installation information." ), 'erase': CmdOptionParser("erase", GLOBAL_ARGS, usage = " ", description = "Erase previously collected coverage data." ), 'help': CmdOptionParser("help", GLOBAL_ARGS, usage = "[command]", description = "Describe how to use coverage.py" ), 'html': CmdOptionParser("html", [ Opts.directory, Opts.fail_under, Opts.ignore_errors, Opts.omit, Opts.include, Opts.title, ] + GLOBAL_ARGS, usage = "[options] [modules]", description = "Create an HTML report of the coverage of the files. " "Each file gets its own page, with the source decorated to show " "executed, excluded, and missed lines." ), 'report': CmdOptionParser("report", [ Opts.fail_under, Opts.ignore_errors, Opts.omit, Opts.include, Opts.show_missing, ] + GLOBAL_ARGS, usage = "[options] [modules]", description = "Report coverage statistics on modules." ), 'run': CmdOptionParser("execute", [ Opts.append, Opts.branch, Opts.debug, Opts.pylib, Opts.parallel_mode, Opts.module, Opts.timid, Opts.source, Opts.omit, Opts.include, ] + GLOBAL_ARGS, defaults = {'erase_first': True}, cmd = "run", usage = "[options] <pyfile> [program options]", description = "Run a Python program, measuring code execution." ), 'xml': CmdOptionParser("xml", [ Opts.fail_under, Opts.ignore_errors, Opts.omit, Opts.include, Opts.output_xml, ] + GLOBAL_ARGS, cmd = "xml", usage = "[options] [modules]", description = "Generate an XML report of coverage results." ), } OK, ERR, FAIL_UNDER = 0, 1, 2 class CoverageScript(object): """The command-line interface to Coverage.""" def __init__(self, _covpkg=None, _run_python_file=None, _run_python_module=None, _help_fn=None): # _covpkg is for dependency injection, so we can test this code. 
if _covpkg: self.covpkg = _covpkg else: import coverage self.covpkg = coverage # For dependency injection: self.run_python_file = _run_python_file or run_python_file self.run_python_module = _run_python_module or run_python_module self.help_fn = _help_fn or self.help self.classic = False self.coverage = None def command_line(self, argv): """The bulk of the command line interface to Coverage. `argv` is the argument list to process. Returns 0 if all is well, 1 if something went wrong. """ # Collect the command-line options. if not argv: self.help_fn(topic='minimum_help') return OK # The command syntax we parse depends on the first argument. Classic # syntax always starts with an option. self.classic = argv[0].startswith('-') if self.classic: parser = ClassicOptionParser() else: parser = CMDS.get(argv[0]) if not parser: self.help_fn("Unknown command: '%s'" % argv[0]) return ERR argv = argv[1:] parser.help_fn = self.help_fn ok, options, args = parser.parse_args(argv) if not ok: return ERR # Handle help and version. if self.do_help(options, args, parser): return OK # Check for conflicts and problems in the options. if not self.args_ok(options, args): return ERR # Listify the list options. source = unshell_list(options.source) omit = unshell_list(options.omit) include = unshell_list(options.include) debug = unshell_list(options.debug) # Do something. self.coverage = self.covpkg.coverage( data_suffix = options.parallel_mode, cover_pylib = options.pylib, timid = options.timid, branch = options.branch, config_file = options.rcfile, source = source, omit = omit, include = include, debug = debug, ) if 'debug' in options.actions: return self.do_debug(args) if 'erase' in options.actions or options.erase_first: self.coverage.erase() else: self.coverage.load() if 'execute' in options.actions: self.do_execute(options, args) if 'combine' in options.actions: self.coverage.combine() self.coverage.save() # Remaining actions are reporting, with some common options. 
report_args = dict( morfs = args, ignore_errors = options.ignore_errors, omit = omit, include = include, ) if 'report' in options.actions: total = self.coverage.report( show_missing=options.show_missing, **report_args) if 'annotate' in options.actions: self.coverage.annotate( directory=options.directory, **report_args) if 'html' in options.actions: total = self.coverage.html_report( directory=options.directory, title=options.title, **report_args) if 'xml' in options.actions: outfile = options.outfile total = self.coverage.xml_report(outfile=outfile, **report_args) if options.fail_under is not None: if total >= options.fail_under: return OK else: return FAIL_UNDER else: return OK def help(self, error=None, topic=None, parser=None): """Display an error message, or the named topic.""" assert error or topic or parser if error: print(error) print("Use 'coverage help' for help.") elif parser: print(parser.format_help().strip()) else: help_msg = HELP_TOPICS.get(topic, '').strip() if help_msg: print(help_msg % self.covpkg.__dict__) else: print("Don't know topic %r" % topic) def do_help(self, options, args, parser): """Deal with help requests. Return True if it handled the request, False if not. """ # Handle help. if options.help: if self.classic: self.help_fn(topic='help') else: self.help_fn(parser=parser) return True if "help" in options.actions: if args: for a in args: parser = CMDS.get(a) if parser: self.help_fn(parser=parser) else: self.help_fn(topic=a) else: self.help_fn(topic='help') return True # Handle version. if options.version: self.help_fn(topic='version') return True return False def args_ok(self, options, args): """Check for conflicts and problems in the options. Returns True if everything is ok, or False if not. """ for i in ['erase', 'execute']: for j in ['annotate', 'html', 'report', 'combine']: if (i in options.actions) and (j in options.actions): self.help_fn("You can't specify the '%s' and '%s' " "options at the same time." 
% (i, j)) return False if not options.actions: self.help_fn( "You must specify at least one of -e, -x, -c, -r, -a, or -b." ) return False args_allowed = ( 'execute' in options.actions or 'annotate' in options.actions or 'html' in options.actions or 'debug' in options.actions or 'report' in options.actions or 'xml' in options.actions ) if not args_allowed and args: self.help_fn("Unexpected arguments: %s" % " ".join(args)) return False if 'execute' in options.actions and not args: self.help_fn("Nothing to do.") return False return True def do_execute(self, options, args): """Implementation of 'coverage run'.""" # Set the first path element properly. old_path0 = sys.path[0] # Run the script. self.coverage.start() code_ran = True try: try: if options.module: sys.path[0] = '' self.run_python_module(args[0], args) else: filename = args[0] sys.path[0] = os.path.abspath(os.path.dirname(filename)) self.run_python_file(filename, args) except NoSource: code_ran = False raise finally: self.coverage.stop() if code_ran: self.coverage.save() # Restore the old path sys.path[0] = old_path0 def do_debug(self, args): """Implementation of 'coverage debug'.""" if not args: self.help_fn("What information would you like: data, sys?") return ERR for info in args: if info == 'sys': print("-- sys ----------------------------------------") for line in info_formatter(self.coverage.sysinfo()): print(" %s" % line) elif info == 'data': print("-- data ---------------------------------------") self.coverage.load() print("path: %s" % self.coverage.data.filename) print("has_arcs: %r" % self.coverage.data.has_arcs()) summary = self.coverage.data.summary(fullpath=True) if summary: filenames = sorted(summary.keys()) print("\n%d files:" % len(filenames)) for f in filenames: print("%s: %d lines" % (f, summary[f])) else: print("No data collected") else: self.help_fn("Don't know what you mean by %r" % info) return ERR return OK def unshell_list(s): """Turn a command-line argument into a list.""" if not s: 
return None if sys.platform == 'win32': # When running coverage as coverage.exe, some of the behavior # of the shell is emulated: wildcards are expanded into a list of # filenames. So you have to single-quote patterns on the command # line, but (not) helpfully, the single quotes are included in the # argument, so we have to strip them off here. s = s.strip("'") return s.split(',') HELP_TOPICS = { # ------------------------- 'classic': r"""Coverage.py version %(__version__)s Measure, collect, and report on code coverage in Python programs. Usage: coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] Execute the module, passing the given command-line arguments, collecting coverage data. With the -p option, include the machine name and process id in the .coverage file name. With -L, measure coverage even inside the Python installed library, which isn't done by default. With --timid, use a simpler but slower trace method. coverage -e Erase collected coverage data. coverage -c Combine data from multiple coverage files (as created by -p option above) and store it into a single file representing the union of the coverage. coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] Report on the statement coverage for the given files. With the -m option, show line numbers of the statements that weren't executed. coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...] Create an HTML report of the coverage of the given files. Each file gets its own page, with the file listing decorated to show executed, excluded, and missed lines. coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...] Make annotated copies of the given files, marking statements that are executed with > and statements that are missed with !. -d DIR Write output files for -b or -a to this directory. -i Ignore errors while reporting or annotating. -o DIR,... Omit reporting or annotating files when their filename path starts with a directory listed in the omit list. e.g. 
coverage -i -r -o c:\python25,lib\enthought\traits Coverage data is saved in the file .coverage by default. Set the COVERAGE_FILE environment variable to save it somewhere else. """, # ------------------------- 'help': """\ Coverage.py, version %(__version__)s Measure, collect, and report on code coverage in Python programs. usage: coverage <command> [options] [args] Commands: annotate Annotate source files with execution information. combine Combine a number of data files. erase Erase previously collected coverage data. help Get help on using coverage.py. html Create an HTML report. report Report coverage stats on modules. run Run a Python program and measure code execution. xml Create an XML report of coverage results. Use "coverage help <command>" for detailed help on any command. Use "coverage help classic" for help on older command syntax. For more information, see %(__url__)s """, # ------------------------- 'minimum_help': """\ Code coverage for Python. Use 'coverage help' for help. """, # ------------------------- 'version': """\ Coverage.py, version %(__version__)s. %(__url__)s """, } def main(argv=None): """The main entry point to Coverage. This is installed as the script entry point. """ if argv is None: argv = sys.argv[1:] try: start = time.clock() status = CoverageScript().command_line(argv) end = time.clock() if 0: print("time: %.3fs" % (end - start)) except ExceptionDuringRun: # An exception was caught while running the product code. The # sys.exc_info() return tuple is packed into an ExceptionDuringRun # exception. _, err, _ = sys.exc_info() traceback.print_exception(*err.args) status = ERR except CoverageException: # A controlled error inside coverage.py: print the message to the user. _, err, _ = sys.exc_info() print(err) status = ERR except SystemExit: # The user called `sys.exit()`. Exit with their argument, if any. _, err, _ = sys.exc_info() if err.args: status = err.args[0] else: status = None return status
unknown
codeparrot/codeparrot-clean
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import os import random import zipfile import numpy as np from six.moves import urllib from six.moves import xrange import tensorflow as tf # Step 1: Download the data. url = 'http://mattmahoney.net/dc/' def maybe_download(filename, expected_bytes): """Download a file if not present, and make sure it's the right size.""" if not os.path.exists(filename): filename, _ = urllib.request.urlretrieve(url + filename, filename) statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: print(statinfo.st_size) raise Exception( 'Failed to verify ' + filename + '. Can you get to it with a browser?') return filename filename = maybe_download('text8.zip', 31344016) # Read the data into a list of strings. def read_data(filename): """Extract the first file enclosed in a zip file as a list of words""" with zipfile.ZipFile(filename) as f: data = tf.compat.as_str(f.read(f.namelist()[0])).split() return data words = read_data(filename) print('Data size', len(words)) # Step 2: Build the dictionary and replace rare words with UNK token. vocabulary_size = 50000 def build_dataset(words, vocabulary_size): count = [['UNK', -1]] count.extend(collections.Counter(words).most_common(vocabulary_size - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: if word in dictionary: index = dictionary[word] else: index = 0 # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reverse_dictionary data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size) del words # Hint to reduce memory. 
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

data_index = 0


# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    """Emit one (center -> context) training batch from the global data stream."""
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # The sliding window holds skip_window words on each side of the center.
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    window = collections.deque(maxlen=span)
    for _ in range(span):
        window.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for center in range(batch_size // num_skips):
        pick = skip_window  # target label at the center of the buffer
        used = [skip_window]
        for draw in range(num_skips):
            # Sample a context position that has not been used yet and is
            # not the center itself.
            while pick in used:
                pick = random.randint(0, span - 1)
            used.append(pick)
            row = center * num_skips + draw
            batch[row] = window[skip_window]
            labels[row, 0] = window[pick]
        window.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels


batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]], '->',
          labels[i, 0], reverse_dictionary[labels[i, 0]])

# Step 4: Build and train a skip-gram model.

batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit
# the validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16       # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64    # Number of negative examples to sample.

# Build the TF1-style computation graph: skip-gram embeddings trained with
# noise-contrastive estimation (NCE), plus a cosine-similarity probe for the
# validation words.
graph = tf.Graph()

with graph.as_default():

  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))

  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

  # Compute the cosine similarity between minibatch examples and all
  # embeddings (row-normalize first so the matmul yields cosine similarity).
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)

  # Add variable initializer.
  init = tf.global_variables_initializer()

# Step 5: Begin training.
num_steps = 100001

with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print("Initialized")

  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val

    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000
      # batches.
      print("Average loss at step ", step, ": ", average_loss)
      average_loss = 0

    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      for i in xrange(valid_size):
        valid_word = reverse_dictionary[valid_examples[i]]
        top_k = 8  # number of nearest neighbors
        # argsort of the negated row sorts by descending similarity; slot 0
        # is the validation word itself, so skip it.
        nearest = (-sim[i, :]).argsort()[1:top_k + 1]
        log_str = "Nearest to %s:" % valid_word
        for k in xrange(top_k):
          close_word = reverse_dictionary[nearest[k]]
          log_str = "%s %s," % (log_str, close_word)
        print(log_str)
  final_embeddings = normalized_embeddings.eval()


# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
  # Scatter-plot the 2-D embedding, annotating each point with its word.
  # NOTE(review): depends on `plt` having been imported by the try-block
  # below before the first call — confirm if reusing this function elsewhere.
  assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  # in inches
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i, :]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')

  plt.savefig(filename)


try:
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt

  # Project the first 500 (most frequent) words to 2-D with t-SNE.
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels)

except ImportError:
  print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
unknown
codeparrot/codeparrot-clean
{ "name": "serve", "command": "ng serve [project]", "shortDescription": "Builds and serves your application, rebuilding on file changes.", "aliases": [ "dev", "s" ], "deprecated": false, "options": [ { "name": "allowed-hosts", "type": "boolean", "description": "The hosts that the development server will respond to. This option sets the Vite option of the same name. For further details: https://vite.dev/config/server-options.html#server-allowedhosts" }, { "name": "build-target", "type": "string", "description": "A build builder target to serve in the format of `project:target[:configuration]`. You can also pass in more than one configuration name as a comma-separated list. Example: `project:target:production,staging`." }, { "name": "configuration", "type": "string", "aliases": [ "c" ], "description": "One or more named builder configurations as a comma-separated list as specified in the \"configurations\" section in angular.json.\nThe builder uses the named configurations to run the given target.\nFor more information, see https://angular.dev/reference/configs/workspace-config#alternate-build-configurations." }, { "name": "define", "type": "array", "description": "Defines global identifiers that will be replaced with a specified constant value when found in any JavaScript or TypeScript code including libraries. The value will be used directly. String values must be put in quotes. Identifiers within Angular metadata such as Component Decorators will not be replaced." }, { "name": "headers", "type": "array", "description": "Custom HTTP headers to be added to all responses." }, { "name": "help", "type": "boolean", "description": "Shows a help message for this command in the console." }, { "name": "hmr", "type": "boolean", "description": "Enable hot module replacement. Defaults to the value of 'liveReload'. Currently, only global and component stylesheets are supported." }, { "name": "host", "type": "string", "default": "localhost", "description": "Host to listen on." 
}, { "name": "inspect", "type": "string", "description": "Activate debugging inspector. This option only has an effect when 'SSR' or 'SSG' are enabled." }, { "name": "live-reload", "type": "boolean", "default": true, "description": "Whether to reload the page on change, using live-reload." }, { "name": "open", "type": "boolean", "aliases": [ "o" ], "default": false, "description": "Opens the url in default browser." }, { "name": "poll", "type": "number", "description": "Enable and define the file watching poll time period in milliseconds." }, { "name": "port", "type": "number", "default": 4200, "description": "Port to listen on." }, { "name": "prebundle", "type": "boolean", "default": true, "description": "Enable and control the Vite-based development server's prebundling capabilities. To enable prebundling, the Angular CLI cache must also be enabled." }, { "name": "project", "type": "string", "description": "The name of the project to build. Can be an application or a library.", "positional": 0 }, { "name": "proxy-config", "type": "string", "description": "Proxy configuration file. For more information, see https://angular.dev/tools/cli/serve#proxying-to-a-backend-server." }, { "name": "serve-path", "type": "string", "description": "The pathname where the application will be served." }, { "name": "ssl", "type": "boolean", "default": false, "description": "Serve using HTTPS." }, { "name": "ssl-cert", "type": "string", "description": "SSL certificate to use for serving HTTPS." }, { "name": "ssl-key", "type": "string", "description": "SSL key to use for serving HTTPS." }, { "name": "verbose", "type": "boolean", "description": "Adds more details to output logging." }, { "name": "watch", "type": "boolean", "default": true, "description": "Rebuild on change." } ] }
json
github
https://github.com/angular/angular
adev/src/content/cli/serve.json
import os
import shutil
import sys
import tempfile

# Prefer Twisted's TestCase (richer assertions) when available; fall back to
# the standard library otherwise.
try:
    from twisted.trial.unittest import TestCase
except ImportError:
    from unittest import TestCase

from wok import renderers
from wok.engine import Engine

# Expected extension -> renderer mapping that Engine.load_renderers() builds
# when the site provides no custom renderers. Populated once per test module.
DefaultRenderers = {}


def setUpModule():
    """Collect the default renderer mapping from the installed renderers."""
    for renderer in renderers.all:
        DefaultRenderers.update((ext, renderer) for ext in renderer.extensions)


class TestEngine(TestCase):
    """Tests for Engine.load_hooks() and Engine.load_renderers().

    Each test runs inside a fresh temporary directory so the Engine's
    directory-relative plugin lookups ('hooks/', 'renderers/') are isolated.
    """

    def setUp(self):
        # Remember the starting directory so tearDown can restore it
        # reliably. The previous `os.chdir('..')` is not guaranteed to land
        # back here (e.g. when the temp path contains symlinks, as on macOS).
        self._old_cwd = os.getcwd()
        self.tmp_path = tempfile.mkdtemp()
        os.chdir(self.tmp_path)

    def tearDown(self):
        os.chdir(self._old_cwd)
        if self.tmp_path is not None:
            shutil.rmtree(self.tmp_path)
            self.tmp_path = None
        # Drop cached plugin modules so one test's plugins cannot leak into
        # the next test's import of the same module name.
        if '__hooks__' in sys.modules:
            del sys.modules['__hooks__']
        if '__renderers__' in sys.modules:
            del sys.modules['__renderers__']

    def assert_dict_subset(self, subset, mapping):
        """Assert every (key, value) pair of `subset` appears in `mapping`.

        Replacement for TestCase.assertDictContainsSubset, which was
        deprecated in Python 3.2 and removed in Python 3.12.
        """
        self.assertEqual({key: mapping.get(key) for key in subset}, subset)

    def test_load_hooks_no_hooks(self):
        # Without a hooks directory, load_hooks() must not create `hooks`.
        e = Engine.__new__(Engine)
        e.load_hooks()
        self.assertFalse(hasattr(e, 'hooks'))

    def test_load_hooks_empty_directory(self):
        # An empty hooks directory behaves like no directory at all.
        os.mkdir('hooks')
        e = Engine.__new__(Engine)
        e.load_hooks()
        self.assertFalse(hasattr(e, 'hooks'))

    def test_load_hooks(self):
        os.mkdir('hooks')
        with open(os.path.join('hooks', '__hooks__.py'), 'a') as f:
            f.write('hooks = { "name": "action" }\n')
        e = Engine.__new__(Engine)
        e.load_hooks()
        self.assertIsInstance(e.hooks, dict)
        self.assertIn('name', e.hooks)
        self.assertEqual(e.hooks['name'], 'action')

    def test_load_renderers_no_ext_renderers(self):
        # No renderers directory: only the built-in defaults are available.
        e = Engine.__new__(Engine)
        e.load_renderers()
        self.assertIsInstance(e.renderers, dict)
        self.assertDictEqual(e.renderers, DefaultRenderers)

    def test_load_renderers_empty_directory(self):
        os.mkdir('renderers')
        e = Engine.__new__(Engine)
        e.load_renderers()
        self.assertIsInstance(e.renderers, dict)
        self.assertDictEqual(e.renderers, DefaultRenderers)

    def test_load_renderers(self):
        # Custom renderers are merged on top of the defaults.
        os.mkdir('renderers')
        with open(os.path.join('renderers', '__renderers__.py'), 'a') as f:
            f.write('renderers = { "html": "class" }\n')
        e = Engine.__new__(Engine)
        e.load_renderers()
        self.assertIsInstance(e.renderers, dict)
        self.assert_dict_subset(DefaultRenderers, e.renderers)
        self.assertIn('html', e.renderers)
        self.assertEqual(e.renderers['html'], 'class')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ipa_sudorule author: Thomas Krahn (@Nosmoht) short_description: Manage FreeIPA sudo rule description: - Add, modify or delete sudo rule within IPA server using IPA API. options: cn: description: - Canonical name. - Can not be changed as it is the unique identifier. required: true aliases: ['name'] cmdcategory: description: - Command category the rule applies to. choices: ['all'] cmd: description: - List of commands assigned to the rule. - If an empty list is passed all commands will be removed from the rule. - If option is omitted commands will not be checked or changed. host: description: - List of hosts assigned to the rule. - If an empty list is passed all hosts will be removed from the rule. - If option is omitted hosts will not be checked or changed. - Option C(hostcategory) must be omitted to assign hosts. hostcategory: description: - Host category the rule applies to. - If 'all' is passed one must omit C(host) and C(hostgroup). - Option C(host) and C(hostgroup) must be omitted to assign 'all'. choices: ['all'] hostgroup: description: - List of host groups assigned to the rule. - If an empty list is passed all host groups will be removed from the rule. - If option is omitted host groups will not be checked or changed. - Option C(hostcategory) must be omitted to assign host groups. runasusercategory: description: - RunAs User category the rule applies to. choices: ['all'] version_added: "2.5" runasgroupcategory: description: - RunAs Group category the rule applies to. choices: ['all'] version_added: "2.5" user: description: - List of users assigned to the rule. 
- If an empty list is passed all users will be removed from the rule. - If option is omitted users will not be checked or changed. usercategory: description: - User category the rule applies to. choices: ['all'] usergroup: description: - List of user groups assigned to the rule. - If an empty list is passed all user groups will be removed from the rule. - If option is omitted user groups will not be checked or changed. state: description: State to ensure default: present choices: ['present', 'absent', 'enabled', 'disabled'] extends_documentation_fragment: ipa.documentation version_added: "2.3" ''' EXAMPLES = ''' # Ensure sudo rule is present that's allows all every body to execute any command on any host without being asked for a password. - ipa_sudorule: name: sudo_all_nopasswd cmdcategory: all description: Allow to run every command with sudo without password hostcategory: all sudoopt: - '!authenticate' usercategory: all ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret # Ensure user group developers can run every command on host group db-server as well as on host db01.example.com. 
- ipa_sudorule: name: sudo_dev_dbserver description: Allow developers to run every command with sudo on all database server cmdcategory: all host: - db01.example.com hostgroup: - db-server sudoopt: - '!authenticate' usergroup: - developers ipa_host: ipa.example.com ipa_user: admin ipa_pass: topsecret ''' RETURN = ''' sudorule: description: Sudorule as returned by IPA returned: always type: dict ''' import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ipa import IPAClient, ipa_argument_spec from ansible.module_utils._text import to_native class SudoRuleIPAClient(IPAClient): def __init__(self, module, host, port, protocol): super(SudoRuleIPAClient, self).__init__(module, host, port, protocol) def sudorule_find(self, name): return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name}) def sudorule_add(self, name, item): return self._post_json(method='sudorule_add', name=name, item=item) def sudorule_mod(self, name, item): return self._post_json(method='sudorule_mod', name=name, item=item) def sudorule_del(self, name): return self._post_json(method='sudorule_del', name=name) def sudorule_add_option(self, name, item): return self._post_json(method='sudorule_add_option', name=name, item=item) def sudorule_add_option_ipasudoopt(self, name, item): return self.sudorule_add_option(name=name, item={'ipasudoopt': item}) def sudorule_remove_option(self, name, item): return self._post_json(method='sudorule_remove_option', name=name, item=item) def sudorule_remove_option_ipasudoopt(self, name, item): return self.sudorule_remove_option(name=name, item={'ipasudoopt': item}) def sudorule_add_host(self, name, item): return self._post_json(method='sudorule_add_host', name=name, item=item) def sudorule_add_host_host(self, name, item): return self.sudorule_add_host(name=name, item={'host': item}) def sudorule_add_host_hostgroup(self, name, item): return self.sudorule_add_host(name=name, item={'hostgroup': item}) def 
sudorule_remove_host(self, name, item): return self._post_json(method='sudorule_remove_host', name=name, item=item) def sudorule_remove_host_host(self, name, item): return self.sudorule_remove_host(name=name, item={'host': item}) def sudorule_remove_host_hostgroup(self, name, item): return self.sudorule_remove_host(name=name, item={'hostgroup': item}) def sudorule_add_allow_command(self, name, item): return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item}) def sudorule_remove_allow_command(self, name, item): return self._post_json(method='sudorule_remove_allow_command', name=name, item=item) def sudorule_add_user(self, name, item): return self._post_json(method='sudorule_add_user', name=name, item=item) def sudorule_add_user_user(self, name, item): return self.sudorule_add_user(name=name, item={'user': item}) def sudorule_add_user_group(self, name, item): return self.sudorule_add_user(name=name, item={'group': item}) def sudorule_remove_user(self, name, item): return self._post_json(method='sudorule_remove_user', name=name, item=item) def sudorule_remove_user_user(self, name, item): return self.sudorule_remove_user(name=name, item={'user': item}) def sudorule_remove_user_group(self, name, item): return self.sudorule_remove_user(name=name, item={'group': item}) def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None, runasgroupcategory=None, runasusercategory=None): data = {} if cmdcategory is not None: data['cmdcategory'] = cmdcategory if description is not None: data['description'] = description if hostcategory is not None: data['hostcategory'] = hostcategory if ipaenabledflag is not None: data['ipaenabledflag'] = ipaenabledflag if usercategory is not None: data['usercategory'] = usercategory if runasusercategory is not None: data['ipasudorunasusercategory'] = runasusercategory if runasgroupcategory is not None: data['ipasudorunasgroupcategory'] = runasgroupcategory 
return data def category_changed(module, client, category_name, ipa_sudorule): if ipa_sudorule.get(category_name, None) == ['all']: if not module.check_mode: # cn is returned as list even with only a single value. client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None}) return True return False def ensure(module, client): state = module.params['state'] name = module.params['cn'] cmd = module.params['cmd'] cmdcategory = module.params['cmdcategory'] host = module.params['host'] hostcategory = module.params['hostcategory'] hostgroup = module.params['hostgroup'] runasusercategory = module.params['runasusercategory'] runasgroupcategory = module.params['runasgroupcategory'] if state in ['present', 'enabled']: ipaenabledflag = 'TRUE' else: ipaenabledflag = 'FALSE' sudoopt = module.params['sudoopt'] user = module.params['user'] usercategory = module.params['usercategory'] usergroup = module.params['usergroup'] module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory, description=module.params['description'], hostcategory=hostcategory, ipaenabledflag=ipaenabledflag, usercategory=usercategory, runasusercategory=runasusercategory, runasgroupcategory=runasgroupcategory) ipa_sudorule = client.sudorule_find(name=name) changed = False if state in ['present', 'disabled', 'enabled']: if not ipa_sudorule: changed = True if not module.check_mode: ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule) else: diff = client.get_diff(ipa_sudorule, module_sudorule) if len(diff) > 0: changed = True if not module.check_mode: if 'hostcategory' in diff: if ipa_sudorule.get('memberhost_host', None) is not None: client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host')) if ipa_sudorule.get('memberhost_hostgroup', None) is not None: client.sudorule_remove_host_hostgroup(name=name, item=ipa_sudorule.get('memberhost_hostgroup')) client.sudorule_mod(name=name, item=module_sudorule) if cmd is not None: changed = 
category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed if not module.check_mode: client.sudorule_add_allow_command(name=name, item=cmd) if runasusercategory is not None: changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed if runasgroupcategory is not None: changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed if host is not None: changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host, client.sudorule_add_host_host, client.sudorule_remove_host_host) or changed if hostgroup is not None: changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup, client.sudorule_add_host_hostgroup, client.sudorule_remove_host_hostgroup) or changed if sudoopt is not None: # client.modify_if_diff does not work as each option must be removed/added by its own ipa_list = ipa_sudorule.get('ipasudoopt', []) module_list = sudoopt diff = list(set(ipa_list) - set(module_list)) if len(diff) > 0: changed = True if not module.check_mode: for item in diff: client.sudorule_remove_option_ipasudoopt(name, item) diff = list(set(module_list) - set(ipa_list)) if len(diff) > 0: changed = True if not module.check_mode: for item in diff: client.sudorule_add_option_ipasudoopt(name, item) if user is not None: changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user, client.sudorule_add_user_user, client.sudorule_remove_user_user) or changed if usergroup is not None: changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup, client.sudorule_add_user_group, 
client.sudorule_remove_user_group) or changed else: if ipa_sudorule: changed = True if not module.check_mode: client.sudorule_del(name) return changed, client.sudorule_find(name) def main(): argument_spec = ipa_argument_spec() argument_spec.update(cmd=dict(type='list'), cmdcategory=dict(type='str', choices=['all']), cn=dict(type='str', required=True, aliases=['name']), description=dict(type='str'), host=dict(type='list'), hostcategory=dict(type='str', choices=['all']), hostgroup=dict(type='list'), runasusercategory=dict(type='str', choices=['all']), runasgroupcategory=dict(type='str', choices=['all']), sudoopt=dict(type='list'), state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']), user=dict(type='list'), usercategory=dict(type='str', choices=['all']), usergroup=dict(type='list')) module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['cmdcategory', 'cmd'], ['hostcategory', 'host'], ['hostcategory', 'hostgroup'], ['usercategory', 'user'], ['usercategory', 'usergroup']], supports_check_mode=True) client = SudoRuleIPAClient(module=module, host=module.params['ipa_host'], port=module.params['ipa_port'], protocol=module.params['ipa_prot']) try: client.login(username=module.params['ipa_user'], password=module.params['ipa_pass']) changed, sudorule = ensure(module, client) module.exit_json(changed=changed, sudorule=sudorule) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
"""Encapsulate-field refactoring: replace direct attribute access with
getter/setter calls across a rope project."""
from rope.base import evaluate
from rope.base import exceptions
from rope.base import libutils
from rope.base import pynames
from rope.base import taskhandle
from rope.base import utils
from rope.base import worder
from rope.base.change import ChangeSet, ChangeContents
from rope.refactor import sourceutils, occurrences


class EncapsulateField(object):
    """Drive the encapsulate-field refactoring for one class attribute."""

    def __init__(self, project, resource, offset):
        self.project = project
        # Name of the attribute under the cursor at (resource, offset).
        self.name = worder.get_name_at(resource, offset)
        this_pymodule = self.project.get_pymodule(resource)
        self.pyname = evaluate.eval_location(this_pymodule, offset)
        if not self._is_an_attribute(self.pyname):
            raise exceptions.RefactoringError(
                'Encapsulate field should be performed on class attributes.')
        # Resource that actually defines the attribute (may differ from the
        # resource the user clicked in).
        self.resource = self.pyname.get_definition_location()[0].get_resource()

    def get_changes(self, getter=None, setter=None, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the ChangeSet this refactoring produces.

        `getter`/`setter` default to ``get_<field>``/``set_<field>``.
        `resources` limits which files are rewritten; when None, every
        python file in the project is searched.
        """
        # NOTE(review): NullTaskHandle() as a default argument is shared
        # between calls; it appears stateless, so this looks intentional.
        if resources is None:
            resources = self.project.get_python_files()
        changes = ChangeSet('Encapsulate field <%s>' % self.name)
        job_set = task_handle.create_jobset('Collecting Changes',
                                            len(resources))
        if getter is None:
            getter = 'get_' + self.name
        if setter is None:
            setter = 'set_' + self.name
        renamer = GetterSetterRenameInModule(
            self.project, self.name, self.pyname, getter, setter)
        for file in resources:
            job_set.started_job(file.path)
            if file == self.resource:
                # The defining module also receives the new getter/setter
                # method definitions, not just rewritten accesses.
                result = self._change_holding_module(changes, renamer,
                                                     getter, setter)
                changes.add_change(ChangeContents(self.resource, result))
            else:
                result = renamer.get_changed_module(file)
                if result is not None:
                    changes.add_change(ChangeContents(file, result))
            job_set.finished_job()
        return changes

    def get_field_name(self):
        """Get the name of the field to be encapsulated"""
        return self.name

    def _is_an_attribute(self, pyname):
        # True when `pyname` is an assigned name whose defining scope is a
        # class body (directly, or via the enclosing function's class for
        # names assigned in __init__ and friends).
        # NOTE(review): the body reads self.pyname instead of the `pyname`
        # parameter for the definition location; callers pass self.pyname,
        # so the result is the same — confirm before reusing elsewhere.
        if pyname is not None and isinstance(pyname, pynames.AssignedName):
            pymodule, lineno = self.pyname.get_definition_location()
            scope = pymodule.get_scope().\
                get_inner_scope_for_line(lineno)
            if scope.get_kind() == 'Class':
                return pyname in scope.get_names().values()
            parent = scope.parent
            if parent is not None and parent.get_kind() == 'Class':
                return pyname in parent.get_names().values()
        return False

    def _get_defining_class_scope(self):
        # Walk out of a method scope to the class that owns the attribute.
        defining_scope = self._get_defining_scope()
        if defining_scope.get_kind() == 'Function':
            defining_scope = defining_scope.parent
        return defining_scope

    def _get_defining_scope(self):
        pymodule, line = self.pyname.get_definition_location()
        return pymodule.get_scope().get_inner_scope_for_line(line)

    def _change_holding_module(self, changes, renamer, getter, setter):
        """Rewrite the defining module and append getter/setter methods."""
        pymodule = self.project.get_pymodule(self.resource)
        class_scope = self._get_defining_class_scope()
        defining_object = self._get_defining_scope().pyobject
        # Accesses inside the defining scope itself are skipped so the new
        # getter/setter bodies do not call themselves recursively.
        start, end = sourceutils.get_body_region(defining_object)
        new_source = renamer.get_changed_module(pymodule=pymodule,
                                                skip_start=start,
                                                skip_end=end)
        if new_source is not None:
            # Re-parse the rewritten source so the class scope's line numbers
            # match the new text before inserting methods.
            pymodule = libutils.get_string_module(
                self.project, new_source, self.resource)
            class_scope = pymodule.get_scope().\
                get_inner_scope_for_line(class_scope.get_start())
        indents = sourceutils.get_indent(self.project) * ' '
        getter = 'def %s(self):\n%sreturn self.%s' % \
            (getter, indents, self.name)
        setter = 'def %s(self, value):\n%sself.%s = value' % \
            (setter, indents, self.name)
        new_source = sourceutils.add_methods(pymodule, class_scope,
                                             [getter, setter])
        return new_source


class GetterSetterRenameInModule(object):
    """Rewrite reads/writes of a field into getter()/setter() calls."""

    def __init__(self, project, name, pyname, getter, setter):
        self.project = project
        self.name = name
        self.finder = occurrences.create_finder(project, name, pyname)
        self.getter = getter
        self.setter = setter

    def get_changed_module(self, resource=None, pymodule=None,
                           skip_start=0, skip_end=0):
        # Occurrences inside [skip_start, skip_end) are left untouched.
        change_finder = _FindChangesForModule(self, resource, pymodule,
                                              skip_start, skip_end)
        return change_finder.get_changed_module()


class _FindChangesForModule(object):
    """Single-use helper that performs the rewrite for one module.

    The rewrite is a left-to-right scan over the source: text between
    occurrences is copied verbatim and each occurrence is replaced by a
    getter call or an (unterminated) setter call that is closed later by
    _manage_writes once the end of the assignment statement is reached.
    """

    def __init__(self, finder, resource, pymodule, skip_start, skip_end):
        self.project = finder.project
        self.finder = finder.finder
        self.getter = finder.getter
        self.setter = finder.setter
        self.resource = resource
        self.pymodule = pymodule
        # Offset in `source` up to which output has been emitted.
        self.last_modified = 0
        # Source offset of the end of a pending assignment statement whose
        # setter call still needs its closing ')'.
        self.last_set = None
        # Index into `result` where the pending setter's argument starts.
        self.set_index = None
        self.skip_start = skip_start
        self.skip_end = skip_end

    def get_changed_module(self):
        result = []
        for occurrence in self.finder.find_occurrences(self.resource,
                                                       self.pymodule):
            start, end = occurrence.get_word_range()
            if self.skip_start <= start < self.skip_end:
                continue
            # Close any setter call whose statement ended before this point.
            self._manage_writes(start, result)
            result.append(self.source[self.last_modified:start])
            if self._is_assigned_in_a_tuple_assignment(occurrence):
                raise exceptions.RefactoringError(
                    'Cannot handle tuple assignments in encapsulate field.')
            if occurrence.is_written():
                assignment_type = self.worder.get_assignment_type(start)
                if assignment_type == '=':
                    # Plain `x = v`  ->  `set_x(v` (')' added later).
                    result.append(self.setter + '(')
                else:
                    # Augmented `x += v` -> `set_x(get_x() + v` (')' later).
                    var_name = self.source[occurrence.get_primary_range()[0]:
                                           start] + self.getter + '()'
                    result.append(self.setter + '(' + var_name +
                                  ' %s ' % assignment_type[:-1])
                # Remember where the logical assignment line ends so the
                # closing paren can be appended there.
                current_line = self.lines.get_line_number(start)
                start_line, end_line = self.pymodule.logical_lines.\
                    logical_line_in(current_line)  # start_line unused
                self.last_set = self.lines.get_line_end(end_line)
                # Skip past the '=' so the RHS becomes the setter argument.
                end = self.source.index('=', end) + 1
                self.set_index = len(result)
            else:
                result.append(self.getter + '()')
            self.last_modified = end
        if self.last_modified != 0:
            self._manage_writes(len(self.source), result)
            result.append(self.source[self.last_modified:])
            return ''.join(result)
        # No occurrence was rewritten; caller treats None as "unchanged".
        return None

    def _manage_writes(self, offset, result):
        # If a pending setter's statement ends at or before `offset`,
        # collapse its buffered argument text and terminate the call.
        if self.last_set is not None and self.last_set <= offset:
            result.append(self.source[self.last_modified:self.last_set])
            set_value = ''.join(result[self.set_index:]).strip()
            del result[self.set_index:]
            result.append(set_value + ')')
            self.last_modified = self.last_set
            self.last_set = None

    def _is_assigned_in_a_tuple_assignment(self, occurance):
        offset = occurance.get_word_range()[0]
        return self.worder.is_assigned_in_a_tuple_assignment(offset)

    @property
    @utils.saveit
    def source(self):
        # Prefer the raw resource text; fall back to the parsed module.
        if self.resource is not None:
            return self.resource.read()
        else:
            return self.pymodule.source_code

    @property
    @utils.saveit
    def lines(self):
        if self.pymodule is None:
            self.pymodule = self.project.get_pymodule(self.resource)
        return self.pymodule.lines

    @property
    @utils.saveit
    def worder(self):
        return worder.Worder(self.source)
unknown
codeparrot/codeparrot-clean
"""Sensor from an SQL Query.""" import datetime import decimal import logging import sqlalchemy from sqlalchemy.orm import scoped_session, sessionmaker import voluptuous as vol from homeassistant.components.recorder import CONF_DB_URL, DEFAULT_DB_FILE, DEFAULT_URL from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) CONF_COLUMN_NAME = "column" CONF_QUERIES = "queries" CONF_QUERY = "query" def validate_sql_select(value): """Validate that value is a SQL SELECT query.""" if not value.lstrip().lower().startswith("select"): raise vol.Invalid("Only SELECT queries allowed") return value _QUERY_SCHEME = vol.Schema( { vol.Required(CONF_COLUMN_NAME): cv.string, vol.Required(CONF_NAME): cv.string, vol.Required(CONF_QUERY): vol.All(cv.string, validate_sql_select), vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, } ) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_QUERIES): [_QUERY_SCHEME], vol.Optional(CONF_DB_URL): cv.string} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the SQL sensor platform.""" db_url = config.get(CONF_DB_URL, None) if not db_url: db_url = DEFAULT_URL.format(hass_config_path=hass.config.path(DEFAULT_DB_FILE)) try: engine = sqlalchemy.create_engine(db_url) sessmaker = scoped_session(sessionmaker(bind=engine)) # Run a dummy query just to test the db_url sess = sessmaker() sess.execute("SELECT 1;") except sqlalchemy.exc.SQLAlchemyError as err: _LOGGER.error("Couldn't connect using %s DB_URL: %s", db_url, err) return finally: sess.close() queries = [] for query in config.get(CONF_QUERIES): name = query.get(CONF_NAME) query_str = query.get(CONF_QUERY) unit = query.get(CONF_UNIT_OF_MEASUREMENT) value_template = 
query.get(CONF_VALUE_TEMPLATE) column_name = query.get(CONF_COLUMN_NAME) if value_template is not None: value_template.hass = hass sensor = SQLSensor( name, sessmaker, query_str, column_name, unit, value_template ) queries.append(sensor) add_entities(queries, True) class SQLSensor(Entity): """Representation of an SQL sensor.""" def __init__(self, name, sessmaker, query, column, unit, value_template): """Initialize the SQL sensor.""" self._name = name if "LIMIT" in query: self._query = query else: self._query = query.replace(";", " LIMIT 1;") self._unit_of_measurement = unit self._template = value_template self._column_name = column self.sessionmaker = sessmaker self._state = None self._attributes = None @property def name(self): """Return the name of the query.""" return self._name @property def state(self): """Return the query's current state.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._unit_of_measurement @property def device_state_attributes(self): """Return the state attributes.""" return self._attributes def update(self): """Retrieve sensor data from the query.""" try: sess = self.sessionmaker() result = sess.execute(self._query) self._attributes = {} if not result.returns_rows or result.rowcount == 0: _LOGGER.warning("%s returned no results", self._query) self._state = None return for res in result: _LOGGER.debug("result = %s", res.items()) data = res[self._column_name] for key, value in res.items(): if isinstance(value, decimal.Decimal): value = float(value) if isinstance(value, datetime.date): value = str(value) self._attributes[key] = value except sqlalchemy.exc.SQLAlchemyError as err: _LOGGER.error("Error executing query %s: %s", self._query, err) return finally: sess.close() if self._template is not None: self._state = self._template.async_render_with_possible_json_value( data, None ) else: self._state = data
unknown
codeparrot/codeparrot-clean
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package replacefile import ( "fmt" "io/ioutil" "os" "path/filepath" ) // AtomicWriteFile uses a temporary file along with this package's AtomicRename // function in order to provide a replacement for ioutil.WriteFile that // writes the given file into place as atomically as the underlying operating // system can support. // // The sense of "atomic" meant by this function is that the file at the // given filename will either contain the entirety of the previous contents // or the entirety of the given data array if opened and read at any point // during the execution of the function. // // On some platforms attempting to overwrite a file that has at least one // open filehandle will produce an error. On other platforms, the overwriting // will succeed but existing open handles will still refer to the old file, // even though its directory entry is no longer present. // // Although AtomicWriteFile tries its best to avoid leaving behind its // temporary file on error, some particularly messy error cases may result // in a leftover temporary file. func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { dir, file := filepath.Split(filename) if dir == "" { // If the file is in the current working directory then dir will // end up being "", but that's not right here because TempFile // treats an empty dir as meaning "use the TMPDIR environment variable". dir = "." } f, err := ioutil.TempFile(dir, file) // alongside target file and with a similar name if err != nil { return fmt.Errorf("cannot create temporary file to update %s: %s", filename, err) } tmpName := f.Name() moved := false defer func(f *os.File, name string) { // Remove the temporary file if it hasn't been moved yet. We're // ignoring errors here because there's nothing we can do about // them anyway. if !moved { os.Remove(name) } }(f, tmpName) // We'll try to apply the requested permissions. 
This may // not be effective on all platforms, but should at least work on // Unix-like targets and should be harmless elsewhere. if err := os.Chmod(tmpName, perm); err != nil { return fmt.Errorf("cannot set mode for temporary file %s: %s", tmpName, err) } // Write the credentials to the temporary file, then immediately close // it, whether or not the write succeeds. Note that closing the file here // is required because on Windows we can't move a file while it's open. _, err = f.Write(data) f.Close() if err != nil { return fmt.Errorf("cannot write to temporary file %s: %s", tmpName, err) } // Temporary file now replaces the original file, as atomically as // possible. (At the very least, we should not end up with a file // containing only a partial JSON object.) err = AtomicRename(tmpName, filename) if err != nil { return fmt.Errorf("failed to replace %s with temporary file %s: %s", filename, tmpName, err) } moved = true return nil }
go
github
https://github.com/hashicorp/terraform
internal/replacefile/writefile.go
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Matt Wright <matt@nobien.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: easy_install short_description: Installs Python libraries description: - Installs Python libraries, optionally in a I(virtualenv) version_added: "0.7" options: name: description: - A Python library name required: true default: null aliases: [] virtualenv: description: - an optional I(virtualenv) directory path to install into. If the I(virtualenv) does not exist, it is created automatically required: false default: null virtualenv_site_packages: version_added: "1.1" description: - Whether the virtual environment will inherit packages from the global site-packages directory. Note that if this setting is changed on an already existing virtual environment it will not have any effect, the environment must be deleted and newly created. required: false default: "no" choices: [ "yes", "no" ] virtualenv_command: version_added: "1.1" description: - The command to create the virtual environment with. For example C(pyvenv), C(virtualenv), C(virtualenv2). 
required: false default: virtualenv executable: description: - The explicit executable or a pathname to the executable to be used to run easy_install for a specific version of Python installed in the system. For example C(easy_install-3.3), if there are both Python 2.7 and 3.3 installations in the system and you want to run easy_install for the Python 3.3 installation. version_added: "1.3" required: false default: null state: version_added: "2.0" description: - The desired state of the library. C(latest) ensures that the latest version is installed. required: false choices: [present, latest] default: present notes: - Please note that the C(easy_install) module can only install Python libraries. Thus this module is not able to remove libraries. It is generally recommended to use the M(pip) module which you can first install using M(easy_install). - Also note that I(virtualenv) must be installed on the remote host if the C(virtualenv) parameter is specified. requirements: [ "virtualenv" ] author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' # Examples from Ansible Playbooks - easy_install: name: pip state: latest # Install Bottle into the specified virtualenv. 
- easy_install: name: bottle virtualenv: /webapps/myapp/venv ''' import tempfile import os.path def _is_package_installed(module, name, easy_install, executable_arguments): executable_arguments = executable_arguments + ['--dry-run'] cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) rc, status_stdout, status_stderr = module.run_command(cmd) if rc: module.fail_json(msg=status_stderr) return not ('Reading' in status_stdout or 'Downloading' in status_stdout) def _get_easy_install(module, env=None, executable=None): candidate_easy_inst_basenames = ['easy_install'] easy_install = None if executable is not None: if os.path.isabs(executable): easy_install = executable else: candidate_easy_inst_basenames.insert(0, executable) if easy_install is None: if env is None: opt_dirs = [] else: # Try easy_install with the virtualenv directory first. opt_dirs = ['%s/bin' % env] for basename in candidate_easy_inst_basenames: easy_install = module.get_bin_path(basename, False, opt_dirs) if easy_install is not None: break # easy_install should have been found by now. The final call to # get_bin_path will trigger fail_json. 
    # Tail of _get_easy_install (function begins on the previous physical
    # line): easy_install should have been found by now; this final
    # get_bin_path call has required=True, so it triggers fail_json when the
    # executable really is missing.
    if easy_install is None:
        basename = candidate_easy_inst_basenames[0]
        easy_install = module.get_bin_path(basename, True, opt_dirs)
    return easy_install


def main():
    """Ansible entry point: install a Python library via easy_install,
    optionally inside a virtualenv that is created on demand."""
    arg_spec = dict(
        name=dict(required=True),
        state=dict(required=False,
                   default='present',
                   choices=['present','latest'],
                   type='str'),
        virtualenv=dict(default=None, required=False),
        virtualenv_site_packages=dict(default='no', type='bool'),
        virtualenv_command=dict(default='virtualenv', required=False),
        executable=dict(default='easy_install', required=False),
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    env = module.params['virtualenv']
    executable = module.params['executable']
    site_packages = module.params['virtualenv_site_packages']
    virtualenv_command = module.params['virtualenv_command']
    executable_arguments = []

    # 'latest' maps to easy_install's --upgrade flag.
    if module.params['state'] == 'latest':
        executable_arguments.append('--upgrade')

    # rc/out/err accumulate across the (optional) virtualenv creation and
    # the install step so the final report covers both commands.
    rc = 0
    err = ''
    out = ''

    if env:
        virtualenv = module.get_bin_path(virtualenv_command, True)

        # A missing bin/activate is taken to mean the virtualenv does not
        # exist yet and must be created first.
        if not os.path.exists(os.path.join(env, 'bin', 'activate')):
            if module.check_mode:
                module.exit_json(changed=True)
            command = '%s %s' % (virtualenv, env)
            if site_packages:
                command += ' --system-site-packages'
            # Run from a neutral cwd so virtualenv does not pick up stray
            # setup.cfg/distutils config from the caller's directory.
            cwd = tempfile.gettempdir()
            rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)

            rc += rc_venv
            out += out_venv
            err += err_venv

    easy_install = _get_easy_install(module, env, executable)

    cmd = None
    changed = False
    installed = _is_package_installed(module, name, easy_install,
                                      executable_arguments)

    if not installed:
        if module.check_mode:
            module.exit_json(changed=True)
        cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
        rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd)

        rc += rc_easy_inst
        out += out_easy_inst
        err += err_easy_inst

        changed = True

    # Any non-zero rc from either step fails the module with the combined
    # stderr of both commands.
    if rc != 0:
        module.fail_json(msg=err, cmd=cmd)

    module.exit_json(changed=changed, binary=easy_install,
                     name=name, virtualenv=env)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright (C) 2003-2007 CAMP # Copyright (C) 2007-2008 CAMd # Please see the accompanying LICENSE file for further information. """This module defines a PAW-class. The central object that glues everything together!""" import numpy as np from ase.units import Bohr, Hartree from ase.dft import monkhorst_pack import gpaw.io import gpaw.mpi as mpi import gpaw.occupations as occupations from gpaw import dry_run, memory_estimate_depth, KohnShamConvergenceError from gpaw.hooks import hooks from gpaw.density import Density from gpaw.eigensolvers import get_eigensolver from gpaw.band_descriptor import BandDescriptor from gpaw.grid_descriptor import GridDescriptor from gpaw.kohnsham_layouts import get_KohnSham_layouts from gpaw.hamiltonian import Hamiltonian from gpaw.utilities.timing import Timer from gpaw.xc import XC from gpaw.kpt_descriptor import KPointDescriptor from gpaw.wavefunctions.base import EmptyWaveFunctions from gpaw.wavefunctions.fd import FDWaveFunctions from gpaw.wavefunctions.lcao import LCAOWaveFunctions from gpaw.wavefunctions.pw import PW from gpaw.utilities.memory import MemNode, maxrss from gpaw.parameters import InputParameters from gpaw.setup import Setups from gpaw.output import PAWTextOutput from gpaw.scf import SCFLoop from gpaw.forces import ForceCalculator from gpaw.utilities import h2gpts class PAW(PAWTextOutput): """This is the main calculation object for doing a PAW calculation.""" timer_class = Timer def __init__(self, filename=None, **kwargs): """ASE-calculator interface. The following parameters can be used: `nbands`, `xc`, `kpts`, `spinpol`, `gpts`, `h`, `charge`, `usesymm`, `width`, `mixer`, `hund`, `lmax`, `fixdensity`, `convergence`, `txt`, `parallel`, `softgauss` and `stencils`. 
If you don't specify any parameters, you will get: Defaults: neutrally charged, LDA, gamma-point calculation, a reasonable grid-spacing, zero Kelvin electronic temperature, and the number of bands will be equal to the number of atomic orbitals present in the setups. Only occupied bands are used in the convergence decision. The calculation will be spin-polarized if and only if one or more of the atoms have non-zero magnetic moments. Text output will be written to standard output. For a non-gamma point calculation, the electronic temperature will be 0.1 eV (energies are extrapolated to zero Kelvin) and all symmetries will be used to reduce the number of **k**-points.""" PAWTextOutput.__init__(self) self.grid_descriptor_class = GridDescriptor self.input_parameters = InputParameters() self.timer = self.timer_class() self.scf = None self.forces = ForceCalculator(self.timer) self.wfs = EmptyWaveFunctions() self.occupations = None self.density = None self.hamiltonian = None self.atoms = None self.initialized = False # Possibly read GPAW keyword arguments from file: if filename is not None and filename.endswith('.gkw'): from gpaw.utilities.kwargs import load parameters = load(filename) parameters.update(kwargs) kwargs = parameters filename = None # XXX if filename is not None: comm = kwargs.get('communicator', mpi.world) reader = gpaw.io.open(filename, 'r', comm) self.atoms = gpaw.io.read_atoms(reader) par = self.input_parameters par.read(reader) self.set(**kwargs) if filename is not None: # Setups are not saved in the file if the setups were not loaded # *from* files in the first place if par.setups is None: if par.idiotproof: raise RuntimeError('Setups not specified in file. Use ' 'idiotproof=False to proceed anyway.') else: par.setups = {None : 'paw'} if par.basis is None: if par.idiotproof: raise RuntimeError('Basis not specified in file. 
Use ' 'idiotproof=False to proceed anyway.') else: par.basis = {} self.initialize() self.read(reader) self.print_cell_and_parameters() self.observers = [] def read(self, reader): gpaw.io.read(self, reader) def set(self, **kwargs): p = self.input_parameters # Prune input for things that didn't change for key, value in kwargs.items(): if key == 'kpts': oldbzk_kc = kpts2ndarray(p.kpts) newbzk_kc = kpts2ndarray(value) if (len(oldbzk_kc) == len(newbzk_kc) and (oldbzk_kc == newbzk_kc).all()): kwargs.pop('kpts') elif np.all(p[key] == value): kwargs.pop(key) if (kwargs.get('h') is not None) and (kwargs.get('gpts') is not None): raise TypeError("""You can't use both "gpts" and "h"!""") if 'h' in kwargs: p['gpts'] = None if 'gpts' in kwargs: p['h'] = None # Special treatment for dictionary parameters: for name in ['convergence', 'parallel']: if kwargs.get(name) is not None: tmp = p[name] tmp.update(kwargs[name]) kwargs[name] = tmp self.initialized = False for key in kwargs: if key == 'basis' and p['mode'] == 'fd': continue if key == 'eigensolver': self.wfs.set_eigensolver(None) if key in ['fixmom', 'mixer', 'verbose', 'txt', 'hund', 'random', 'eigensolver', 'poissonsolver', 'idiotproof', 'notify']: continue if key in ['convergence', 'fixdensity', 'maxiter']: self.scf = None continue # More drastic changes: self.scf = None self.wfs.set_orthonormalized(False) if key in ['lmax', 'width', 'stencils', 'external', 'xc', 'occupations']: self.hamiltonian = None self.occupations = None elif key in ['charge']: self.hamiltonian = None self.density = None self.wfs = EmptyWaveFunctions() self.occupations = None elif key in ['kpts', 'nbands']: self.wfs = EmptyWaveFunctions() self.occupations = None elif key in ['h', 'gpts', 'setups', 'spinpol', 'usesymm', 'parallel', 'communicator', 'dtype']: self.density = None self.occupations = None self.hamiltonian = None self.wfs = EmptyWaveFunctions() elif key in ['mode', 'basis']: self.wfs = EmptyWaveFunctions() elif key in ['parsize', 
'parsize_bands', 'parstride_bands']: name = {'parsize': 'domain', 'parsize_bands': 'band', 'parstride_bands': 'stridebands'}[key] raise DeprecationWarning("Keyword argument has been moved " \ "to the 'parallel' dictionary keyword under '%s'." % name) else: raise TypeError("Unknown keyword argument: '%s'" % key) p.update(kwargs) def calculate(self, atoms=None, converge=False, force_call_to_set_positions=False): """Update PAW calculaton if needed.""" self.timer.start('Initialization') if atoms is None: atoms = self.atoms if self.atoms is None: # First time: self.initialize(atoms) self.set_positions(atoms) elif (len(atoms) != len(self.atoms) or (atoms.get_atomic_numbers() != self.atoms.get_atomic_numbers()).any() or (atoms.get_initial_magnetic_moments() != self.atoms.get_initial_magnetic_moments()).any() or (atoms.get_cell() != self.atoms.get_cell()).any() or (atoms.get_pbc() != self.atoms.get_pbc()).any()): # Drastic changes: self.wfs = EmptyWaveFunctions() self.occupations = None self.density = None self.hamiltonian = None self.scf = None self.initialize(atoms) self.set_positions(atoms) elif not self.initialized: self.initialize(atoms) self.set_positions(atoms) elif (atoms.get_positions() != self.atoms.get_positions()).any(): self.density.reset() self.set_positions(atoms) elif not self.scf.converged: # Do not call scf.check_convergence() here as it overwrites # scf.converged, and setting scf.converged is the only # 'practical' way for a user to force the calculation to proceed self.set_positions(atoms) elif force_call_to_set_positions: self.set_positions(atoms) self.timer.stop('Initialization') if self.scf.converged: return else: self.print_cell_and_parameters() self.timer.start('SCF-cycle') for iter in self.scf.run(self.wfs, self.hamiltonian, self.density, self.occupations): self.call_observers(iter) self.print_iteration(iter) self.iter = iter self.timer.stop('SCF-cycle') if self.scf.converged: self.call_observers(iter, final=True) self.print_converged(iter) if 
'converged' in hooks: hooks['converged'](self) elif converge: if 'not_converged' in hooks: hooks['not_converged'](self) raise KohnShamConvergenceError('Did not converge!') def initialize_positions(self, atoms=None): """Update the positions of the atoms.""" if atoms is None: atoms = self.atoms else: # Save the state of the atoms: self.atoms = atoms.copy() self.check_atoms() spos_ac = atoms.get_scaled_positions() % 1.0 self.wfs.set_positions(spos_ac) self.density.set_positions(spos_ac, self.wfs.rank_a) self.hamiltonian.set_positions(spos_ac, self.wfs.rank_a) return spos_ac def set_positions(self, atoms=None): """Update the positions of the atoms and initialize wave functions.""" spos_ac = self.initialize_positions(atoms) self.wfs.initialize(self.density, self.hamiltonian, spos_ac) self.scf.reset() self.forces.reset() self.print_positions() def initialize(self, atoms=None): """Inexpensive initialization.""" if atoms is None: atoms = self.atoms else: # Save the state of the atoms: self.atoms = atoms.copy() par = self.input_parameters world = par.communicator if world is None: world = mpi.world elif hasattr(world, 'new_communicator'): # Check for whether object has correct type already # # Using isinstance() is complicated because of all the # combinations, serial/parallel/debug... 
pass else: # world should be a list of ranks: world = mpi.world.new_communicator(np.asarray(world)) self.wfs.world = world self.set_text(par.txt, par.verbose) natoms = len(atoms) pos_av = atoms.get_positions() / Bohr cell_cv = atoms.get_cell() pbc_c = atoms.get_pbc() Z_a = atoms.get_atomic_numbers() magmom_a = atoms.get_initial_magnetic_moments() magnetic = magmom_a.any() spinpol = par.spinpol if par.hund: if natoms != 1: raise ValueError('hund=True arg only valid for single atoms!') spinpol = True if spinpol is None: spinpol = magnetic elif magnetic and not spinpol: raise ValueError('Non-zero initial magnetic moment for a ' 'spin-paired calculation!') nspins = 1 + int(spinpol) if isinstance(par.xc, str): xc = XC(par.xc) else: xc = par.xc setups = Setups(Z_a, par.setups, par.basis, par.lmax, xc, world) # K-point descriptor kd = KPointDescriptor(par.kpts, nspins) width = par.width if width is None: if kd.gamma: width = 0.0 else: width = 0.1 # eV else: assert par.occupations is None if par.gpts is not None and par.h is None: N_c = np.array(par.gpts) else: if par.h is None: self.text('Using default value for grid spacing.') h = 0.2 else: h = par.h N_c = h2gpts(h, cell_cv) cell_cv /= Bohr if hasattr(self, 'time') or par.dtype==complex: dtype = complex else: if kd.gamma: dtype = float else: dtype = complex kd.set_symmetry(atoms, setups, par.usesymm, N_c) nao = setups.nao nvalence = setups.nvalence - par.charge nbands = par.nbands if nbands is None: nbands = nao elif nbands > nao and par.mode == 'lcao': raise ValueError('Too many bands for LCAO calculation: ' + '%d bands and only %d atomic orbitals!' 
% (nbands, nao)) if nvalence < 0: raise ValueError( 'Charge %f is not possible - not enough valence electrons' % par.charge) M = magmom_a.sum() if par.hund: f_si = setups[0].calculate_initial_occupation_numbers( magmom=0, hund=True, charge=par.charge, nspins=nspins) Mh = f_si[0].sum() - f_si[1].sum() if magnetic and M != Mh: raise RuntimeError('You specified a magmom that does not' 'agree with hunds rule!') else: M = Mh if nbands <= 0: nbands = int(nvalence + M + 0.5) // 2 + (-nbands) if nvalence > 2 * nbands: raise ValueError('Too few bands! Electrons: %d, bands: %d' % (nvalence, nbands)) if par.width is not None: self.text('**NOTE**: please start using ' 'occupations=FermiDirac(width).') if par.fixmom: self.text('**NOTE**: please start using ' 'occupations=FermiDirac(width, fixmagmom=True).') if self.occupations is None: if par.occupations is None: # Create object for occupation numbers: self.occupations = occupations.FermiDirac(width, par.fixmom) else: self.occupations = par.occupations self.occupations.magmom = M cc = par.convergence if par.mode == 'lcao': niter_fixdensity = 0 else: niter_fixdensity = None if self.scf is None: self.scf = SCFLoop( cc['eigenstates'] * nvalence, cc['energy'] / Hartree * max(nvalence, 1), cc['density'] * nvalence, par.maxiter, par.fixdensity, niter_fixdensity) parsize, parsize_bands = par.parallel['domain'], par.parallel['band'] if parsize_bands is None: parsize_bands = 1 # TODO delete/restructure so all checks are in BandDescriptor if nbands % parsize_bands != 0: raise RuntimeError('Cannot distribute %d bands to %d processors' % (nbands, parsize_bands)) if not self.wfs: if parsize == 'domain only': #XXX this was silly! 
parsize = world.size domain_comm, kpt_comm, band_comm = mpi.distribute_cpus(parsize, parsize_bands, nspins, kd.nibzkpts, world, par.idiotproof) kd.set_communicator(kpt_comm) parstride_bands = par.parallel['stridebands'] bd = BandDescriptor(nbands, band_comm, parstride_bands) if (self.density is not None and self.density.gd.comm.size != domain_comm.size): # Domain decomposition has changed, so we need to # reinitialize density and hamiltonian: if par.fixdensity: raise RuntimeError("I'm confused - please specify parsize." ) self.density = None self.hamiltonian = None # Construct grid descriptor for coarse grids for wave functions: gd = self.grid_descriptor_class(N_c, cell_cv, pbc_c, domain_comm, parsize) # do k-point analysis here? XXX args = (gd, nvalence, setups, bd, dtype, world, kd, self.timer) if par.mode == 'lcao': # Layouts used for general diagonalizer sl_lcao = par.parallel['sl_lcao'] if sl_lcao is None: sl_lcao = par.parallel['sl_default'] lcaoksl = get_KohnSham_layouts(sl_lcao, 'lcao', gd, bd, dtype, nao=nao, timer=self.timer) self.wfs = LCAOWaveFunctions(lcaoksl, *args) elif par.mode == 'fd' or isinstance(par.mode, PW): # buffer_size keyword only relevant for fdpw buffer_size = par.parallel['buffer_size'] # Layouts used for diagonalizer sl_diagonalize = par.parallel['sl_diagonalize'] if sl_diagonalize is None: sl_diagonalize = par.parallel['sl_default'] diagksl = get_KohnSham_layouts(sl_diagonalize, 'fd', gd, bd, dtype, buffer_size=buffer_size, timer=self.timer) # Layouts used for orthonormalizer sl_inverse_cholesky = par.parallel['sl_inverse_cholesky'] if sl_inverse_cholesky is None: sl_inverse_cholesky = par.parallel['sl_default'] if sl_inverse_cholesky != sl_diagonalize: message = 'sl_inverse_cholesky != sl_diagonalize ' \ 'is not implemented.' 
raise NotImplementedError(message) orthoksl = get_KohnSham_layouts(sl_inverse_cholesky, 'fd', gd, bd, dtype, buffer_size=buffer_size, timer=self.timer) # Use (at most) all available LCAO for initialization lcaonbands = min(nbands, nao) lcaobd = BandDescriptor(lcaonbands, band_comm, parstride_bands) assert nbands <= nao or bd.comm.size == 1 assert lcaobd.mynbands == min(bd.mynbands, nao) #XXX # Layouts used for general diagonalizer (LCAO initialization) sl_lcao = par.parallel['sl_lcao'] if sl_lcao is None: sl_lcao = par.parallel['sl_default'] initksl = get_KohnSham_layouts(sl_lcao, 'lcao', gd, lcaobd, dtype, nao=nao, timer=self.timer) if par.mode == 'fd': self.wfs = FDWaveFunctions(par.stencils[0], diagksl, orthoksl, initksl, *args) else: # Planewave basis: self.wfs = par.mode(diagksl, orthoksl, initksl, gd, nvalence, setups, bd, world, kd, self.timer) else: self.wfs = par.mode(self, *args) else: self.wfs.set_setups(setups) if not self.wfs.eigensolver: # Number of bands to converge: nbands_converge = cc['bands'] if nbands_converge == 'all': nbands_converge = nbands elif nbands_converge != 'occupied': assert isinstance(nbands_converge, int) if nbands_converge < 0: nbands_converge += nbands eigensolver = get_eigensolver(par.eigensolver, par.mode, par.convergence) eigensolver.nbands_converge = nbands_converge # XXX Eigensolver class doesn't define an nbands_converge property self.wfs.set_eigensolver(eigensolver) if self.density is None: gd = self.wfs.gd if par.stencils[1] != 9: # Construct grid descriptor for fine grids for densities # and potentials: finegd = gd.refine() else: # Special case (use only coarse grid): finegd = gd self.density = Density(gd, finegd, nspins, par.charge + setups.core_charge) self.density.initialize(setups, par.stencils[1], self.timer, magmom_a, par.hund) self.density.set_mixer(par.mixer) if self.hamiltonian is None: gd, finegd = self.density.gd, self.density.finegd self.hamiltonian = Hamiltonian(gd, finegd, nspins, setups, par.stencils[1], 
self.timer, xc, par.poissonsolver, par.external) xc.initialize(self.density, self.hamiltonian, self.wfs, self.occupations) self.text() self.print_memory_estimate(self.txt, maxdepth=memory_estimate_depth) self.txt.flush() if dry_run: self.dry_run() self.initialized = True def dry_run(self): # Can be overridden like in gpaw.atom.atompaw self.print_cell_and_parameters() self.txt.flush() raise SystemExit def restore_state(self): """After restart, calculate fine density and poisson solution. These are not initialized by default. TODO: Is this really the most efficient way? """ spos_ac = self.atoms.get_scaled_positions() % 1.0 self.density.nct.set_positions(spos_ac) self.density.ghat.set_positions(spos_ac) self.density.nct_G = self.density.gd.zeros() self.density.nct.add(self.density.nct_G, 1.0 / self.density.nspins) self.density.interpolate() self.density.calculate_pseudo_charge(0) self.hamiltonian.set_positions(spos_ac, self.wfs.rank_a) self.hamiltonian.update(self.density) def attach(self, function, n, *args, **kwargs): """Register observer function. Call *function* every *n* iterations using *args* and *kwargs* as arguments.""" try: slf = function.im_self except AttributeError: pass else: if slf is self: # function is a bound method of self. Store the name # of the method and avoid circular reference: function = function.im_func.func_name self.observers.append((function, n, args, kwargs)) def call_observers(self, iter, final=False): """Call all registered callback functions.""" for function, n, args, kwargs in self.observers: if ((iter % n) == 0) != final: if isinstance(function, str): function = getattr(self, function) function(*args, **kwargs) def get_reference_energy(self): return self.wfs.setups.Eref * Hartree def write(self, filename, mode='', cmr_params={}, **kwargs): """Write state to file. use mode='all' to write the wave functions. cmr_params is a dictionary that allows you to specify parameters for CMR (Computational Materials Repository). 
""" self.timer.start('IO') gpaw.io.write(self, filename, mode, cmr_params=cmr_params, **kwargs) self.timer.stop('IO') def get_myu(self, k, s): """Return my u corresponding to a certain kpoint and spin - or None""" # very slow, but we are sure that we have it for u in range(len(self.wfs.kpt_u)): if self.wfs.kpt_u[u].k == k and self.wfs.kpt_u[u].s == s: return u return None def get_homo_lumo(self): """Return HOMO and LUMO eigenvalues.""" return self.occupations.get_homo_lumo(self.wfs) * Hartree def estimate_memory(self, mem): """Estimate memory use of this object.""" mem_init = maxrss() # XXX initial overhead includes part of Hamiltonian mem.subnode('Initial overhead', mem_init) for name, obj in [('Density', self.density), ('Hamiltonian', self.hamiltonian), ('Wavefunctions', self.wfs), ]: obj.estimate_memory(mem.subnode(name)) def print_memory_estimate(self, txt=None, maxdepth=-1): """Print estimated memory usage for PAW object and components. maxdepth is the maximum nesting level of displayed components. The PAW object must be initialize()'d, but needs not have large arrays allocated.""" # NOTE. This should work with --dry-run=N # # However, the initial overhead estimate is wrong if this method # is called within a real mpirun/gpaw-python context. 
if txt is None: txt = self.txt print >> txt, 'Memory estimate' print >> txt, '---------------' mem = MemNode('Calculator', 0) try: self.estimate_memory(mem) except AttributeError, m: print >> txt, 'Attribute error:', m print >> txt, 'Some object probably lacks estimate_memory() method' print >> txt, 'Memory breakdown may be incomplete' totalsize = mem.calculate_size() mem.write(txt, maxdepth=maxdepth) def converge_wave_functions(self): """Converge the wave-functions if not present.""" if not self.wfs or not self.scf: self.initialize() else: self.wfs.initialize_wave_functions_from_restart_file() no_wave_functions = (self.wfs.kpt_u[0].psit_nG is None) converged = self.scf.check_convergence(self.density, self.wfs.eigensolver) if no_wave_functions or not converged: self.wfs.eigensolver.error = np.inf self.scf.converged = False # is the density ok ? error = self.density.mixer.get_charge_sloshing() criterion = (self.input_parameters['convergence']['density'] * self.wfs.nvalence) if error < criterion: self.scf.fix_density() self.calculate() def check_atoms(self): """Check that atoms objects are identical on all processors.""" if not mpi.compare_atoms(self.atoms, comm=self.wfs.world): raise RuntimeError('Atoms objects on different processors ' + 'are not identical!') def kpts2ndarray(kpts): """Convert kpts keyword to 2d ndarray of scaled k-points.""" if kpts is None: return np.zeros((1, 3)) if isinstance(kpts[0], int): return monkhorst_pack(kpts) return np.array(kpts)
unknown
codeparrot/codeparrot-clean
--- apiVersion: v1 kind: Namespace metadata: name: jaeger --- apiVersion: apps/v1 kind: Deployment metadata: name: jaeger-agent namespace: jaeger labels: app: jaeger-agent spec: replicas: 1 selector: matchLabels: app: jaeger-agent template: metadata: labels: app: jaeger-agent spec: containers: - name: jaeger-agent image: jaegertracing/all-in-one:latest ports: - containerPort: 6831 protocol: UDP name: agent-udp - containerPort: 16686 name: ui - containerPort: 14268 name: collector - containerPort: 4317 name: otlp-grpc - containerPort: 4318 name: otlp-http - containerPort: 5778 name: sampling env: - name: MEMORY_MAX_TRACES value: "100000" - name: SPAN_STORAGE_TYPE value: "badger" - name: COLLECTOR_OTLP_ENABLED value: "true" - name: LOG_LEVEL value: "debug" resources: limits: memory: "1Gi" cpu: "500m" requests: memory: "512Mi" cpu: "250m" readinessProbe: httpGet: path: / port: 16686 initialDelaySeconds: 10 periodSeconds: 10 livenessProbe: httpGet: path: / port: 16686 initialDelaySeconds: 30 periodSeconds: 30 --- apiVersion: v1 kind: Service metadata: name: jaeger-agent namespace: jaeger labels: app: jaeger-agent spec: selector: app: jaeger-agent ports: - name: agent-udp port: 6831 targetPort: 6831 protocol: UDP - name: ui port: 16686 targetPort: 16686 - name: collector port: 14268 targetPort: 14268 - name: otlp-grpc port: 4317 targetPort: 4317 - name: otlp-http port: 4318 targetPort: 4318 - name: sampling port: 5778 targetPort: 5778 type: ClusterIP ---
unknown
github
https://github.com/grafana/grafana
apps/iam/local/yamls/jaeger.yaml
#!/usr/bin/env python from __future__ import print_function import sys, gym, time # # Test yourself as a learning agent! Pass environment name as a command-line argument, for example: # # python keyboard_agent.py SpaceInvadersNoFrameskip-v4 # env = gym.make('SpaceInvaders-v0' if len(sys.argv)<2 else sys.argv[1]) if not hasattr(env.action_space, 'n'): raise Exception('Keyboard agent only supports discrete action spaces') ACTIONS = env.action_space.n SKIP_CONTROL = 0 # Use previous control decision SKIP_CONTROL times, that's how you # can test what skip is still usable. human_agent_action = 0 human_wants_restart = False human_sets_pause = False def key_press(key, mod): global human_agent_action, human_wants_restart, human_sets_pause if key==0xff0d: human_wants_restart = True if key==32: human_sets_pause = not human_sets_pause print("Press key : {0}".format(key)) a = int( key - ord('0') ) if a <= 0 or a >= ACTIONS: return human_agent_action = a def key_release(key, mod): global human_agent_action a = int( key - ord('0') ) if a <= 0 or a >= ACTIONS: return if human_agent_action == a: human_agent_action = 0 env.render() env.unwrapped.viewer.window.on_key_press = key_press env.unwrapped.viewer.window.on_key_release = key_release def rollout(env): global human_agent_action, human_wants_restart, human_sets_pause human_wants_restart = False obser = env.reset() skip = 0 total_reward = 0 total_timesteps = 0 while 1: if not skip: #print("taking action {}".format(human_agent_action)) a = human_agent_action total_timesteps += 1 skip = SKIP_CONTROL else: skip -= 1 obser, r, done, info = env.step(a) if r != 0: print("reward %0.3f" % r) total_reward += r window_still_open = env.render() if window_still_open==False: return False if done: break if human_wants_restart: break while human_sets_pause: env.render() time.sleep(0.1) time.sleep(0.1) print("timesteps %i reward %0.2f" % (total_timesteps, total_reward)) print("ACTIONS={}".format(ACTIONS)) print("Press keys 1 2 3 ... 
to take actions 1 2 3 ...") print("No keys pressed is taking action 0") while 1: window_still_open = rollout(env) if window_still_open==False: break
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python from plasTeX import Command, Environment from plasTeX.Base import textbf, textit, textsl, textrm, textsf from plasTeX.Base import List, label, newcommand, newenvironment from plasTeX.Base import renewcommand, renewenvironment from plasTeX.Base import itemize, enumerate_, description from plasTeX.Base import part, section, subsection, subsubsection from plasTeX.Base import tableofcontents, thebibliography, appendix from plasTeX.Base import abstract, verse, quotation, quote, footnote from plasTeX.Packages.color import color from plasTeX.Packages.graphicx import includegraphics from plasTeX.Packages.alltt import alltt as semiverbatim from plasTeX.Packages.hyperref import hypertarget, hyperlink from plasTeX.Packages.article import * class frame(Command): args = '< overlay > self' subtitle = None def invoke(self, tex): # This macro can be an environment or a command each # with different arguments. if self.macroMode == Command.MODE_BEGIN or \ self.macroMode == Command.MODE_END: f = self.ownerDocument.createElement('frameenv') f.parentNode = self.parentNode f.macroMode = self.macroMode f.invoke(tex) # Add to frames collection if self.macroMode == Command.MODE_BEGIN: f.addToFrames() return [f] # Add to frames collection self.addToFrames() return Command.invoke(self, tex) def addToFrames(self): """ Add this frame to the frame collection """ u = self.ownerDocument.userdata frames = u.get('frames') if frames is None: frames = [] u['frames'] = frames frames.append(self) class frameenv(Environment): args = '< overlay > < defaultoverlay > [ options:dict ] { title } { subtitle }' subtitle = None forcePars = True def addToFrames(self): """ Add this frame to the frame collection """ u = self.ownerDocument.userdata frames = u.get('frames') if frames is None: frames = [] u['frames'] = frames frames.append(self) class frametitle(Command): args = '< overlay > [ shorttitle ] self' def invoke(self, tex): Command.invoke(self, tex) 
self.ownerDocument.userdata['frames'][-1].title = self class framesubtitle(Command): args = '< overlay > self' def invoke(self, tex): Command.invoke(self, tex) self.ownerDocument.userdata['frames'][-1].subtitle = self class setbeamertemplate(Command): args = '< overlay > theme [ options ] [ suboptions ]' class setbeamersize(Command): args = 'options:dict' class logo(Command): args = '< overlay > self' class setbeamercolor(Command): args = 'mode color' class pause(Command): args = '[ number:int ]' class onslide(Command): args = '*+ < overlay > { self }' class only(Command): args = '< overlay > self < overlay2 >' class onlyenv(Environment): args = '< overlay >' class uncover(Command): args = '< overlay > self' class uncoverenv(Environment): args = uncover.args class visible(Command): args = '< overlay > self' class visibleenv(Environment): args = visible.args class invisible(Command): args = '< overlay > self' class invisibleenv(Environment): args = invisible.args class alt(Command): args = '< overlay > default alternative < overlay2 >' class altenv(Environment): args = '< overlay > begin end alternatebegin alternate end < overlay2 >' class temporal(Command): args = '< overlay > before default after' class alert(Command): args = '< overlay > self' class overlayarea(Environment): args = 'width height' class overprint(Environment): args = 'width' List.item.args = '< alert >' + List.item.args + '< alert2 >' textbf.args = '< overlay >' + textbf.args textit.args = '< overlay >' + textit.args textsl.args = '< overlay >' + textsl.args textrm.args = '< overlay >' + textrm.args textsf.args = '< overlay >' + textsf.args color.args = '< overlay >' + color.args label.args = '< overlay >' + label.args includegraphics.args = '< overlay >' + includegraphics.args newcommand.args = '< overlay >' + newcommand.args renewcommand.args = '< overlay >' + renewcommand.args newenvironment.args = '< overlay >' + newenvironment.args renewenvironment.args = '< overlay >' + renewenvironment.args 
itemize.args = '[ overlay ]' enumerate_.args = '[ overlay ] [ template ]' description.args = '[ overlay ] [ longtext ]' section.args = '< overlay >' + section.args subsection.args = '< overlay >' + subsection.args subsubsection.args = '< overlay >' + subsubsection.args part.args = '< overlay >' + part.args thebibliography.bibitem.args = '< overlay >' + thebibliography.bibitem.args appendix.args = '< overlay >' + appendix.args hypertarget.args = '< overlay >' + hypertarget.args hyperlink.args = '< overlay >' + hyperlink.args + '< overlay2 >' tableofcontents.args = '[ options:dict ]' + tableofcontents.args abstract.args = '< overlay >' + abstract.args verse.args = '< overlay >' + verse.args quotation.args = '< overlay >' + quotation.args quote.args = '< overlay >' + quote.args footnote.args = '< overlay > [ options:dict ]' + footnote.args class resetcounteronoverlays(Command): args = 'counter' class resetcountonoverlays(Command): args = 'count' class action(Command): args = '< action > self' class actionenv(Environment): args = '< action >' class beamerdefaultoverlayspecification(Command): args = 'overlay' class AtBeginSection(Command): args = '[ special ] text' class AtBeginSubsection(AtBeginSection): pass class AtBeginSubsubsection(AtBeginSection): pass class partpage(Command): pass class AtBeginPart(Command): args = 'text' class lecture(Command): args = '[ shorttitle ] title { label }' class includeonlylecture(Command): args = 'label' class AtBeginLecture(Command): args = 'text' class beamerbutton(Command): args = 'self' class beamergotobutton(Command): args = 'self' class beamerskipbutton(Command): args = 'self' class beamerreturnbutton(Command): args = 'self' class HyperlinkCommand(Command): args = '< overlay > self < overlay2 >' class hyperlinkslideprev(HyperlinkCommand): pass class hyperlinkslidenext(HyperlinkCommand): pass class hyperlinkframestart(HyperlinkCommand): pass class hyperlinkframeend(HyperlinkCommand): pass class 
hyperlinkframestartnext(HyperlinkCommand): pass class hyperlinkframeendprev(HyperlinkCommand): pass class hyperlinkpresentationstart(HyperlinkCommand): pass class hyperlinkpresentationend(HyperlinkCommand): pass class hyperlinkappendixstart(HyperlinkCommand): pass class hyperlinkappendixend(HyperlinkCommand): pass class hyperlinkdocumentstart(HyperlinkCommand): pass class hyperlinkdocumentend(HyperlinkCommand): pass class againframe(Command): args = '< overlay > [ default ] [ options:dict ] name' class framezoom(Command): args = '< buttonoverlay > < zoomedoverlay > [ options:dict ] ( pos:list ) ( zoom:list )' class structure(Command): args = '< overlay > self' class structureenv(Environment): args = '< overlay >' class block(Environment): args = '< action > title < action2 >' class alertblock(Environment): args = '< action > title < action2 >' class exampleblock(Environment): args = '< action > title < action2 >' # # Theorems # # theorem.args = '< action > [ text ] < action2 >' # corollary.args = '< action > [ text ] < action2 >' # definition.args = '< action > [ text ] < action2 >' # definitions.args = '< action > [ text ] < action2 >' # fact.args = '< action > [ text ] < action2 >' # example.args = '< action > [ text ] < action2 >' # examples.args = '< action > [ text ] < action2 >' class beamercolorbox(Environment): args = '[ options:dict ] color' class beamerboxesrounded(Environment): args = '[ options:dict ] title' class columns(Environment): args = '[ options:dict ]' class column(Command): args = '[ placement ] width' def invoke(self, tex): # This macro can be an environment or a command each # with different arguments. 
if self.macroMode == Command.MODE_BEGIN or \ self.macroMode == Command.MODE_END: f = self.ownerDocument.createElement('columnenv') f.parentNode = self.parentNode f.macroMode = self.macroMode res = f.invoke(tex) if res is None: res = [f] return res return Command.invoke(self, tex) class columnenv(Environment): args = column.args class movie(Command): args = '[ options:dict ] text filename:str' class hyperlinkmovie(Command): args = '[ options:dict ] label text' class animate(Command): args = '< overlay >' class animatevalue(Command): args = '< interval > name start end' class multiinclude(Command): args = '[ overlay ] [ options:dict ] filename:str' class sound(Command): args = '[ options:dict ] text filename:str' class hyperlinksound(Command): args = '[ options:dict ] label text' class hyperlinkmute(Command): args = 'text' # # Transitions # class TransitionCommand(Command): args = '< overlay > [ options:dict ]' class transblindshorizontal(TransitionCommand): pass class transblindsvertical(TransitionCommand): pass class transboxin(TransitionCommand): pass class transboxout(TransitionCommand): pass class transdissolve(TransitionCommand): pass class transglitter(TransitionCommand): pass class transsplitverticalout(TransitionCommand): pass class transsplitverticalin(TransitionCommand): pass class transsplithorizontalin(TransitionCommand): pass class transsplithorizontalout(TransitionCommand): pass class transwipe(TransitionCommand): pass class transduration(Command): args = '< overlay > seconds:int' # # Themes # class usetheme(Command): args = '[ options:dict ] name:list:str' class usecolortheme(Command): args = '[ options:dict ] name:list:str' class usefonttheme(Command): args = '[ options:dict ] name:list:str' class useinnertheme(Command): args = '[ options:dict ] name:list:str' class useoutertheme(Command): args = '[ options:dict ] name:list:str' class addheadbox(Command): args = 'color template' class addfootbox(Command): args = 'color template'
unknown
codeparrot/codeparrot-clean
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tensorflow import ( "fmt" "reflect" "testing" ) func TestOperationAttrs(t *testing.T) { g := NewGraph() i := 0 makeConst := func(v interface{}) Output { op, err := Const(g, fmt.Sprintf("const/%d/%+v", i, v), v) i++ if err != nil { t.Fatal(err) } return op } makeTensor := func(v interface{}) *Tensor { tensor, err := NewTensor(v) if err != nil { t.Fatal(err) } return tensor } cases := []OpSpec{ { Name: "type", Type: "Placeholder", Attrs: map[string]interface{}{ "dtype": Float, }, }, { Name: "list(float)", Type: "Bucketize", Input: []Input{ makeConst([]float32{1, 2, 3, 4}), }, Attrs: map[string]interface{}{ "boundaries": []float32{0, 1, 2, 3, 4, 5}, }, }, { Name: "list(float) empty", Type: "Bucketize", Input: []Input{ makeConst([]float32{}), }, Attrs: map[string]interface{}{ "boundaries": []float32(nil), }, }, /* TODO(ashankar): debug this issue and add it back later. 
{ Name: "list(type),list(shape)", Type: "InfeedEnqueueTuple", Input: []Input{ OutputList([]Output{ makeConst(float32(1)), makeConst([][]int32{{2}}), }), }, Attrs: map[string]interface{}{ "dtypes": []DataType{Float, Int32}, "shapes": []Shape{ScalarShape(), MakeShape(1, 1)}, }, }, { Name: "list(type),list(shape) empty", Type: "InfeedEnqueueTuple", Input: []Input{ OutputList([]Output{ makeConst([][]int32{{2}}), }), }, Attrs: map[string]interface{}{ "dtypes": []DataType{Int32}, "shapes": []Shape(nil), }, }, { Name: "list(type) empty,string empty,int", Type: "_XlaSendFromHost", Input: []Input{ OutputList([]Output{}), makeConst(""), }, Attrs: map[string]interface{}{ "Tinputs": []DataType(nil), "key": "", "device_ordinal": int64(0), }, }, */ { Name: "list(int),int", Type: "StringToHashBucketStrong", Input: []Input{ makeConst(""), }, Attrs: map[string]interface{}{ "num_buckets": int64(2), "key": []int64{1, 2}, }, }, { Name: "list(int) empty,int", Type: "StringToHashBucketStrong", Input: []Input{ makeConst(""), }, Attrs: map[string]interface{}{ "num_buckets": int64(2), "key": ([]int64)(nil), }, }, { Name: "list(string),type", Type: "TensorSummary", Input: []Input{ makeConst(""), }, Attrs: map[string]interface{}{ "T": String, "labels": []string{"foo", "bar"}, }, }, { Name: "list(string) empty,type", Type: "TensorSummary", Input: []Input{ makeConst(""), }, Attrs: map[string]interface{}{ "T": String, "labels": ([]string)(nil), }, }, { Name: "tensor", Type: "Const", Attrs: map[string]interface{}{ "dtype": String, "value": makeTensor("foo"), }, }, } for i, spec := range cases { op, err := g.AddOperation(spec) if err != nil { t.Fatal(err) } for key, want := range spec.Attrs { out, err := op.Attr(key) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(out, want) { t.Fatalf("%d. %q: Got %#v, wanted %#v", i, key, out, want) } wantT, ok := want.(*Tensor) if ok { wantVal := wantT.Value() outVal := out.(*Tensor).Value() if !reflect.DeepEqual(outVal, wantVal) { t.Fatalf("%d. 
%q: Got %#v, wanted %#v", i, key, outVal, wantVal) } } } } }
go
github
https://github.com/tensorflow/tensorflow
tensorflow/go/attrs_test.go
# This file is part of wger Workout Manager. # # wger Workout Manager is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # wger Workout Manager is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # Standard Library import datetime # Django from django.contrib.auth.models import User from django.core.cache import cache from django.core.exceptions import ValidationError from django.urls import ( reverse, reverse_lazy ) # wger from wger.core.tests import api_base_test from wger.core.tests.base_testcase import ( WgerAddTestCase, WgerDeleteTestCase, WgerEditTestCase, WgerTestCase ) from wger.manager.models import ( Workout, WorkoutLog, WorkoutSession ) from wger.utils.cache import cache_mapper """ Tests for workout sessions """ class AddWorkoutSessionTestCase(WgerAddTestCase): """ Tests adding a workout session """ object_class = WorkoutSession url = reverse_lazy('manager:session:add', kwargs={'workout_pk': 1, 'year': datetime.date.today().year, 'month': datetime.date.today().month, 'day': datetime.date.today().day}) data = { 'user': 1, 'workout': 1, 'date': datetime.date.today(), 'notes': 'Some interesting and deep insights', 'impression': '3', 'time_start': datetime.time(10, 0), 'time_end': datetime.time(13, 0) } class EditWorkoutSessionTestCase(WgerEditTestCase): """ Tests editing a workout session """ object_class = WorkoutSession url = 'manager:session:edit' pk = 3 data = { 'user': 1, 'workout': 2, 'date': datetime.date(2014, 1, 30), 'notes': 'My new insights', 'impression': '3', 'time_start': datetime.time(10, 0), 'time_end': 
datetime.time(13, 0) } class WorkoutSessionModelTestCase(WgerTestCase): """ Tests other functionality from the model """ def test_unicode(self): """ Test the unicode representation """ session = WorkoutSession() session.workout = Workout.objects.get(pk=1) session.date = datetime.date.today() self.assertEqual('{0}'.format(session), '{0} - {1}'.format(Workout.objects.get(pk=1), datetime.date.today())) class DeleteTestWorkoutTestCase(WgerDeleteTestCase): """ Tests deleting a Workout """ object_class = WorkoutSession url = 'manager:session:delete' pk = 3 class WorkoutSessionDeleteLogsTestCase(WgerTestCase): """ Tests that deleting a session can also delete all weight logs """ def test_delete_logs(self): self.user_login('admin') session = WorkoutSession.objects.get(pk=1) count_before = WorkoutLog.objects.filter(user__username=session.user.username, date=session.date).count() self.assertEqual(count_before, 1) response = self.client.post(reverse('manager:session:delete', kwargs={'pk': 1, 'logs': 'logs'})) self.assertEqual(response.status_code, 302) count_after = WorkoutLog.objects.filter(user__username=session.user.username, date=session.date).count() self.assertEqual(count_after, 0) class WorkoutSessionTestCase(WgerTestCase): """ Tests other workout session methods """ def test_model_validation(self): """ Tests the custom clean() method """ self.user_login('admin') # Values OK session = WorkoutSession() session.workout = Workout.objects.get(pk=2) session.user = User.objects.get(pk=1) session.date = datetime.date.today() session.time_start = datetime.time(12, 0) session.time_end = datetime.time(13, 0) session.impression = '3' session.notes = 'Some notes here' self.assertFalse(session.full_clean()) # No start or end times, also OK session.time_start = None session.time_end = None self.assertFalse(session.full_clean()) # Start time but not end time session.time_start = datetime.time(17, 0) session.time_end = None self.assertRaises(ValidationError, session.full_clean) # No 
start time but end time session.time_start = None session.time_end = datetime.time(17, 0) self.assertRaises(ValidationError, session.full_clean) # Start time after end time session.time_start = datetime.time(17, 0) session.time_end = datetime.time(13, 0) self.assertRaises(ValidationError, session.full_clean) class WorkoutLogCacheTestCase(WgerTestCase): """ Workout log cache test case """ def test_cache_update_session(self): """ Test that the caches are cleared when updating a workout session """ log_hash = hash((1, 2012, 10)) self.user_login('admin') self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10})) session = WorkoutSession.objects.get(pk=1) session.notes = 'Lorem ipsum' session.save() self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash))) def test_cache_update_session_2(self): """ Test that the caches are only cleared for a the session's month """ log_hash = hash((1, 2012, 10)) self.user_login('admin') self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10})) # Session is from 2014 session = WorkoutSession.objects.get(pk=2) session.notes = 'Lorem ipsum' session.save() self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash))) def test_cache_delete_session(self): """ Test that the caches are cleared when deleting a workout session """ log_hash = hash((1, 2012, 10)) self.user_login('admin') self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10})) session = WorkoutSession.objects.get(pk=1) session.delete() self.assertFalse(cache.get(cache_mapper.get_workout_log_list(log_hash))) def test_cache_delete_session_2(self): """ Test that the caches are only cleared for a the session's month """ log_hash = hash((1, 2012, 10)) self.user_login('admin') self.client.get(reverse('manager:workout:calendar', kwargs={'year': 2012, 'month': 10})) session = WorkoutSession.objects.get(pk=2) session.delete() 
self.assertTrue(cache.get(cache_mapper.get_workout_log_list(log_hash))) class WorkoutSessionApiTestCase(api_base_test.ApiBaseResourceTestCase): """ Tests the workout overview resource """ pk = 4 resource = WorkoutSession private_resource = True data = {'workout': 3, 'date': datetime.date(2014, 1, 25), 'notes': 'My new insights', 'impression': '3', 'time_start': datetime.time(10, 0), 'time_end': datetime.time(13, 0)}
unknown
codeparrot/codeparrot-clean
{ "kind": "Dashboard", "apiVersion": "dashboard.grafana.app/v2alpha1", "metadata": { "name": "test-v2alpha1-complete", "labels": { "category": "test" }, "annotations": { "description": "Complete example of v2alpha1 dashboard features" } }, "spec": { "annotations": [ { "kind": "AnnotationQuery", "spec": { "datasource": { "type": "grafana", "uid": "-- Grafana --" }, "query": { "kind": "grafana", "spec": { "limit": 100, "matchAny": false, "tags": [], "type": "dashboard" } }, "enable": true, "hide": false, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations \u0026 Alerts", "builtIn": true } }, { "kind": "AnnotationQuery", "spec": { "datasource": { "type": "prometheus", "uid": "gdev-prometheus" }, "query": { "kind": "prometheus", "spec": { "expr": "changes(process_start_time_seconds[1m])", "refId": "Anno" } }, "enable": true, "hide": false, "iconColor": "yellow", "name": "Prometheus Annotations", "builtIn": false } } ], "cursorSync": "Tooltip", "description": "This dashboard demonstrates all features that need to be converted from v2alpha1 to v2beta1", "editable": true, "elements": { "panel-1": { "kind": "Panel", "spec": { "id": 1, "title": "Panel with Conditional Rendering", "description": "This panel demonstrates conditional rendering features", "links": [], "data": { "kind": "QueryGroup", "spec": { "queries": [ { "kind": "PanelQuery", "spec": { "query": { "kind": "prometheus", "spec": { "expr": "up{job=\"grafana\"}" } }, "datasource": { "type": "prometheus", "uid": "gdev-prometheus" }, "refId": "A", "hidden": false } } ], "transformations": [ { "kind": "reduce", "spec": { "id": "reduce", "options": { "includeTimeField": false, "mode": "reduceFields", "reducers": [ "mean" ] } } } ], "queryOptions": {} } }, "vizConfig": { "kind": "stat", "spec": { "pluginVersion": "12.1.0-pre", "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "textMode": "auto" }, "fieldConfig": { "defaults": { "mappings": [ { "type": "value", "options": { "0": { 
"text": "Down", "color": "red" }, "1": { "text": "Up", "color": "green" } } } ], "thresholds": { "mode": "absolute", "steps": [ { "value": 0, "color": "red" }, { "value": 1, "color": "green" } ] }, "color": { "mode": "thresholds" } }, "overrides": [] } } } } } }, "layout": { "kind": "RowsLayout", "spec": { "rows": [ { "kind": "Row", "spec": { "title": "Conditional Row", "collapse": false, "hideHeader": false, "fillScreen": false, "conditionalRendering": { "kind": "ConditionalRenderingGroup", "spec": { "visibility": "show", "condition": "and", "items": [ { "kind": "ConditionalRenderingVariable", "spec": { "variable": "group_by", "operator": "includes", "value": "instance" } }, { "kind": "ConditionalRenderingData", "spec": { "value": true } }, { "kind": "ConditionalRenderingTimeRangeSize", "spec": { "value": "1h" } } ] } }, "layout": { "kind": "GridLayout", "spec": { "items": [ { "kind": "GridLayoutItem", "spec": { "x": 0, "y": 0, "width": 24, "height": 8, "element": { "kind": "ElementReference", "name": "panel-1" } } } ] } } } } ] } }, "links": [], "liveNow": true, "preload": true, "tags": [ "test", "example", "migration" ], "timeSettings": { "timezone": "browser", "from": "now-6h", "to": "now", "autoRefresh": "10s", "autoRefreshIntervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "hideTimepicker": false, "weekStart": "monday", "fiscalYearStartMonth": 0 }, "title": "Test: Complete V2alpha1 Dashboard Example", "variables": [ { "kind": "QueryVariable", "spec": { "name": "prometheus_query", "current": { "text": "All", "value": [ "$__all" ] }, "label": "Prometheus Query", "hide": "dontHide", "refresh": "time", "skipUrlSync": false, "description": "Shows all up metrics", "datasource": { "type": "prometheus", "uid": "gdev-prometheus" }, "query": { "kind": "prometheus", "spec": { "expr": "up" } }, "regex": "", "sort": "alphabetical", "definition": "up", "options": null, "multi": true, "includeAll": true, "allowCustomValue": false } }, { "kind": 
"TextVariable", "spec": { "name": "text_var", "current": { "selected": true, "text": "server1", "value": "server1" }, "query": "server1,server2,server3", "label": "Text Variable", "hide": "dontHide", "skipUrlSync": false, "description": "A simple text variable" } }, { "kind": "ConstantVariable", "spec": { "name": "constant_var", "query": "production", "current": { "selected": true, "text": "production", "value": "production" }, "label": "Constant", "hide": "dontHide", "skipUrlSync": true, "description": "A constant value" } }, { "kind": "DatasourceVariable", "spec": { "name": "ds_var", "pluginId": "prometheus", "refresh": "load", "regex": "/^gdev-/", "current": { "text": "gdev-prometheus", "value": "gdev-prometheus" }, "options": [ { "text": "gdev-prometheus", "value": "gdev-prometheus" } ], "multi": false, "includeAll": false, "label": "Datasource", "hide": "dontHide", "skipUrlSync": false, "description": "Select a datasource", "allowCustomValue": false } }, { "kind": "IntervalVariable", "spec": { "name": "interval", "query": "1m,5m,10m,30m,1h,6h,12h,1d", "current": { "selected": true, "text": "5m", "value": "5m" }, "options": [ { "text": "1m", "value": "1m" }, { "text": "5m", "value": "5m" }, { "text": "10m", "value": "10m" }, { "text": "30m", "value": "30m" }, { "text": "1h", "value": "1h" }, { "text": "6h", "value": "6h" }, { "text": "12h", "value": "12h" }, { "text": "1d", "value": "1d" } ], "auto": true, "auto_min": "10s", "auto_count": 30, "refresh": "load", "label": "Interval", "hide": "dontHide", "skipUrlSync": false, "description": "Time interval selection" } }, { "kind": "CustomVariable", "spec": { "name": "custom_var", "query": "prod : Production, staging : Staging, dev : Development", "current": { "text": [ "Production" ], "value": [ "prod" ] }, "options": [ { "text": "Production", "value": "prod" }, { "text": "Staging", "value": "staging" }, { "text": "Development", "value": "dev" } ], "multi": true, "includeAll": true, "allValue": "*", "label": 
"Custom Options", "hide": "dontHide", "skipUrlSync": false, "description": "Custom multi-value variable", "allowCustomValue": true } }, { "kind": "GroupByVariable", "spec": { "name": "group_by", "datasource": { "type": "prometheus", "uid": "gdev-prometheus" }, "current": { "text": "instance", "value": "instance" }, "options": null, "multi": false, "label": "Group By", "hide": "dontHide", "skipUrlSync": false, "description": "Group metrics by label" } }, { "kind": "AdhocVariable", "spec": { "name": "filters", "datasource": { "type": "prometheus", "uid": "gdev-prometheus" }, "baseFilters": [ { "key": "job", "operator": "=", "value": "grafana", "condition": "AND" } ], "filters": [], "defaultKeys": [ { "text": "job", "value": "job", "expandable": true }, { "text": "instance", "value": "instance", "expandable": true } ], "label": "Filters", "hide": "dontHide", "skipUrlSync": false, "allowCustomValue": false } } ] }, "status": { "conversion": { "failed": false, "storedVersion": "v2beta1" } } }
json
github
https://github.com/grafana/grafana
apps/dashboard/pkg/migration/conversion/testdata/input/v2alpha1.complete.json
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A client interface for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import sys import threading import tensorflow.python.platform import numpy as np import six from tensorflow.python import pywrap_tensorflow as tf_session from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.platform import logging from tensorflow.python.util import compat class SessionInterface(object): """Base class for implementations of TensorFlow client sessions.""" @property def graph(self): """The underlying TensorFlow graph, to be used in building Operations.""" raise NotImplementedError('graph') @property def sess_str(self): """The TensorFlow process to which this session will connect.""" raise NotImplementedError('sess_str') def run(self, fetches, feed_dict=None): """Runs operations in the session. See `Session.run()` for details.""" raise NotImplementedError('Run') class BaseSession(SessionInterface): """A class for interacting with a TensorFlow computation. The BaseSession enables incremental graph building with inline execution of Operations and evaluation of Tensors. """ def __init__(self, target='', graph=None, config=None): """Constructs a new TensorFlow session. 
Args: target: (Optional) The TensorFlow execution engine to connect to. graph: (Optional) The graph to be used. If this argument is None, the default graph will be used. config: (Optional) ConfigProto proto used to configure the session. Raises: RuntimeError: If an error occurs while creating the TensorFlow session. """ if graph is None: self._graph = ops.get_default_graph() else: self._graph = graph self._opened = False self._closed = False self._current_version = 0 self._extend_lock = threading.Lock() self._target = target self._session = None opts = tf_session.TF_NewSessionOptions(target=target, config=config) try: status = tf_session.TF_NewStatus() try: self._session = tf_session.TF_NewSession(opts, status) if tf_session.TF_GetCode(status) != 0: raise RuntimeError(compat.as_text(tf_session.TF_Message(status))) finally: tf_session.TF_DeleteStatus(status) finally: tf_session.TF_DeleteSessionOptions(opts) def close(self): """Closes this session. Calling this method frees all resources associated with the session. Raises: RuntimeError: If an error occurs while closing the session. """ with self._extend_lock: if self._opened and not self._closed: self._closed = True try: status = tf_session.TF_NewStatus() tf_session.TF_CloseSession(self._session, status) if tf_session.TF_GetCode(status) != 0: raise RuntimeError(compat.as_text(tf_session.TF_Message(status))) finally: tf_session.TF_DeleteStatus(status) def __del__(self): self.close() try: status = tf_session.TF_NewStatus() if self._session is not None: tf_session.TF_DeleteSession(self._session, status) if tf_session.TF_GetCode(status) != 0: raise RuntimeError(compat.as_text(tf_session.TF_Message(status))) self._session = None finally: tf_session.TF_DeleteStatus(status) @property def graph(self): """The graph that was launched in this session.""" return self._graph @property def graph_def(self): """A serializable version of the underlying TensorFlow graph. 
Returns: A graph_pb2.GraphDef proto containing nodes for all of the Operations in the underlying TensorFlow graph. """ return self._graph.as_graph_def() @property def sess_str(self): return self._target def as_default(self): """Returns a context manager that makes this object the default session. Use with the `with` keyword to specify that calls to [`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or [`Tensor.run()`](../../api_docs/python/framework.md#Tensor.run) should be executed in this session. ```python c = tf.constant(..) sess = tf.Session() with sess.as_default(): assert tf.get_default_session() is sess print(c.eval()) ``` To get the current default session, use [`tf.get_default_session()`](#get_default_session). *N.B.* The `as_default` context manager *does not* close the session when you exit the context, and you must close the session explicitly. ```python c = tf.constant(...) sess = tf.Session() with sess.as_default(): print(c.eval()) # ... with sess.as_default(): print(c.eval()) sess.close() ``` Alternatively, you can use `with tf.Session():` to create a session that is automatically closed on exiting the context, including when an uncaught exception is raised. *N.B.* The default graph is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: A context manager using this session as the default session. """ return ops.default_session(self) # Eventually, this registration could be opened up to support custom # Tensor expansions. 
Expects tuples of (Type, fetch_fn, feed_fn), # where the signatures are: # fetch_fn : Type -> (list of Tensors, # lambda: list of fetched np.ndarray -> TypeVal) # feed_fn : Type, TypeVal -> list of (Tensor, value) # Conceptually, fetch_fn describes how to expand fetch into its # component Tensors and how to contracting the fetched results back into # a single return value. feed_fn describes how to unpack a single fed # value and map it to feeds of a Tensor and its corresponding value. # pylint: disable=g-long-lambda _REGISTERED_EXPANSIONS = [ # SparseTensors are fetched as SparseTensorValues. They can be fed # SparseTensorValues or normal tuples. (ops.SparseTensor, lambda fetch: ( [fetch.indices, fetch.values, fetch.shape], lambda fetched_vals: ops.SparseTensorValue(*fetched_vals)), lambda feed, feed_val: list(zip( [feed.indices, feed.values, feed.shape], feed_val))), # The default catches all types and performs no expansions. (object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]), lambda feed, feed_val: [(feed, feed_val)])] # pylint: enable=g-long-lambda def run(self, fetches, feed_dict=None): """Runs the operations and evaluates the tensors in `fetches`. This method runs one "step" of TensorFlow computation, by running the necessary graph fragment to execute every `Operation` and evaluate every `Tensor` in `fetches`, substituting the values in `feed_dict` for the corresponding input values. The `fetches` argument may be a list of graph elements or a single graph element, and these determine the return value of this method. A graph element can be one of the following types: * If the *i*th element of `fetches` is an [`Operation`](../../api_docs/python/framework.md#Operation), the *i*th return value will be `None`. * If the *i*th element of `fetches` is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the *i*th return value will be a numpy ndarray containing the value of that tensor. 
* If the *i*th element of `fetches` is a [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor), the *i*th return value will be a [`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue) containing the value of that sparse tensor. The optional `feed_dict` argument allows the caller to override the value of tensors in the graph. Each key in `feed_dict` can be one of the following types: * If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the value may be a Python scalar, string, list, or numpy ndarray that can be converted to the same `dtype` as that tensor. Additionally, if the key is a [placeholder](../../api_docs/python/io_ops.md#placeholder), the shape of the value will be checked for compatibility with the placeholder. * If the key is a [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor), the value should be a [`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue). Args: fetches: A single graph element, or a list of graph elements (described above). feed_dict: A dictionary that maps graph elements to values (described above). Returns: Either a single value if `fetches` is a single graph element, or a list of values if `fetches` is a list (described above). Raises: RuntimeError: If this `Session` is in an invalid state (e.g. has been closed). TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type. ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a `Tensor` that doesn't exist. 
""" def _fetch_fn(fetch): for tensor_type, fetch_fn, _ in BaseSession._REGISTERED_EXPANSIONS: if isinstance(fetch, tensor_type): return fetch_fn(fetch) raise TypeError('Fetch argument %r has invalid type %r' % (fetch, type(fetch))) def _feed_fn(feed, feed_val): for tensor_type, _, feed_fn in BaseSession._REGISTERED_EXPANSIONS: if isinstance(feed, tensor_type): return feed_fn(feed, feed_val) raise TypeError('Feed argument %r has invalid type %r' % (feed, type(feed))) # Check session. if self._closed: raise RuntimeError('Attempted to use a closed Session.') if self.graph.version == 0: raise RuntimeError('The Session graph is empty. Add operations to the ' 'graph before calling run().') # Validate and process fetches. is_list_fetch = isinstance(fetches, (list, tuple)) if not is_list_fetch: fetches = [fetches] unique_fetch_targets = set() target_list = [] fetch_info = [] for fetch in fetches: subfetches, fetch_contraction_fn = _fetch_fn(fetch) subfetch_names = [] for subfetch in subfetches: try: fetch_t = self.graph.as_graph_element(subfetch, allow_tensor=True, allow_operation=True) if isinstance(fetch_t, ops.Operation): target_list.append(compat.as_bytes(fetch_t.name)) else: subfetch_names.append(compat.as_bytes(fetch_t.name)) except TypeError as e: raise TypeError('Fetch argument %r of %r has invalid type %r, ' 'must be a string or Tensor. (%s)' % (subfetch, fetch, type(subfetch), str(e))) except ValueError as e: raise ValueError('Fetch argument %r of %r cannot be interpreted as a ' 'Tensor. (%s)' % (subfetch, fetch, str(e))) except KeyError as e: raise ValueError('Fetch argument %r of %r cannot be interpreted as a ' 'Tensor. (%s)' % (subfetch, fetch, str(e))) unique_fetch_targets.update(subfetch_names) fetch_info.append((subfetch_names, fetch_contraction_fn)) unique_fetch_targets = list(unique_fetch_targets) # Create request. feed_dict_string = {} # Validate and process feed_dict. 
if feed_dict: for feed, feed_val in feed_dict.items(): for subfeed, subfeed_val in _feed_fn(feed, feed_val): try: subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True, allow_operation=False) except Exception as e: e.message = ('Cannot interpret feed_dict key as Tensor: ' + e.message) e.args = (e.message,) raise e if isinstance(subfeed_val, ops.Tensor): raise TypeError('The value of a feed cannot be a tf.Tensor object. ' 'Acceptable feed values include Python scalars, ' 'strings, lists, or numpy ndarrays.') np_val = np.array(subfeed_val, dtype=subfeed_t.dtype.as_numpy_dtype) if subfeed_t.op.type == 'Placeholder': if not subfeed_t.get_shape().is_compatible_with(np_val.shape): raise ValueError( 'Cannot feed value of shape %r for Tensor %r, ' 'which has shape %r' % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape()))) feed_dict_string[compat.as_bytes(subfeed_t.name)] = np_val # Run request and get response. results = self._do_run(target_list, unique_fetch_targets, feed_dict_string) # User may have fetched the same tensor multiple times, but we # only fetch them from the runtime once. Furthermore, they may # be wrapped as a tuple of tensors. Here we map the results back # to what the client asked for. fetched_results = dict(zip(unique_fetch_targets, results)) ret = [] for fetch_names, fetch_contraction_fn in fetch_info: if fetch_names: fetched_vals = [fetched_results[name] for name in fetch_names] ret.append(fetch_contraction_fn(fetched_vals)) else: ret.append(None) if is_list_fetch: return ret else: return ret[0] # Captures the name of a node in an error status. _NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =') def _do_run(self, target_list, fetch_list, feed_dict): """Runs a step based on the given fetches and feeds. Args: target_list: A list of byte arrays corresponding to names of tensors or operations to be run to, but not fetched. fetch_list: A list of byte arrays corresponding to names of tensors to be fetched and operations to be run. 
feed_dict: A dictionary that maps tensor names (as byte arrays) to numpy ndarrays. Returns: A list of numpy ndarrays, corresponding to the elements of `fetch_list`. If the ith element of `fetch_list` contains the name of an operation, the first Tensor output of that operation will be returned for that element. """ try: # Ensure any changes to the graph are reflected in the runtime. with self._extend_lock: if self._graph.version > self._current_version: graph_def = self._graph.as_graph_def( from_version=self._current_version) try: status = tf_session.TF_NewStatus() tf_session.TF_ExtendGraph( self._session, graph_def.SerializeToString(), status) if tf_session.TF_GetCode(status) != 0: raise RuntimeError(compat.as_text(tf_session.TF_Message(status))) self._opened = True finally: tf_session.TF_DeleteStatus(status) self._current_version = self._graph.version return tf_session.TF_Run(self._session, feed_dict, fetch_list, target_list) except tf_session.StatusNotOK as e: e_type, e_value, e_traceback = sys.exc_info() error_message = compat.as_text(e.error_message) m = BaseSession._NODEDEF_NAME_RE.search(error_message) if m is not None: node_name = m.group(1) node_def = None try: op = self._graph.get_operation_by_name(node_name) node_def = op.node_def except KeyError: op = None # pylint: disable=protected-access raise errors._make_specific_exception(node_def, op, error_message, e.code) # pylint: enable=protected-access six.reraise(e_type, e_value, e_traceback) class Session(BaseSession): """A class for running TensorFlow operations. A `Session` object encapsulates the environment in which `Operation` objects are executed, and `Tensor` objects are evaluated. For example: ```python # Build a graph. a = tf.constant(5.0) b = tf.constant(6.0) c = a * b # Launch the graph in a session. sess = tf.Session() # Evaluate the tensor `c`. 
print(sess.run(c)) ``` A session may own resources, such as [variables](../../api_docs/python/state_ops.md#Variable), [queues](../../api_docs/python/io_ops.md#QueueBase), and [readers](../../api_docs/python/io_ops.md#ReaderBase). It is important to release these resources when they are no longer required. To do this, either invoke the [`close()`](#Session.close) method on the session, or use the session as a context manager. The following two examples are equivalent: ```python # Using the `close()` method. sess = tf.Session() sess.run(...) sess.close() # Using the context manager. with tf.Session() as sess: sess.run(...) ``` The [`ConfigProto`] (https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/config.proto) protocol buffer exposes various configuration options for a session. For example, to create a session that uses soft constraints for device placement, and log the resulting placement decisions, create a session as follows: ```python # Launch the graph in a session that allows soft device placement and # logs the placement decisions. sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) ``` @@__init__ @@run @@close @@graph @@as_default """ def __init__(self, target='', graph=None, config=None): """Creates a new TensorFlow session. If no `graph` argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with `tf.Graph()` in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. At present, no value other than the empty string is supported. graph: (Optional.) The `Graph` to be launched (described above). config: (Optional.) 
A [`ConfigProto`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/config.proto) protocol buffer with configuration options for the session. """ super(Session, self).__init__(target, graph, config=config) self._context_managers = [self.graph.as_default(), self.as_default()] def __enter__(self): for context_manager in self._context_managers: context_manager.__enter__() return self def __exit__(self, exec_type, exec_value, exec_tb): if exec_type is errors.OpError: logging.error('Session closing due to OpError: %s', (exec_value,)) for context_manager in reversed(self._context_managers): context_manager.__exit__(exec_type, exec_value, exec_tb) self.close() class InteractiveSession(BaseSession): """A TensorFlow `Session` for use in interactive contexts, such as a shell. The only difference with a regular `Session` is that an `InteractiveSession` installs itself as the default session on construction. The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run) will use that session to run ops. This is convenient in interactive shells and [IPython notebooks](http://ipython.org), as it avoids having to pass an explicit `Session` object to run ops. For example: ```python sess = tf.InteractiveSession() a = tf.constant(5.0) b = tf.constant(6.0) c = a * b # We can just use 'c.eval()' without passing 'sess' print(c.eval()) sess.close() ``` Note that a regular session installs itself as the default session when it is created in a `with` statement. The common usage in non-interactive programs is to follow that pattern: ```python a = tf.constant(5.0) b = tf.constant(6.0) c = a * b with tf.Session(): # We can also use 'c.eval()' here. print(c.eval()) ``` @@__init__ @@close """ def __init__(self, target='', graph=None): """Creates a new interactive TensorFlow session. 
If no `graph` argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with `tf.Graph()` in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. At present, no value other than the empty string is supported. graph: (Optional.) The `Graph` to be launched (described above). """ super(InteractiveSession, self).__init__(target, graph) self._default_session = self.as_default() self._default_session.__enter__() self._explicit_graph = graph if self._explicit_graph is not None: self._default_graph = graph.as_default() self._default_graph.__enter__() def close(self): """Closes an `InteractiveSession`.""" super(InteractiveSession, self).close() if self._explicit_graph is not None: self._default_graph.__exit__(None, None, None) self._default_session.__exit__(None, None, None)
unknown
codeparrot/codeparrot-clean
from test import support from test.test_json import PyTest, CTest class JSONTestObject: pass class TestRecursion: def test_listrecursion(self): x = [] x.append(x) try: self.dumps(x) except ValueError as exc: self.assertEqual(exc.__notes__, ["when serializing list item 0"]) else: self.fail("didn't raise ValueError on list recursion") x = [] y = [x] x.append(y) try: self.dumps(x) except ValueError as exc: self.assertEqual(exc.__notes__, ["when serializing list item 0"]*2) else: self.fail("didn't raise ValueError on alternating list recursion") y = [] x = [y, y] # ensure that the marker is cleared self.dumps(x) def test_dictrecursion(self): x = {} x["test"] = x try: self.dumps(x) except ValueError as exc: self.assertEqual(exc.__notes__, ["when serializing dict item 'test'"]) else: self.fail("didn't raise ValueError on dict recursion") x = {} y = {"a": x, "b": x} # ensure that the marker is cleared self.dumps(x) def test_defaultrecursion(self): class RecursiveJSONEncoder(self.json.JSONEncoder): recurse = False def default(self, o): if o is JSONTestObject: if self.recurse: return [JSONTestObject] else: return 'JSONTestObject' return self.json.JSONEncoder.default(o) enc = RecursiveJSONEncoder() self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"') enc.recurse = True try: enc.encode(JSONTestObject) except ValueError as exc: self.assertEqual(exc.__notes__, ["when serializing list item 0", "when serializing type object"]) else: self.fail("didn't raise ValueError on default recursion") @support.skip_if_unlimited_stack_size @support.skip_emscripten_stack_overflow() @support.skip_wasi_stack_overflow() def test_highly_nested_objects_decoding(self): very_deep = 500_000 # test that loading highly-nested objects doesn't segfault when C # accelerations are used. 
See #12017 with self.assertRaises(RecursionError): with support.infinite_recursion(): self.loads('{"a":' * very_deep + '1' + '}' * very_deep) with self.assertRaises(RecursionError): with support.infinite_recursion(): self.loads('{"a":' * very_deep + '[1]' + '}' * very_deep) with self.assertRaises(RecursionError): with support.infinite_recursion(): self.loads('[' * very_deep + '1' + ']' * very_deep) @support.skip_if_unlimited_stack_size @support.skip_wasi_stack_overflow() @support.skip_emscripten_stack_overflow() @support.requires_resource('cpu') def test_highly_nested_objects_encoding(self): # See #12051 l, d = [], {} for x in range(500_000): l, d = [l], {'k':d} with self.assertRaises(RecursionError): with support.infinite_recursion(5000): self.dumps(l) with self.assertRaises(RecursionError): with support.infinite_recursion(5000): self.dumps(d) @support.skip_if_unlimited_stack_size @support.skip_emscripten_stack_overflow() @support.skip_wasi_stack_overflow() def test_endless_recursion(self): # See #12051 class EndlessJSONEncoder(self.json.JSONEncoder): def default(self, o): """If check_circular is False, this will keep adding another list.""" return [o] with self.assertRaises(RecursionError): with support.infinite_recursion(1000): EndlessJSONEncoder(check_circular=False).encode(5j) class TestPyRecursion(TestRecursion, PyTest): pass class TestCRecursion(TestRecursion, CTest): pass
python
github
https://github.com/python/cpython
Lib/test/test_json/test_recursion.py
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Or visit http://www.gnu.org/licenses/gpl.html. # import unittest import glob import os from rts2saf.config import Configuration from rts2saf.analyze import SimpleAnalysis, CatalogAnalysis from rts2saf.sextract import Sextract from rts2saf.environ import Environment import logging if not os.path.isdir('/tmp/rts2saf_log'): os.mkdir('/tmp/rts2saf_log') logging.basicConfig(filename='/tmp/rts2saf_log/unittest.log', level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') logger = logging.getLogger() # sequence matters def suite_simple(): suite = unittest.TestSuite() suite.addTest(TestSimpleAnalysis('test_readConfiguration')) suite.addTest(TestSimpleAnalysis('test_fitsInBasepath')) suite.addTest(TestSimpleAnalysis('test_analyze')) return suite def suite_catalog(): suite = unittest.TestSuite() suite.addTest(TestCatalogAnalysis('test_readConfiguration')) suite.addTest(TestCatalogAnalysis('test_fitsInBasepath')) suite.addTest(TestCatalogAnalysis('test_selectAndAnalyze')) return suite #@unittest.skip('class not yet implemented') class TestSimpleAnalysis(unittest.TestCase): def tearDown(self): pass def setUp(self): self.rt = Configuration(logger=logger) self.fileName='./rts2saf-flux.cfg' 
self.success=self.rt.readConfiguration(fileName=self.fileName) self.ev=Environment(debug=False, rt=self.rt,logger=logger) #@unittest.skip('feature not yet implemented') def test_readConfiguration(self): logger.info('== {} =='.format(self._testMethodName)) self.assertTrue(self.success, 'config file: {} faulty or not found, return value: {}'.format(self.fileName, self.success)) def test_fitsInBasepath(self): logger.info('== {} =='.format(self._testMethodName)) fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB'])) self.assertEqual(len(fitsFns), 14, 'return value: {}'.format(len(fitsFns))) def test_analyze(self): logger.info('== {} =='.format(self._testMethodName)) fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB'])) dataSxtr=list() for k, fitsFn in enumerate(fitsFns): logger.info('analyze: processing fits file: {0}'.format(fitsFn)) sxtr= Sextract(debug=False, rt=self.rt, logger=logger) dSx=sxtr.sextract(fitsFn=fitsFn) if dSx: dataSxtr.append(dSx) self.assertEqual(len(dataSxtr), 14, 'return value: {}'.format(len(dataSxtr))) an=SimpleAnalysis(debug=False, dataSxtr=dataSxtr, Ds9Display=False, FitDisplay=False, focRes=float(self.rt.cfg['FOCUSER_RESOLUTION']), ev=self.ev, rt=self.rt,logger=logger) resultFitFwhm, resultMeansFwhm, resultFitFlux, resultMeansFlux=an.analyze() self.assertAlmostEqual(resultFitFwhm.extrFitVal, 2.2175214358, places=2, msg='return value: {}'.format(resultFitFwhm.extrFitVal)) an.display() #@unittest.skip('class not yet implemented') class TestCatalogAnalysis(unittest.TestCase): def tearDown(self): pass def setUp(self): self.rt = Configuration(logger=logger) self.fileName='./rts2saf-no-filter-wheel.cfg' self.success=self.rt.readConfiguration(fileName=self.fileName) self.ev=Environment(debug=False, rt=self.rt,logger=logger) def test_readConfiguration(self): logger.info('== {} =='.format(self._testMethodName)) self.assertTrue(self.success, 'config file: {} faulty or not found, return value: 
{}'.format(self.fileName, self.success)) def test_fitsInBasepath(self): logger.info('== {} =='.format(self._testMethodName)) fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB'])) self.assertEqual(len(fitsFns), 14, 'return value: {}'.format(len(fitsFns))) #@unittest.skip('feature not yet implemented') def test_selectAndAnalyze(self): logger.info('== {} =='.format(self._testMethodName)) fitsFns=glob.glob('{0}/{1}'.format('../samples', self.rt.cfg['FILE_GLOB'])) dataSxtr=list() for k, fitsFn in enumerate(fitsFns): logger.info('analyze: processing fits file: {0}'.format(fitsFn)) sxtr= Sextract(debug=False, rt=self.rt, logger=logger) dSx=sxtr.sextract(fitsFn=fitsFn) if dSx: dataSxtr.append(dSx) self.assertEqual(len(dataSxtr), 14, 'return value: {}'.format(len(dataSxtr))) an=CatalogAnalysis(debug=False, dataSxtr=dataSxtr, Ds9Display=False, FitDisplay=False, focRes=float(self.rt.cfg['FOCUSER_RESOLUTION']), moduleName='rts2saf.criteria_radius', ev=self.ev, rt=self.rt, logger=logger) accRFt, rejRFt, allrFt, accRMns, recRMns, allRMns=an.selectAndAnalyze() self.assertAlmostEqual(allrFt.extrFitVal, 2.2175214358, delta=0.1, msg='return value: {}'.format(allrFt.extrFitVal)) self.assertAlmostEqual(accRFt.extrFitVal, 2.24000979001, delta=0.1, msg='return value: {}'.format(allrFt.extrFitVal)) if __name__ == '__main__': suiteSimple=suite_simple() suiteCatalog= suite_catalog() alltests = unittest.TestSuite([suiteSimple, suiteCatalog]) unittest.TextTestRunner(verbosity=0).run(alltests)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-09-08 10:00:22
# Project: drzj

from pyspider.libs.base_handler import *
from pymongo import MongoClient
import json

# Handy mongo shell queries for inspecting the scraped data:
# db.spider_drzj_image.find({'speciesName':'阿修罗'})
# db.runCommand({"distinct":"spider_drzj_image", "key":"speciesName"})
# db.spider_drzj.group( { key: { 'speciesName': true}, initial: {count: 0}, reduce: function(obj,prev){ prev.count++;} } )


class Handler(BaseHandler):
    """pyspider crawler for drzj.net succulent species data.

    Walks the paginated species list API, inserts one document per species
    into the ``spider_drzj`` collection (skipping species already present),
    then follows each species' detail page and stores its photo URLs in
    ``spider_drzj_image``.
    """

    crawl_config = {
        'itag': 'v1.0'
    }

    def __init__(self):
        # List/detail API endpoints and image URL templates.
        self.base_url = 'https://www.drzj.net/plugin.php?id=drzj_tujian:tujian&act=list&from=wxapp&page=1000&kid=0&sid=0&inajax=1'
        self.pagination_url = 'https://www.drzj.net/plugin.php?id=drzj_tujian:tujian&act=list&from=wxapp&page={}&kid=0&sid=0&inajax=1'
        self.total_pages = 100
        # Cleared by list_page() once an empty page is seen; note that
        # index_page() may already have scheduled later pages by then, so
        # this is only a best-effort stop signal.
        self.has_page = True
        self.detail_url = 'https://www.drzj.net/plugin.php?id=drzj_tujian:tujian&act=detail&from=wxapp&pid={}'
        self.image_prefix = 'https://mini-img.drzj.net/duotoutujian/data/img/cover/'
        self.image_detail = 'http://mini-img.drzj.net/duotoutujian/data/img/{}/orginal/{}'
        self.image_path = '/Users/anmy/Downloads/tmp/succulent/'
        self.mongo_url = 'mongodb://127.0.0.1:27017/succulent'
        self.mongo_client = MongoClient(self.mongo_url)
        self.mongo_db = self.mongo_client['succulent']

    def on_start(self):
        """Entry point: seed the crawl with the list endpoint."""
        self.crawl(self.base_url, callback=self.index_page)

    def index_page(self, response):
        """Schedule every list page up-front (pages 1..total_pages-1)."""
        for index in range(1, self.total_pages):
            if self.has_page:
                self.crawl(self.pagination_url.format(index), callback=self.list_page)

    def list_page(self, response):
        """Parse one list page; upsert species rows and schedule detail pages.

        BUG FIX: the original guard was ``if list and len(species_list) > 0``,
        which tests the *builtin* ``list`` (always truthy) instead of
        ``species_list`` and raises TypeError on ``len(None)`` when the API
        returns no list.
        """
        result = json.loads(response.text)
        species_list = result.get('data').get('list')
        if species_list:
            for each in species_list:
                species_id = each.get('pid')
                print(species_id)  # parenthesized: works on Python 2 and 3
                species_name = each.get('name')
                print(species_name)
                result = self.mongo_db['spider_drzj'].find_one({'speciesName': species_name, 'source': 'drzj'})
                # Only insert species we have not seen before; existing
                # records are deliberately left unchanged.
                if not result:
                    self.mongo_db['spider_drzj'].insert({
                        'extSpeciesId': species_id,
                        'speciesName': species_name,
                        'image': self.image_prefix + each.get('img_cover'),
                        'source': 'drzj',
                        'familyName': each.get('kname'),
                        'familyTerminology': each.get('k_ld_name'),
                        'genusName': each.get('sname'),
                        'genusTerminology': each.get('s_ld_name'),
                        'terminology': each.get('ld_name'),
                        'alias': each.get('othername'),
                        'area': '',
                        'growthSeason': each.get('season'),
                        'sunshine': each.get('sun'),
                        'temperature': '',
                        'moisture': each.get('water'),
                        'breedPattern': each.get('reproduce'),
                        'breedDifficulty': '',
                        'description': each.get('description'),
                        'price': each.get('price_des')
                    })
                self.crawl(self.detail_url.format(species_id), callback=self.detail_page,
                           save={'species_id': species_id, 'species_name': species_name})
        else:
            self.has_page = False

    def detail_page(self, response):
        """Parse a species detail page and store each photo URL once."""
        species_id = response.save['species_id']
        species_name = response.save['species_name']
        result = json.loads(response.text)
        photos = result.get('data').get('photo')
        # Guarded: the original ``print len(photos)`` raised TypeError when
        # the 'photo' key was absent (photos is None).
        print(len(photos) if photos else 0)
        if photos:
            for photo in photos:
                image = self.image_detail.format(species_id, photo.get('largeImgName'))
                if image:
                    result = self.mongo_db['spider_drzj_image'].find_one({'speciesName': species_name, 'image': image})
                    if not result:
                        self.mongo_db['spider_drzj_image'].insert({
                            'speciesId': species_id,
                            'speciesName': species_name,
                            'image': image
                        })
unknown
codeparrot/codeparrot-clean
import datetime from unittest import TestCase from pymysql._compat import PY2 from pymysql import converters __all__ = ["TestConverter"] class TestConverter(TestCase): def test_escape_string(self): self.assertEqual( converters.escape_string(u"foo\nbar"), u"foo\\nbar" ) if PY2: def test_escape_string_bytes(self): self.assertEqual( converters.escape_string(b"foo\nbar"), b"foo\\nbar" ) def test_convert_datetime(self): expected = datetime.datetime(2007, 2, 24, 23, 6, 20) dt = converters.convert_datetime('2007-02-24 23:06:20') self.assertEqual(dt, expected) def test_convert_datetime_with_fsp(self): expected = datetime.datetime(2007, 2, 24, 23, 6, 20, 511581) dt = converters.convert_datetime('2007-02-24 23:06:20.511581') self.assertEqual(dt, expected) def _test_convert_timedelta(self, with_negate=False, with_fsp=False): d = {'hours': 789, 'minutes': 12, 'seconds': 34} s = '%(hours)s:%(minutes)s:%(seconds)s' % d if with_fsp: d['microseconds'] = 511581 s += '.%(microseconds)s' % d expected = datetime.timedelta(**d) if with_negate: expected = -expected s = '-' + s tdelta = converters.convert_timedelta(s) self.assertEqual(tdelta, expected) def test_convert_timedelta(self): self._test_convert_timedelta(with_negate=False, with_fsp=False) self._test_convert_timedelta(with_negate=True, with_fsp=False) def test_convert_timedelta_with_fsp(self): self._test_convert_timedelta(with_negate=False, with_fsp=True) self._test_convert_timedelta(with_negate=False, with_fsp=True) def test_convert_time(self): expected = datetime.time(23, 6, 20) time_obj = converters.convert_time('23:06:20') self.assertEqual(time_obj, expected) def test_convert_time_with_fsp(self): expected = datetime.time(23, 6, 20, 511581) time_obj = converters.convert_time('23:06:20.511581') self.assertEqual(time_obj, expected)
unknown
codeparrot/codeparrot-clean
import * as ts from "../../../_namespaces/ts.js"; import { extractTest } from "./helpers.js"; function testExtractRangeFailed(caption: string, s: string, expectedErrors: readonly string[]) { return it(caption, () => { const t = extractTest(s); const file = ts.createSourceFile("a.ts", t.source, ts.ScriptTarget.Latest, /*setParentNodes*/ true); const selectionRange = t.ranges.get("selection"); if (!selectionRange) { throw new Error(`Test ${s} does not specify selection range`); } const result = ts.refactor.extractSymbol.getRangeToExtract(file, ts.createTextSpanFromRange(selectionRange), /*invoked*/ false); assert(result.targetRange === undefined, "failure expected"); const sortedErrors = result.errors.map(e => e.messageText as string).sort(); assert.deepEqual(sortedErrors, expectedErrors.slice().sort(), "unexpected errors"); }); } function testExtractRange(caption: string, s: string) { return it(caption, () => { const t = extractTest(s); const f = ts.createSourceFile("a.ts", t.source, ts.ScriptTarget.Latest, /*setParentNodes*/ true); const selectionRange = t.ranges.get("selection"); if (!selectionRange) { throw new Error(`Test ${s} does not specify selection range`); } const result = ts.refactor.extractSymbol.getRangeToExtract(f, ts.createTextSpanFromRange(selectionRange)); const expectedRange = t.ranges.get("extracted"); if (expectedRange) { let pos: number, end: number; const targetRange = result.targetRange!; if (ts.isArray(targetRange.range)) { pos = targetRange.range[0].getStart(f); end = ts.last(targetRange.range).getEnd(); } else { pos = targetRange.range.getStart(f); end = targetRange.range.getEnd(); } assert.equal(pos, expectedRange.pos, "incorrect pos of range"); assert.equal(end, expectedRange.end, "incorrect end of range"); } else { assert.isTrue(!result.targetRange, `expected range to extract to be undefined`); } }); } describe("unittests:: services:: extract:: extractRanges", () => { describe("get extract range from selection", () => { testExtractRange( 
"extractRange1", ` [#| [$|var x = 1; var y = 2;|]|] `, ); testExtractRange( "extractRange2", ` [$|[#|var x = 1; var y = 2|];|] `, ); testExtractRange( "extractRange3", ` [#|var x = [$|1|]|]; var y = 2; `, ); testExtractRange( "extractRange4", ` var x = [$|10[#|00|]|]; `, ); testExtractRange( "extractRange5", ` [$|va[#|r foo = 1; var y = 200|]0;|] `, ); testExtractRange( "extractRange6", ` var x = [$|fo[#|o.bar.baz()|]|]; `, ); testExtractRange( "extractRange7", ` if ([#|[#extracted|a && b && c && d|]|]) { } `, ); testExtractRange( "extractRange8", ` if [#|(a && b && c && d|]) { } `, ); testExtractRange( "extractRange9", ` if ([$|a[#|a && b && c && d|]d|]) { } `, ); testExtractRange( "extractRange10", ` if (a && b && c && d) { [#| [$|var x = 1; console.log(x);|] |] } `, ); testExtractRange( "extractRange11", ` [#| if (a) { return 100; } |] `, ); testExtractRange( "extractRange12", ` function foo() { [#| [$|if (a) { } return 100|] |] } `, ); testExtractRange( "extractRange13", ` [#| [$|l1: if (x) { break l1; }|]|] `, ); testExtractRange( "extractRange14", ` [#| [$|l2: { if (x) { } break l2; }|]|] `, ); testExtractRange( "extractRange15", ` while (true) { [#| if(x) { } break; |] } `, ); testExtractRange( "extractRange16", ` while (true) { [#| if(x) { } continue; |] } `, ); testExtractRange( "extractRange17", ` l3: { [#| if (x) { } break l3; |] } `, ); testExtractRange( "extractRange18", ` function f() { while (true) { [#| if (x) { return; } |] } } `, ); testExtractRange( "extractRange19", ` function f() { while (true) { [#| [$|if (x) { } return;|] |] } } `, ); testExtractRange( "extractRange20", ` function f() { return [#| [$|1 + 2|] |]+ 3; } } `, ); testExtractRange( "extractRange21", ` function f(x: number) { [#|[$|try { x++; } finally { return 1; }|]|] } `, ); // Variable statements testExtractRange("extractRange22", `[#|let x = [$|1|];|]`); testExtractRange("extractRange23", `[#|let x = [$|1|], y;|]`); testExtractRange("extractRange24", `[#|[$|let x = 1, y = 
1;|]|]`); // Variable declarations testExtractRange("extractRange25", `let [#|x = [$|1|]|];`); testExtractRange("extractRange26", `let [#|x = [$|1|]|], y = 2;`); testExtractRange("extractRange27", `let x = 1, [#|y = [$|2|]|];`); // Return statements testExtractRange("extractRange28", `[#|return [$|1|];|]`); // For statements testExtractRange("extractRange29", `for ([#|var i = [$|1|]|]; i < 2; i++) {}`); testExtractRange("extractRange30", `for (var i = [#|[$|1|]|]; i < 2; i++) {}`); }); testExtractRangeFailed( "extractRangeFailed1", ` namespace A { function f() { [#| let x = 1 if (x) { return 10; } |] } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalReturnStatement.message], ); testExtractRangeFailed( "extractRangeFailed2", ` namespace A { function f() { while (true) { [#| let x = 1 if (x) { break; } |] } } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalBreakOrContinueStatements.message], ); testExtractRangeFailed( "extractRangeFailed3", ` namespace A { function f() { while (true) { [#| let x = 1 if (x) { continue; } |] } } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalBreakOrContinueStatements.message], ); testExtractRangeFailed( "extractRangeFailed4", ` namespace A { function f() { l1: { [#| let x = 1 if (x) { break l1; } |] } } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingLabeledBreakOrContinueStatementWithTargetOutsideOfTheRange.message], ); testExtractRangeFailed( "extractRangeFailed5", ` namespace A { function f() { [#| try { f2() return 10; } catch (e) { } |] } function f2() { } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalReturnStatement.message], ); testExtractRangeFailed( "extractRangeFailed6", ` namespace A { function f() { [#| try { f2() } catch (e) { return 10; } |] } function f2() { } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalReturnStatement.message], ); 
testExtractRangeFailed( "extractRangeFailed7", ` function test(x: number) { while (x) { x--; [#|break;|] } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalBreakOrContinueStatements.message], ); testExtractRangeFailed( "extractRangeFailed8", ` function test(x: number) { switch (x) { case 1: [#|break;|] } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalBreakOrContinueStatements.message], ); testExtractRangeFailed("extractRangeFailed9", `var x = ([#||]1 + 2);`, [ts.refactor.extractSymbol.Messages.cannotExtractEmpty.message]); testExtractRangeFailed( "extractRangeFailed10", ` function f() { return 1 + [#|2 + 3|]; } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRange.message], ); testExtractRangeFailed( "extractRangeFailed11", ` function f(x: number) { while (true) { [#|try { x++; } finally { break; }|] } } `, [ts.refactor.extractSymbol.Messages.cannotExtractRangeContainingConditionalBreakOrContinueStatements.message], ); testExtractRangeFailed("extractRangeFailed12", `let [#|x|];`, [ts.refactor.extractSymbol.Messages.statementOrExpressionExpected.message]); testExtractRangeFailed("extractRangeFailed13", `[#|return;|]`, [ts.refactor.extractSymbol.Messages.cannotExtractRange.message]); testExtractRangeFailed( "extractRangeFailed14", ` switch(1) { case [#|1: break;|] } `, [ts.refactor.extractSymbol.Messages.cannotExtractRange.message], ); testExtractRangeFailed( "extractRangeFailed15", ` switch(1) { case [#|1: break|]; } `, [ts.refactor.extractSymbol.Messages.cannotExtractRange.message], ); // Documentation only - it would be nice if the result were [$|1|] testExtractRangeFailed( "extractRangeFailed16", ` switch(1) { [#|case 1|]: break; } `, [ts.refactor.extractSymbol.Messages.cannotExtractRange.message], ); // Documentation only - it would be nice if the result were [$|1|] testExtractRangeFailed( "extractRangeFailed17", ` switch(1) { [#|case 1:|] break; } `, 
[ts.refactor.extractSymbol.Messages.cannotExtractRange.message], ); testExtractRangeFailed("extractRangeFailed18", `[#|{ 1;|] }`, [ts.refactor.extractSymbol.Messages.cannotExtractRange.message]); testExtractRangeFailed("extractRangeFailed19", `[#|/** @type {number} */|] const foo = 1;`, [ts.refactor.extractSymbol.Messages.cannotExtractJSDoc.message]); testExtractRangeFailed("extract-method-not-for-token-expression-statement", `[#|a|]`, [ts.refactor.extractSymbol.Messages.cannotExtractIdentifier.message]); });
typescript
github
https://github.com/microsoft/TypeScript
src/testRunner/unittests/services/extract/ranges.ts
#!/usr/bin/env python
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import atexit
import json
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile


def is_bundled(tar):
    """Return True if the npm tarball already contains a bundled
    package/node_modules tree."""
    # No entries for directories, so scan for a matching prefix.
    for entry in tar.getmembers():
        if entry.name.startswith('package/node_modules/'):
            return True
    return False


def bundle_dependencies():
    """Rewrite ./package.json so that `npm pack` bundles every runtime
    dependency into the tarball."""
    with open('package.json') as f:
        package = json.load(f)
    # BUG FIX: list(...) is required on Python 3, where dict.keys() returns
    # a view object that json.dump cannot serialize (Python 2 returned a
    # plain list, so this used to work by accident).
    package['bundledDependencies'] = list(package['dependencies'])
    with open('package.json', 'w') as f:
        json.dump(package, f)


def main(args):
    """Fetch <package>@<version> from the npm registry, repack it with its
    dependencies bundled, and copy the result into the current directory.

    Returns 0 on success, 1 on usage error or if the package is already
    bundled.
    """
    if len(args) != 2:
        print('Usage: %s <package> <version>' % sys.argv[0], file=sys.stderr)
        return 1

    name, version = args
    filename = '%s-%s.tgz' % (name, version)
    url = 'http://registry.npmjs.org/%s/-/%s' % (name, filename)

    tmpdir = tempfile.mkdtemp()
    tgz = os.path.join(tmpdir, filename)
    # Clean the scratch directory up no matter how we exit.
    atexit.register(lambda: shutil.rmtree(tmpdir))

    subprocess.check_call(['curl', '--proxy-anyauth', '-ksfo', tgz, url])
    with tarfile.open(tgz, 'r:gz') as tar:
        if is_bundled(tar):
            print('%s already has bundled node_modules' % filename)
            return 1
        # NOTE(review): extractall() trusts member paths in a tarball fetched
        # over the network; a hostile archive could escape tmpdir. Consider
        # validating member names (or filter='data' on Python 3.12+).
        tar.extractall(path=tmpdir)

    oldpwd = os.getcwd()
    os.chdir(os.path.join(tmpdir, 'package'))
    bundle_dependencies()
    subprocess.check_call(['npm', 'install'])
    subprocess.check_call(['npm', 'pack'])
    shutil.copy(filename, os.path.join(oldpwd, filename))
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
unknown
codeparrot/codeparrot-clean
str1 = "あ" * 1024 + "い" # not single byte optimizable str2 = "い" 100_000.times { str1.index(str2) }
ruby
github
https://github.com/ruby/ruby
benchmark/string_index.rb
from sympy.mpmath import * def test_approximation(): mp.dps = 15 f = lambda x: cos(2-2*x)/x p, err = chebyfit(f, [2, 4], 8, error=True) assert err < 1e-5 for i in range(10): x = 2 + i/5. assert abs(polyval(p, x) - f(x)) < err def test_limits(): mp.dps = 15 assert limit(lambda x: (x-sin(x))/x**3, 0).ae(mpf(1)/6) assert limit(lambda n: (1+1/n)**n, inf).ae(e) def test_polyval(): assert polyval([], 3) == 0 assert polyval([0], 3) == 0 assert polyval([5], 3) == 5 # 4x^3 - 2x + 5 p = [4, 0, -2, 5] assert polyval(p,4) == 253 assert polyval(p,4,derivative=True) == (253, 190) def test_polyroots(): p = polyroots([1,-4]) assert p[0].ae(4) p, q = polyroots([1,2,3]) assert p.ae(-1 - sqrt(2)*j) assert q.ae(-1 + sqrt(2)*j) #this is not a real test, it only tests a specific case assert polyroots([1]) == [] try: polyroots([0]) assert False except ValueError: pass def test_pade(): one = mpf(1) mp.dps = 20 N = 10 a = [one] k = 1 for i in range(1, N+1): k *= i a.append(one/k) p, q = pade(a, N//2, N//2) for x in arange(0, 1, 0.1): r = polyval(p[::-1], x)/polyval(q[::-1], x) assert(r.ae(exp(x), 1.0e-10)) mp.dps = 15 def test_fourier(): mp.dps = 15 c, s = fourier(lambda x: x+1, [-1, 2], 2) #plot([lambda x: x+1, lambda x: fourierval((c, s), [-1, 2], x)], [-1, 2]) assert c[0].ae(1.5) assert c[1].ae(-3*sqrt(3)/(2*pi)) assert c[2].ae(3*sqrt(3)/(4*pi)) assert s[0] == 0 assert s[1].ae(3/(2*pi)) assert s[2].ae(3/(4*pi)) assert fourierval((c, s), [-1, 2], 1).ae(1.9134966715663442) def test_differint(): mp.dps = 15 assert differint(lambda t: t, 2, -0.5).ae(8*sqrt(2/pi)/3)
unknown
codeparrot/codeparrot-clean
import httplib import functools from modularodm.exceptions import NoResultsFound from modularodm.storage.base import KeyExistsException from framework.auth.decorators import must_be_signed from framework.exceptions import HTTPError from website.models import User from website.models import Node from website.addons.osfstorage import model from website.addons.osfstorage import errors from website.project.decorators import ( must_not_be_registration, must_have_addon, ) def handle_odm_errors(func): @functools.wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except NoResultsFound: raise HTTPError(httplib.NOT_FOUND) except KeyExistsException: raise HTTPError(httplib.CONFLICT) except errors.VersionNotFoundError: raise HTTPError(httplib.NOT_FOUND) return wrapped def autoload_filenode(must_be=None, default_root=False): """Implies both must_have_addon osfstorage node and handle_odm_errors Attempts to load fid as a OsfStorageFileNode with viable constraints """ def _autoload_filenode(func): @handle_odm_errors @must_have_addon('osfstorage', 'node') @functools.wraps(func) def wrapped(*args, **kwargs): node_addon = kwargs['node_addon'] if 'fid' not in kwargs and default_root: file_node = node_addon.root_node else: file_node = model.OsfStorageFileNode.get(kwargs.get('fid'), node_addon) if must_be and file_node.kind != must_be: raise HTTPError(httplib.BAD_REQUEST, data={ 'message_short': 'incorrect type', 'message_long': 'FileNode must be of type {} not {}'.format(must_be, file_node.kind) }) kwargs['file_node'] = file_node return func(*args, **kwargs) return wrapped return _autoload_filenode def waterbutler_opt_hook(func): @must_be_signed @handle_odm_errors @must_not_be_registration @must_have_addon('osfstorage', 'node') @functools.wraps(func) def wrapped(payload, *args, **kwargs): try: user = User.load(payload['user']) dest_node = Node.load(payload['destination']['node']) source = model.OsfStorageFileNode.get(payload['source'], kwargs['node_addon']) 
dest_parent = model.OsfStorageFileNode.get_folder(payload['destination']['parent'], dest_node.get_addon('osfstorage')) kwargs.update({ 'user': user, 'source': source, 'destination': dest_parent, 'name': payload['destination']['name'], }) except KeyError: raise HTTPError(httplib.BAD_REQUEST) return func(*args, **kwargs) return wrapped
unknown
codeparrot/codeparrot-clean
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.data_flow_ops.FIFOQueue.""" import time from tensorflow.compiler.tests import xla_test from tensorflow.python.framework import dtypes as dtypes_lib from tensorflow.python.ops import data_flow_ops from tensorflow.python.platform import test class FIFOQueueTest(xla_test.XLATestCase): def testEnqueue(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) enqueue_op = q.enqueue((10.0,)) enqueue_op.run() def testEnqueueWithShape(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2)) enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],)) enqueue_correct_op.run() with self.assertRaises(ValueError): q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],)) self.assertEqual(1, self.evaluate(q.size())) def testMultipleDequeues(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()]) self.evaluate(q.enqueue([1])) self.evaluate(q.enqueue([2])) self.evaluate(q.enqueue([3])) a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()]) self.assertAllEqual(set([1, 2, 3]), set([a, b, c])) def testQueuesDontShare(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], 
shapes=[()]) self.evaluate(q.enqueue(1)) q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()]) self.evaluate(q2.enqueue(2)) self.assertAllEqual(self.evaluate(q2.dequeue()), 2) self.assertAllEqual(self.evaluate(q.dequeue()), 1) def testEnqueueDictWithoutNames(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) with self.assertRaisesRegex(ValueError, "must have names"): q.enqueue({"a": 12.0}) def testParallelEnqueue(self): with self.session() as sess, self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() # Run one producer thread for each element in elems. def enqueue(enqueue_op): sess.run(enqueue_op) threads = [ self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops ] for thread in threads: thread.start() for thread in threads: thread.join() # Dequeue every element using a single thread. results = [] for _ in range(len(elems)): results.append(self.evaluate(dequeued_t)) self.assertItemsEqual(elems, results) def testParallelDequeue(self): with self.session() as sess, self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() # Enqueue every element using a single thread. for enqueue_op in enqueue_ops: enqueue_op.run() # Run one consumer thread for each element in elems. 
results = [] def dequeue(): results.append(sess.run(dequeued_t)) threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops] for thread in threads: thread.start() for thread in threads: thread.join() self.assertItemsEqual(elems, results) def testDequeue(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) elems = [10.0, 20.0, 30.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() for enqueue_op in enqueue_ops: enqueue_op.run() for i in range(len(elems)): vals = self.evaluate(dequeued_t) self.assertEqual([elems[i]], vals) def testEnqueueAndBlockingDequeue(self): with self.session() as sess, self.test_scope(): q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32) elems = [10.0, 20.0, 30.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() def enqueue(): # The enqueue_ops should run after the dequeue op has blocked. # TODO(mrry): Figure out how to do this without sleeping. time.sleep(0.1) for enqueue_op in enqueue_ops: sess.run(enqueue_op) results = [] def dequeue(): for _ in range(len(elems)): results.append(sess.run(dequeued_t)) enqueue_thread = self.checkedThread(target=enqueue) dequeue_thread = self.checkedThread(target=dequeue) enqueue_thread.start() dequeue_thread.start() enqueue_thread.join() dequeue_thread.join() for elem, result in zip(elems, results): self.assertEqual([elem], result) def testMultiEnqueueAndDequeue(self): with self.session() as sess, self.test_scope(): q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32)) elems = [(5, 10.0), (10, 20.0), (15, 30.0)] enqueue_ops = [q.enqueue((x, y)) for x, y in elems] dequeued_t = q.dequeue() for enqueue_op in enqueue_ops: enqueue_op.run() for i in range(len(elems)): x_val, y_val = sess.run(dequeued_t) x, y = elems[i] self.assertEqual([x], x_val) self.assertEqual([y], y_val) def testQueueSizeEmpty(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) 
self.assertEqual([0], self.evaluate(q.size())) def testQueueSizeAfterEnqueueAndDequeue(self): with self.session(), self.test_scope(): q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32) enqueue_op = q.enqueue((10.0,)) dequeued_t = q.dequeue() size = q.size() self.assertEqual([], size.get_shape()) enqueue_op.run() self.assertEqual(1, self.evaluate(size)) dequeued_t.op.run() self.assertEqual(0, self.evaluate(size)) if __name__ == "__main__": test.main()
python
github
https://github.com/tensorflow/tensorflow
tensorflow/compiler/tests/fifo_queue_test.py
#!/usr/bin/env bash set -eux export ANSIBLE_GATHERING=explicit ansible-playbook test_includes.yml -i ../../inventory "$@" ansible-playbook inherit_notify.yml "$@" echo "EXPECTED ERROR: Ensure we fail if using 'include' to include a playbook." set +e result="$(ansible-playbook -i ../../inventory include_on_playbook_should_fail.yml -v "$@" 2>&1)" set -e grep -q "'include_tasks' is not a valid attribute for a Play" <<< "$result" ansible-playbook includes_loop_rescue.yml --extra-vars strategy=linear "$@" ansible-playbook includes_loop_rescue.yml --extra-vars strategy=free "$@" ansible-playbook includes_from_dedup.yml -i ../../inventory "$@" # test 'rescueable' default (true) ansible-playbook include_role_error_handling.yml "$@" # test 'rescueable' explicit true ansible-playbook include_role_error_handling.yml "$@" -e '{"rescueme": true}' # test 'rescueable' explicit false [[ $(ansible-playbook include_role_error_handling.yml "$@" -e '{"rescueme": false}') != 0 ]] # ensure imports are not rescuable [[ $(ansible-playbook import_no_rescue.yml "$@") != 0 ]] # test for missing task_from when missing tasks/ ansible-playbook include_role_missing.yml "$@"
unknown
github
https://github.com/ansible/ansible
test/integration/targets/includes/runme.sh
# # This file is part of the LibreOffice project. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # # This file incorporates work covered by the following license notice: # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed # with this work for additional information regarding copyright # ownership. The ASF licenses this file to you under the Apache # License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. You may obtain a copy of # the License at http://www.apache.org/licenses/LICENSE-2.0 . # import unohelper import traceback from .FaxWizardDialogImpl import FaxWizardDialogImpl, Desktop from com.sun.star.lang import XServiceInfo from com.sun.star.task import XJobExecutor # pythonloader looks for a static g_ImplementationHelper variable g_ImplementationHelper = unohelper.ImplementationHelper() g_implName = "com.sun.star.wizards.fax.CallWizard" # implement a UNO component by deriving from the standard unohelper.Base class # and from the interface(s) you want to implement. 
class CallWizard(unohelper.Base, XJobExecutor, XServiceInfo): def __init__(self, ctx): # store the component context for later use self.ctx = ctx def trigger(self, args): try: fw = FaxWizardDialogImpl(self.ctx.ServiceManager) fw.startWizard(self.ctx.ServiceManager) except Exception as e: print ("Wizard failure exception " + str(type(e)) + " message " + str(e) + " args " + str(e.args) + traceback.format_exc()) @classmethod def callRemote(self): #Call the wizard remotely(see README) try: ConnectStr = \ "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext" xLocMSF = Desktop.connect(ConnectStr) lw = FaxWizardDialogImpl(xLocMSF) lw.startWizard(xLocMSF) except Exception as e: print ("Wizard failure exception " + str(type(e)) + " message " + str(e) + " args " + str(e.args) + traceback.format_exc()) def getImplementationName(self): return g_implName def supportsService(self, ServiceName): return g_ImplementationHelper.supportsService(g_implName, ServiceName) def getSupportedServiceNames(self): return g_ImplementationHelper.getSupportedServiceNames(g_implName) g_ImplementationHelper.addImplementation( \ CallWizard, # UNO object class g_implName, # implemtenation name ("com.sun.star.task.Job",),) # list of implemented services # (the only service) # vim:set shiftwidth=4 softtabstop=4 expandtab:
unknown
codeparrot/codeparrot-clean
import os import warnings from pathlib import Path import pytest from scrapy.utils.misc import set_environ from scrapy.utils.project import data_path, get_project_settings @pytest.fixture def proj_path(tmp_path): prev_dir = Path.cwd() project_dir = tmp_path try: os.chdir(project_dir) Path("scrapy.cfg").touch() yield project_dir finally: os.chdir(prev_dir) def test_data_path_outside_project(): assert str(Path(".scrapy", "somepath")) == data_path("somepath") abspath = str(Path(os.path.sep, "absolute", "path")) assert abspath == data_path(abspath) def test_data_path_inside_project(proj_path: Path) -> None: expected = proj_path / ".scrapy" / "somepath" assert expected.resolve() == Path(data_path("somepath")).resolve() abspath = str(Path(os.path.sep, "absolute", "path").resolve()) assert abspath == data_path(abspath) class TestGetProjectSettings: def test_valid_envvar(self): value = "tests.test_cmdline.settings" envvars = { "SCRAPY_SETTINGS_MODULE": value, } with warnings.catch_warnings(): warnings.simplefilter("error") with set_environ(**envvars): settings = get_project_settings() assert settings.get("SETTINGS_MODULE") == value def test_invalid_envvar(self): envvars = { "SCRAPY_FOO": "bar", } with set_environ(**envvars): settings = get_project_settings() assert settings.get("SCRAPY_FOO") is None def test_valid_and_invalid_envvars(self): value = "tests.test_cmdline.settings" envvars = { "SCRAPY_FOO": "bar", "SCRAPY_SETTINGS_MODULE": value, } with set_environ(**envvars): settings = get_project_settings() assert settings.get("SETTINGS_MODULE") == value assert settings.get("SCRAPY_FOO") is None
python
github
https://github.com/scrapy/scrapy
tests/test_utils_project.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # FIXME: This doesn't really provide a means for people to ask for # the service and release the service. The problem this # causes is that the selector has no simple means of shutting # down when no one is using it. # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- # """\ ====================================== NOTIFICATION OF SOCKET AND FILE EVENTS ====================================== The Selector component listens for events on sockets and sends out notifications. It is effectively a wrapper around the unix 'select' statement. Components request that the Selector component notify them when a supplied socket or file object is ready. The selectorComponent is a service that registers with the Coordinating Assistant Tracker (CAT). NOTE: The behaviour and API of this component changed in Kamaelia 0.4 and is likely to change again in the near future. Example Usage ------------- See the source code for TCPClient for an example of how the Selector component can be used. How does it work? ----------------- Selector is a service. Obtain it by calling the static method Selector.getSelectorService(...). 
Any existing instance will be returned, otherwise a new one is automatically created. This component ignores anything sent to its "inbox" and "control" inboxes. This component does not terminate. Register socket or file objects with the selector, to receive a one-shot notification when that file descriptor is ready. The file descriptor can be a python file object or socket object. The notification is one-shot - meaning you must resubmit your request every time you wish to receive a notification. Ensure you deregister the file object when closing the file/socket. You may do this even if you have already received the notification. The Selector component will be unable to handle notifications for any other descriptors if it still has a registered descriptor that has closed. Register for a notification by sending an one of the following messages to the "notify" inbox, as returned by Selector.getSelectorService(): * Kamaelia.KamaeliaIpc.newReader(caller, (component,inboxname), descriptor) * Kamaelia.KamaeliaIpc.newWriter(caller, (component,inboxname), descriptor) * Kamaelia.KamaeliaIpc.newExceptional(caller, (component,inboxname), descriptor) Choose which as appropriate: * a newReader() request will notify when there is data ready to be read on the descriptor * a newWriter() request will notify when writing to the descriptor will not block. * a newExceptional() request will notify when an exceptional event occurs on the specified descriptor. Selector will notify the taret component by sending the file/socket descriptor object to the target inbox the component provided. It then automatically deregisters the descriptor, unlinking from the target component's inbox. For a given descriptor for a given type of event (read/write/exceptional) only one notification will be sent when the event occurs. If multiple notification requests have been received, only the first is listened to; all others are ignored. 
Of course, once the notification as happened, or someone has requested that descriptor be deregistered, then someone can register for it once again. Deregister by sending on of the following messages to the "notify" inbox of Selector: * Kamaelia.KamaeliaIpc.removeReader(caller, descriptor) * Kamaelia.KamaeliaIpc.removeWriter(caller, descriptor) * Kamaelia.KamaeliaIpc.removeExceptional(caller, descriptor) It is advisable to send a deregister message when the corresponding file descriptor closes, in case you registered for a notification, but it has not occurred. """ import Axon from Axon.Ipc import shutdown import select, socket from Kamaelia.IPC import newReader, removeReader, newWriter, removeWriter, newExceptional, removeExceptional import Axon.CoordinatingAssistantTracker as cat from Axon.ThreadedComponent import threadedadaptivecommscomponent import time #import sys,traceback READERS,WRITERS, EXCEPTIONALS = 0, 1, 2 FAILHARD = False timeout = 5 class Selector(threadedadaptivecommscomponent): #Axon.AdaptiveCommsComponent.AdaptiveCommsComponent): # SmokeTests_Selector.test_SmokeTest """\ Selector() -> new Selector component Use Selector.getSelectorService(...) in preference as it returns an existing instance, or automatically creates a new one. """ Inboxes = { "control" : "Recieving a Axon.Ipc.shutdown() message here causes shutdown", "inbox" : "Not used at present", "notify" : "Used to be notified about things to select" } def __init__(self): super(Selector, self).__init__() self.trackedby = None def removeLinks(self, selectable, meta, selectables): """\ Removes a file descriptor (selectable). Removes the corresponding entry from meta and selectables; unlinks from the component to be notified; and deletes the corresponding outbox. 
""" # \ #print "removeLinks",selectable,meta,selectables # import pprint # print "REMOVING LINK" # pprint.pprint((selectable, meta)) try: replyService, outbox, Linkage = meta[selectable] self.unlink(thelinkage=Linkage) selectables.remove(selectable) self.deleteOutbox(outbox) del meta[selectable] Linkage = None except: pass def stop(self): if self.trackedby is not None: try: self.trackedby.deRegisterService("selector") except Axon.AxonExceptions.MultipleServiceDeletion: pass try: self.trackedby.deRegisterService("selectorshutdown") except Axon.AxonExceptions.MultipleServiceDeletion: pass super(Selector, self).stop() def addLinks(self, replyService, selectable, meta, selectables, boxBase): """\ Adds a file descriptor (selectable). Creates a corresponding outbox, with name based on boxBase; links it to the component that wants to be notified; adds the file descriptor to the set of selectables; and records the box and linkage info in meta. """ # print "ADDING LINK", replyService, selectable, meta if selectable not in meta: outbox = self.addOutbox(boxBase) L = self.link((self, outbox), replyService) meta[selectable] = replyService, outbox, L selectables.append(selectable) return L else: return meta[selectable][2] def handleNotify(self, meta, readers,writers, exceptionals): """\ Process requests to add and remove file descriptors (selectables) that arrive at the "notify" inbox. 
""" while self.dataReady("notify"): message = self.recv("notify") # \ #print type(message) if isinstance(message, removeReader): selectable = message.object self.removeLinks(selectable, meta[READERS], readers) message.object = None if isinstance(message, removeWriter): selectable = message.object self.removeLinks(selectable, meta[WRITERS], writers) message.object = None if isinstance(message, removeExceptional): selectable = message.object self.removeLinks(selectable, meta[EXCEPTIONALS], exceptionals) message.object = None if isinstance(message, newReader): replyService, selectable = message.object L = self.addLinks(replyService, selectable, meta[READERS], readers, "readerNotify") # print [str(x) for x in replyService], selectable # print "new reader",selectable # L.showtransit = 0 message.object = None if isinstance(message, newWriter): replyService, selectable = message.object L = self.addLinks(replyService, selectable, meta[WRITERS], writers, "writerNotify") L.showtransit = 0 message.object = None if isinstance(message, newExceptional): replyService, selectable = message.object self.addLinks(replyService, selectable, meta[EXCEPTIONALS], exceptionals, "exceptionalNotify") message.object = None def trackedBy(self, tracker): self.trackedby = tracker def main(self): """Main loop""" global timeout readers,writers, exceptionals = [],[], [] selections = [readers,writers, exceptionals] meta = [ {}, {}, {} ] if not self.anyReady(): self.sync() # momentary pause-ish thing last = 0 numberOfFailedSelectsDueToBadFileDescriptor = 0 shuttingDown = False timewithNone = 0 while 1: # SmokeTests_Selector.test_RunsForever if self.dataReady("control"): # print "recieved control message" message = self.recv("control") if isinstance(message,shutdown): # print "recieved shutdown message" shutdownStart = time.time() timeWithNooneUsing = 0 shuttingDown = True if self.trackedby is not None: # print "we are indeed tracked" try: self.trackedby.deRegisterService("selector") except 
Axon.AxonExceptions.MultipleServiceDeletion: pass try: self.trackedby.deRegisterService("selectorshutdown") except Axon.AxonExceptions.MultipleServiceDeletion: pass self.trackedby = None if shuttingDown: # print "we're shutting down" if len(readers) + len(writers) + len(exceptionals) == 0: if timeWithNooneUsing == 0: # print "starting timeout" timeWithNooneUsing = time.time() else: if time.time() - timeWithNooneUsing > timeout: # print "Yay, timed out!" break # exit the loop else: timeWithNooneUsing == 0 # reset this to zero if readers/writers/excepts goes up again... # else: # print "But someone is still using us...." # print readers, writers, exceptionals self.handleNotify(meta, readers,writers, exceptionals) if len(readers) + len(writers) + len(exceptionals) > 0: timewithNone = 0 try: read_write_except = select.select(readers, writers, exceptionals,0.05) #0.05 # print "RWE", readers, writers, exceptionals numberOfFailedSelectsDueToBadFileDescriptor = 0 for i in xrange(3): for selectable in read_write_except[i]: # try: replyService, outbox, linkage = meta[i][selectable] self.send(selectable, outbox) # print "sent",selectable,"to",outbox replyService, outbox, linkage = None, None, None # Note we remove the selectable until we know the reason for it being here has cleared. self.removeLinks(selectable, meta[i], selections[i]) # except KeyError, k: # pass except ValueError, e: if FAILHARD: raise e except socket.error, e: if e[0] == 9: numberOfFailedSelectsDueToBadFileDescriptor +=1 if numberOfFailedSelectsDueToBadFileDescriptor > 1000: # For the moment, we simply raise an exception. 
# We could brute force our way through the list of descriptors # to find the broken ones, and remove # print "We're failing here for some reason" # print "readers, writers, exceptionals", readers, writers, exceptionals raise e except select.error, e: if e[0] == 9: numberOfFailedSelectsDueToBadFileDescriptor +=1 if numberOfFailedSelectsDueToBadFileDescriptor > 1000: # For the moment, we simply raise an exception. # We could brute force our way through the list of descriptors # to find the broken ones, and remove # print "We're failing here for some reason" # print "readers, writers, exceptionals", readers, writers, exceptionals raise e self.sync() elif not self.anyReady(): # no readers, writers, or anything - wait a few moments just in case timewithNone += 1 self.pause(0.5) # pause - we're not selecting on anything, timeout becuase of shutdown timeout needs else: timewithNone += 1 # print "HMM" if timewithNone > 6: # XXXX replace with STM code break if self.trackedby is not None: try: self.trackedby.deRegisterService("selector") except Axon.AxonExceptions.MultipleServiceDeletion: pass try: self.trackedby.deRegisterService("selectorshutdown") except Axon.AxonExceptions.MultipleServiceDeletion: pass if self.trackedby is not None: try: self.trackedby.deRegisterService("selector") except Axon.AxonExceptions.MultipleServiceDeletion: pass try: self.trackedby.deRegisterService("selectorshutdown") except Axon.AxonExceptions.MultipleServiceDeletion: pass self.trackedby = None # print "SELECTOR HAS EXITTED" def setSelectorServices(selector, tracker = None): """\ Sets the given selector as the service for the selected tracker or the default one. 
(static method) """ if not tracker: tracker = cat.coordinatingassistanttracker.getcat() tracker.registerService("selector", selector, "notify") tracker.registerService("selectorshutdown", selector, "control") selector.trackedBy(tracker) setSelectorServices = staticmethod(setSelectorServices) def getSelectorServices(tracker=None): # STATIC METHOD """\ Returns any live selector registered with the specified (or default) tracker, or creates one for the system to use. (static method) """ if tracker is None: tracker = cat.coordinatingassistanttracker.getcat() try: service = tracker.retrieveService("selector") shutdownservice = tracker.retrieveService("selectorshutdown") return service, shutdownservice, None except KeyError: selector = Selector() Selector.setSelectorServices(selector, tracker) service=(selector,"notify") shutdownservice=(selector,"control") return service, shutdownservice, selector getSelectorServices = staticmethod(getSelectorServices) __kamaelia_components__ = ( Selector, )
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # For copyright and license notices, see __openerp__.py file in root directory ############################################################################## from openerp import models, fields, api class WizCreateFictitiousOf(models.TransientModel): _name = "wiz.create.fictitious.of" date_planned = fields.Datetime( string='Scheduled Date', required=True, default=fields.Datetime.now) load_on_product = fields.Boolean("Load cost on product") project_id = fields.Many2one("project.project", string="Project") @api.multi def do_create_fictitious_of(self): production_obj = self.env['mrp.production'] product_obj = self.env['product.product'] routing_obj = self.env['mrp.routing'] self.ensure_one() active_ids = self.env.context['active_ids'] active_model = self.env.context['active_model'] production_list = [] if active_model == 'product.template': cond = [('product_tmpl_id', 'in', active_ids)] product_list = product_obj.search(cond) else: product_list = product_obj.browse(active_ids) for product in product_list: vals = {'product_id': product.id, 'product_template': product.product_tmpl_id.id, 'product_qty': 1, 'date_planned': self.date_planned, 'user_id': self._uid, 'active': False, 'product_uom': product.uom_id.id, 'project_id': self.project_id.id, 'analytic_account_id': ( self.project_id.analytic_account_id.id) } prod_vals = production_obj.product_id_change(product.id, 1)['value'] vals.update(prod_vals) if 'routing_id' in vals: routing = routing_obj.browse(vals['routing_id']) product_qty = production_obj._get_min_qty_for_production( routing) or 1 vals['product_qty'] = product_qty prod_vals = production_obj.product_id_change( product.id, product_qty)['value'] vals.update(prod_vals) vals['product_attributes'] = [tuple([0, 0, line]) for line in vals.get('product_attributes', [])] new_production = production_obj.create(vals) new_production.action_compute() 
new_production.calculate_production_estimated_cost() production_list.append(new_production.id) if self.load_on_product: for production_id in production_list: try: production = production_obj.browse(production_id) production.load_product_std_price() except: continue return {'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'mrp.production', 'type': 'ir.actions.act_window', 'domain': "[('id','in'," + str(production_list) + "), " "('active','=',False)]" }
unknown
codeparrot/codeparrot-clean
--- title: useLocation --- # useLocation <!-- ⚠️ ⚠️ IMPORTANT ⚠️ ⚠️ Thank you for helping improve our documentation! This file is auto-generated from the JSDoc comments in the source code, so please edit the JSDoc comments in the file below and this file will be re-generated once those changes are merged. https://github.com/remix-run/react-router/blob/main/packages/react-router/lib/hooks.tsx --> [MODES: framework, data, declarative] ## Summary [Reference Documentation ↗](https://api.reactrouter.com/v7/functions/react-router.useLocation.html) Returns the current [`Location`](https://api.reactrouter.com/v7/interfaces/react-router.Location.html). This can be useful if you'd like to perform some side effect whenever it changes. ```tsx import * as React from 'react' import { useLocation } from 'react-router' function SomeComponent() { let location = useLocation() React.useEffect(() => { // Google Analytics ga('send', 'pageview') }, [location]); return ( // ... ); } ``` ## Signature ```tsx function useLocation(): Location ``` ## Returns The current [`Location`](https://api.reactrouter.com/v7/interfaces/react-router.Location.html) object
unknown
github
https://github.com/remix-run/react-router
docs/api/hooks/useLocation.md
global: extra_scrape_metrics: false scrape_configs: - job_name: prometheus static_configs: - targets: ['localhost:8080']
unknown
github
https://github.com/prometheus/prometheus
config/testdata/global_disable_extra_scrape_metrics.good.yml
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> *This model was released on 2023-09-20 and added to Hugging Face Transformers on 2025-08-19.* <div style="float: right;"> <div class="flex flex-wrap space-x-1"> <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white"> <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat"> <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div> </div> # KOSMOS-2.5 The Kosmos-2.5 model was proposed in [KOSMOS-2.5: A Multimodal Literate Model](https://huggingface.co/papers/2309.11419/) by Microsoft. The abstract from the paper is the following: *We present Kosmos-2.5, a multimodal literate model for machine reading of text-intensive images. Pre-trained on large-scale text-intensive images, Kosmos-2.5 excels in two distinct yet cooperative transcription tasks: (1) generating spatially-aware text blocks, where each block of text is assigned its spatial coordinates within the image, and (2) producing structured text output that captures styles and structures into the markdown format. This unified multimodal literate capability is achieved through a shared Transformer architecture, task-specific prompts, and flexible text representations. We evaluate Kosmos-2.5 on end-to-end document-level text recognition and image-to-markdown text generation. 
Furthermore, the model can be readily adapted for any text-intensive image understanding task with different prompts through supervised fine-tuning, making it a general-purpose tool for real-world applications involving text-rich images. This work also paves the way for the future scaling of multimodal large language models.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/kosmos2_5_ocr.png" alt="drawing" width="600"/> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/kosmos2_5_md.png" alt="drawing" width="600"/> <small> Overview of tasks that KOSMOS-2.5 can handle. Taken from the <a href="https://huggingface.co/papers/2309.11419">original paper</a>. </small> The examples below demonstrates how to generate with [`AutoModel`], for both Markdown and OCR tasks. <hfoptions id="usage"> <hfoption id="AutoModel - Markdown Task"> ```py import re import torch import requests from PIL import Image, ImageDraw from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration from accelerate import Accelerator repo = "microsoft/kosmos-2.5" device = "cuda:0" dtype = torch.bfloat16 model = Kosmos2_5ForConditionalGeneration.from_pretrained(repo, device_map=device, dtype=dtype) processor = AutoProcessor.from_pretrained(repo) # sample image url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png" image = Image.open(requests.get(url, stream=True).raw) prompt = "<md>" inputs = processor(text=prompt, images=image, return_tensors="pt") height, width = inputs.pop("height"), inputs.pop("width") raw_width, raw_height = image.size scale_height = raw_height / height scale_width = raw_width / width inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()} inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype) generated_ids = model.generate( **inputs, max_new_tokens=1024, ) generated_text = 
processor.batch_decode(generated_ids, skip_special_tokens=True) print(generated_text[0]) ``` </hfoption> <hfoption id="AutoModel - OCR Task"> ```py import re import torch import requests from PIL import Image, ImageDraw from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration from accelerate import Accelerator repo = "microsoft/kosmos-2.5" device = "cuda:0" dtype = torch.bfloat16 model = Kosmos2_5ForConditionalGeneration.from_pretrained(repo, device_map=device, dtype=dtype) processor = AutoProcessor.from_pretrained(repo) # sample image url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png" image = Image.open(requests.get(url, stream=True).raw) # bs = 1 prompt = "<ocr>" inputs = processor(text=prompt, images=image, return_tensors="pt") height, width = inputs.pop("height"), inputs.pop("width") raw_width, raw_height = image.size scale_height = raw_height / height scale_width = raw_width / width # bs > 1, batch generation # inputs = processor(text=[prompt, prompt], images=[image,image], return_tensors="pt") # height, width = inputs.pop("height"), inputs.pop("width") # raw_width, raw_height = image.size # scale_height = raw_height / height[0] # scale_width = raw_width / width[0] inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()} inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype) generated_ids = model.generate( **inputs, max_new_tokens=1024, ) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) def post_process(y, scale_height, scale_width): y = y.replace(prompt, "") if "<md>" in prompt: return y pattern = r"<bbox><x_\d+><y_\d+><x_\d+><y_\d+></bbox>" bboxs_raw = re.findall(pattern, y) lines = re.split(pattern, y)[1:] bboxs = [re.findall(r"\d+", i) for i in bboxs_raw] bboxs = [[int(j) for j in i] for i in bboxs] info = "" for i in range(len(lines)): box = bboxs[i] x0, y0, x1, y1 = box if not (x0 >= x1 or y0 >= y1): x0 = int(x0 * scale_width) y0 = int(y0 
* scale_height) x1 = int(x1 * scale_width) y1 = int(y1 * scale_height) info += f"{x0},{y0},{x1},{y0},{x1},{y1},{x0},{y1},{lines[i]}" return info output_text = post_process(generated_text[0], scale_height, scale_width) print(output_text) draw = ImageDraw.Draw(image) lines = output_text.split("\n") for line in lines: # draw the bounding box line = list(line.split(",")) if len(line) < 8: continue line = list(map(int, line[:8])) draw.polygon(line, outline="red") image.save("output.png") ``` </hfoption> </hfoptions> ## Chat version The authors also released Kosmos-2.5 Chat, which is a chat version optimized for document understanding. You can use it like so: ```python import re import torch import requests from PIL import Image, ImageDraw from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration repo = "microsoft/kosmos-2.5-chat" device = "cuda:0" dtype = torch.bfloat16 model = Kosmos2_5ForConditionalGeneration.from_pretrained(repo, device_map=device, torch_dtype=dtype, attn_implementation="flash_attention_2") processor = AutoProcessor.from_pretrained(repo) # sample image url = "https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png" image = Image.open(requests.get(url, stream=True).raw) question = "What is the sub total of the receipt?" template = "<md>A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
USER: {} ASSISTANT:" prompt = template.format(question) inputs = processor(text=prompt, images=image, return_tensors="pt") height, width = inputs.pop("height"), inputs.pop("width") raw_width, raw_height = image.size scale_height = raw_height / height scale_width = raw_width / width inputs = {k: v.to(device) if v is not None else None for k, v in inputs.items()} inputs["flattened_patches"] = inputs["flattened_patches"].to(dtype) generated_ids = model.generate( **inputs, max_new_tokens=1024, ) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) print(generated_text[0]) ``` ## Kosmos2_5Config [[autodoc]] Kosmos2_5Config ## Kosmos2_5ImageProcessor [[autodoc]] Kosmos2_5ImageProcessor - preprocess ## Kosmos2_5ImageProcessorFast [[autodoc]] Kosmos2_5ImageProcessorFast - preprocess ## Kosmos2_5Processor [[autodoc]] Kosmos2_5Processor - __call__ ## Kosmos2_5Model [[autodoc]] Kosmos2_5Model - forward ## Kosmos2_5ForConditionalGeneration [[autodoc]] Kosmos2_5ForConditionalGeneration - forward
unknown
github
https://github.com/huggingface/transformers
docs/source/en/model_doc/kosmos2_5.md
/* Copyright (c) 2007, 2025, Oracle and/or its affiliates. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License, version 2.0, as published by the Free Software Foundation. This program is designed to work with certain software (including but not limited to OpenSSL) that is licensed under separate terms, as designated in a particular file or component or in included license documentation. The authors of MySQL hereby grant you an additional permission to link the program and your derivative works with the separately licensed software that they have either included with the program or referenced in the documentation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0, for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _lf_types_h #define _lf_types_h #include "my_inttypes.h" #define LF_HASH_UNIQUE 1 #define MY_LF_ERRPTR ((void *)(intptr)1) /** Callback for extracting key and key length from user data in a LF_HASH. @param arg Pointer to user data. @param[out] length Store key length here. @return Pointer to key to be hashed. @note Was my_hash_get_key, with lots of C-style casting when calling my_hash_init. Renamed to force build error (since signature changed) in case someone keeps following that coding style. */ typedef const uchar *(*hash_get_key_function)(const uchar *arg, size_t *length); /* memory allocator, lf_alloc-pin.c */ typedef void lf_allocator_func(uchar *); typedef int lf_hash_match_func(const uchar *el, void *arg); typedef void lf_hash_init_func(uchar *dst, const uchar *src); #endif
c
github
https://github.com/mysql/mysql-server
include/lf_types.h
import lldb import re import testutils as test def runScenario(assembly, debugger, target): process = target.GetProcess() res = lldb.SBCommandReturnObject() ci = debugger.GetCommandInterpreter() # Run debugger, wait until libcoreclr is loaded, # set breakpoint at Test.Main and stop there test.stop_in_main(debugger, assembly) ci.HandleCommand("name2ee " + assembly + " Test.Main", res) print(res.GetOutput()) print(res.GetError()) # Interpreter must have this command and able to run it test.assertTrue(res.Succeeded()) output = res.GetOutput() # Output is not empty test.assertTrue(len(output) > 0) match = re.search('JITTED Code Address:\s+([0-9a-fA-F]+)', output) # Line matched test.assertTrue(match) groups = match.groups() # Match has a single subgroup test.assertEqual(len(groups), 1) jit_addr = groups[0] # Address must be a hex number test.assertTrue(test.is_hexnum(jit_addr)) ci.HandleCommand("ip2md " + jit_addr, res) print(res.GetOutput()) print(res.GetError()) # Interpreter must have this command and able to run it test.assertTrue(res.Succeeded()) output = res.GetOutput() # Output is not empty test.assertTrue(len(output) > 0) # Specific string must be in the output test.assertNotEqual(output.find("MethodDesc:"), -1) # TODO: test other use cases # Continue current process and checks its exit code test.exit_lldb(debugger, assembly)
unknown
codeparrot/codeparrot-clean
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.click', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## log.h (module 'core'): ns3::LogLevel [enumeration] module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core') ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] 
module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] module.add_class('Inet6SocketAddress', import_from_module='ns.network') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] module.add_class('InetSocketAddress', import_from_module='ns.network') ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class] 
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration] module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## log.h (module 'core'): ns3::LogComponent [class] module.add_class('LogComponent', import_from_module='ns.core') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## non-copyable.h (module 'core'): ns3::NonCopyable [class] module.add_class('NonCopyable', destructor_visibility='protected', import_from_module='ns.core') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', 
outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## log.h (module 'core'): ns3::ParameterLogger [class] module.add_class('ParameterLogger', import_from_module='ns.core') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class] module.add_class('SystemWallClockMs', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 
'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration] module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class] module.add_class('Ipv4Header', 
import_from_module='ns.internet', parent=root_module['ns3::Header']) ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration] module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration] module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 
'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, 
import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', 
automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::SocketPriority [enumeration] module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], 
import_from_module='ns.network') ## socket.h (module 'network'): ns3::Socket::Ipv6MulticastFilterMode [enumeration] module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network') ## socket.h (module 'network'): ns3::SocketIpTosTag [class] module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag [class] module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketIpv6TclassTag [class] module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketPriorityTag [class] module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## 
trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class] module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor']) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class] module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## attribute.h (module 
'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## ipv4.h (module 'internet'): ns3::Ipv4 [class] module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface [class] module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv4-l3-click-protocol.h (module 'click'): ns3::Ipv4L3ClickProtocol [class] module.add_class('Ipv4L3ClickProtocol', parent=root_module['ns3::Ipv4']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class] module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class] module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class] module.add_class('Ipv4RoutingProtocol', 
import_from_module='ns.internet', parent=root_module['ns3::Object']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] 
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-click-routing.h (module 'click'): ns3::Ipv4ClickRouting [class] module.add_class('Ipv4ClickRouting', parent=root_module['ns3::Ipv4RoutingProtocol']) module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type=u'map') module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector') module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map') typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *', u'ns3::LogTimePrinter') typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) **', u'ns3::LogTimePrinter*') typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *&', 
# NOTE(review): Machine-generated binding-registration code (pybindgen output
# for the ns-3 "click" Python bindings). Every string argument below (C++ type
# names, method names, parameter specs) is load-bearing metadata consumed by
# pybindgen — do not hand-edit; regenerate the bindings from the headers
# instead. The code in this region is left byte-identical; only comment lines
# have been added.
# The fragment immediately below is the tail of a register_types_* function
# whose start lies above this region: it finishes the LogTimePrinter /
# LogNodePrinter type aliases and registers the nested FatalImpl, Hash and
# TracedValueCallback namespace modules.
u'ns3::LogTimePrinter&') typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *', u'ns3::LogNodePrinter') typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) **', u'ns3::LogNodePrinter*') typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *&', u'ns3::LogNodePrinter&') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the namespace TracedValueCallback nested_module = module.add_cpp_namespace('TracedValueCallback') register_types_ns3_TracedValueCallback(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def 
register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_types_ns3_TracedValueCallback(module): root_module = module.get_root() typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&') def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator']) register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item']) register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList']) register_Ns3ByteTagListIterator_methods(root_module, 
root_module['ns3::ByteTagList::Iterator']) register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress']) register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent']) register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address']) register_Ns3NonCopyable_methods(root_module, root_module['ns3::NonCopyable']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata']) register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item']) register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator']) register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator']) register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item']) register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList']) register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData']) register_Ns3ParameterLogger_methods(root_module, 
root_module['ns3::ParameterLogger']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs']) register_Ns3Tag_methods(root_module, root_module['ns3::Tag']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk']) register_Ns3Header_methods(root_module, root_module['ns3::Header']) register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< 
ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >']) register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >']) register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) 
# register_methods continues: per-class method-registration calls for the
# Socket/Time/attribute-value classes and the click-specific protocol classes.
register_Ns3Socket_methods(root_module, root_module['ns3::Socket']) register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag']) register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag']) register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag']) register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag']) register_Ns3SocketPriorityTag_methods(root_module, root_module['ns3::SocketPriorityTag']) register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor']) register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4Interface_methods(root_module, root_module['ns3::Ipv4Interface']) 
register_Ns3Ipv4L3ClickProtocol_methods(root_module, root_module['ns3::Ipv4L3ClickProtocol']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute']) register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route']) register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker']) register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector']) register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper']) register_Ns3Packet_methods(root_module, root_module['ns3::Packet']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3Ipv4ClickRouting_methods(root_module, root_module['ns3::Ipv4ClickRouting']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) 
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 
'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> 
checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## 
attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Buffer_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor] cls.add_constructor([param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function] cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function] cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function] cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): ns3::Buffer 
ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function] cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3BufferIterator_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor] cls.add_constructor([]) 
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function] cls.add_method('GetRemainingSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 
'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', 
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 
'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): 
ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function] cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member 
function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## 
callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Inet6SocketAddress_methods(root_module, cls): ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) 
[copy constructor] cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6) [constructor] cls.add_constructor([param('char const *', 'ipv6')]) ## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function] cls.add_method('ConvertFrom', 'ns3::Inet6SocketAddress', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function] cls.add_method('GetIpv6', 'ns3::Ipv6Address', [], is_const=True) ## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): void 
ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function] cls.add_method('SetIpv6', 'void', [param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3InetSocketAddress_methods(root_module, cls): ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor] cls.add_constructor([param('char const *', 'ipv4')]) ## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::InetSocketAddress', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function] cls.add_method('GetIpv4', 
'ns3::Ipv4Address', [], is_const=True) ## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet-socket-address.h (module 'network'): uint8_t ns3::InetSocketAddress::GetTos() const [member function] cls.add_method('GetTos', 'uint8_t', [], is_const=True) ## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ipv4Address', 'address')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetTos(uint8_t tos) [member function] cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## 
ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], 
is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] 
cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor] cls.add_constructor([]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [copy constructor] cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')]) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function] cls.add_method('GetLocal', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function] cls.add_method('GetMask', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function] cls.add_method('GetScope', 'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function] cls.add_method('IsSecondary', 'bool', [], is_const=True) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) 
[member function] cls.add_method('SetBroadcast', 'void', [param('ns3::Ipv4Address', 'broadcast')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function] cls.add_method('SetLocal', 'void', [param('ns3::Ipv4Address', 'local')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function] cls.add_method('SetMask', 'void', [param('ns3::Ipv4Mask', 'mask')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function] cls.add_method('SetPrimary', 'void', []) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SetScope', 'void', [param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')]) ## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function] cls.add_method('SetSecondary', 'void', []) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): 
uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) 
[constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function] cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function] cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function] cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function] cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() 
[member function] cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function] cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function] cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function] cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function] cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function] cls.add_method('IsDocumentation', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function] cls.add_method('IsEqual', 
'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function] cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function] cls.add_method('IsLinkLocal', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function] cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function] cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static 
ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function] cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function] cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function] cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function] cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 
'buf')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function] cls.add_method('Set', 'void', [param('uint8_t *', 'address')]) return def register_Ns3Ipv6Prefix_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor] cls.add_constructor([param('uint8_t *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor] cls.add_constructor([param('char const *', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor] cls.add_constructor([param('uint8_t', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')]) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function] cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True) ## 
ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True) ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) return def register_Ns3LogComponent_methods(root_module, cls): ## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [copy constructor] cls.add_constructor([param('ns3::LogComponent const &', 'arg0')]) ## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LOG_NONE) [constructor] cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LOG_NONE')]) ## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function] cls.add_method('Disable', 'void', [param('ns3::LogLevel const', 'level')]) ## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function] cls.add_method('Enable', 'void', [param('ns3::LogLevel const', 'level')]) ## log.h (module 'core'): std::string ns3::LogComponent::File() const [member 
function] cls.add_method('File', 'std::string', [], is_const=True) ## log.h (module 'core'): static std::map<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,ns3::LogComponent*,std::less<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >,std::allocator<std::pair<const std::basic_string<char, std::char_traits<char>, std::allocator<char> >, ns3::LogComponent*> > > * ns3::LogComponent::GetComponentList() [member function] cls.add_method('GetComponentList', 'std::map< std::string, ns3::LogComponent * > *', [], is_static=True) ## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function] cls.add_method('GetLevelLabel', 'std::string', [param('ns3::LogLevel const', 'level')], is_static=True) ## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function] cls.add_method('IsEnabled', 'bool', [param('ns3::LogLevel const', 'level')], is_const=True) ## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function] cls.add_method('IsNoneEnabled', 'bool', [], is_const=True) ## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function] cls.add_method('Name', 'char const *', [], is_const=True) ## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function] cls.add_method('SetMask', 'void', [param('ns3::LogLevel const', 'level')]) return def register_Ns3Mac48Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor] cls.add_constructor([]) ## 
mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor] cls.add_constructor([param('char const *', 'str')]) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function] cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function] cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')]) ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function] cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function] cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function] 
cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function] cls.add_method('IsGroup', 'bool', [], is_const=True) ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) return def register_Ns3NonCopyable_methods(root_module, cls): ## non-copyable.h (module 'core'): ns3::NonCopyable::NonCopyable() [constructor] cls.add_constructor([], visibility='protected') return def register_Ns3ObjectBase_methods(root_module, cls): ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor] cls.add_constructor([]) ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')]) ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function] cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True) ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() 
[member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function] cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function] cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')]) ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function] cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList 
const &', 'attributes')], visibility='protected') ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function] cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectDeleter_methods(root_module, cls): ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor] cls.add_constructor([]) ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')]) ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function] cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True) return def register_Ns3PacketMetadata_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor] cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t 
size) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True) ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function] cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function] cls.add_method('Enable', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## packet-metadata.h (module 'network'): void 
ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function] cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function] cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')]) ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3PacketMetadataItem_methods(root_module, cls): ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor] cls.add_constructor([]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')]) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable] cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable] cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable] cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable] cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable] cls.add_instance_attribute('isFragment', 'bool', is_const=False) ## packet-metadata.h (module 'network'): 
ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

# Bind ns3::PacketMetadata::ItemIterator: both constructors plus HasNext()/Next().
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

# Bind ns3::PacketTagIterator: copy constructor plus HasNext()/Next().
def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

# Bind ns3::PacketTagIterator::Item: copy constructor, GetTag(), GetTypeId().
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

# Bind ns3::PacketTagList: constructors and the tag list manipulation API.
def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): 
ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor] cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')]) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable] cls.add_instance_attribute('count', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable] cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable] cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3ParameterLogger_methods(root_module, cls): ## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [copy constructor] cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')]) ## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor] cls.add_constructor([param('std::ostream &', 'os')]) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 
'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

# Bind ns3::SystemWallClockMs: constructors plus the wall-clock timing accessors.
def register_Ns3SystemWallClockMs_methods(root_module, cls):
    ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
    ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor]
    cls.add_constructor([])
    ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function]
    cls.add_method('End', 'int64_t', [])
    ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function]
    cls.add_method('GetElapsedReal', 'int64_t', [], is_const=True)
    ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function]
    cls.add_method('GetElapsedSystem', 'int64_t', [], is_const=True)
    ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function]
    cls.add_method('GetElapsedUser', 'int64_t', [], is_const=True)
    ## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function]
    cls.add_method('Start', 'void', [])
    return

# Bind the abstract ns3::Tag base class; serialization methods are pure virtual.
def register_Ns3Tag_methods(root_module, cls):
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

# Bind ns3::TagBuffer: constructors plus the fixed-width read/write accessors.
def register_Ns3TagBuffer_methods(root_module, cls):
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

# Bind ns3::TimeWithUnit: operator<< plus the two constructors.
def register_Ns3TimeWithUnit_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return

def register_Ns3TypeId_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h 
(module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', 
default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], 
is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], 
is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): 
ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return

# Bind the ns3::TypeId::TraceSourceInformation struct: constructors plus its public fields.
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return

# Bind the empty ns3::empty placeholder type (used to pad Callback template slots).
def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return

def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', 
param('ns3::int64x64_t const &', u'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor] cls.add_constructor([param('long double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 
# NOTE(review): auto-generated pybindgen binding registrations (ns-3 Python
# bindings).  This region of the file was whitespace-mangled (original
# newlines stripped); the text below restores conventional formatting WITHOUT
# altering any token.  The first line below is the tail of a '##' comment and
# registration function whose start lies before this chunk, and the final
# lines are the head of register_Ns3Socket_methods, which continues past it.
# Do not edit the registration strings by hand — they are emitted by the
# bindings generator and must match the C++ declarations exactly.
'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return

def register_Ns3Chunk_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Chunk (auto-generated)."""
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Header_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Header (auto-generated)."""
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Ipv4Header_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Ipv4Header (auto-generated)."""
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor]
    cls.add_constructor([])
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function]
    cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True)
    ## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function]
    cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function]
    cls.add_method('EnableChecksum', 'void', [])
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function]
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function]
    cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function]
    cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function]
    cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function]
    cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function]
    cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function]
    cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function]
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function]
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function]
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    ## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function]
    cls.add_method('IsChecksumOk', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function]
    cls.add_method('IsDontFragment', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function]
    cls.add_method('IsLastFragment', 'bool', [], is_const=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function]
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function]
    cls.add_method('SetDontFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function]
    cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function]
    cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function]
    cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function]
    cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function]
    cls.add_method('SetLastFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function]
    cls.add_method('SetMayFragment', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function]
    cls.add_method('SetMoreFragments', 'void', [])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function]
    cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function]
    cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function]
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function]
    cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')])
    ## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function]
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return

def register_Ns3Object_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Object (auto-generated)."""
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Object::AggregateIterator (auto-generated)."""
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::AttributeAccessor> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::AttributeChecker> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::AttributeValue> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::CallbackImplBase> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::Hash::Implementation> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::Ipv4MulticastRoute> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::Ipv4Route> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::NixVector> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::OutputStreamWrapper> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::Packet> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Auto-generated: SimpleRefCount<ns3::TraceSourceAccessor> bindings."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Socket_methods(root_module, cls):
    """Auto-generated: ns3::Socket bindings (definition continues past this chunk)."""
    ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Socket const &', 'arg0')])
    ## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
    cls.add_constructor([])
    ##
socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function] cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind() [member function] cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind6() [member function] cls.add_method('Bind6', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function] cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Close() [member function] cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function] cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function] cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function] cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function] cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', []) ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function] cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## 
socket.h (module 'network'): uint8_t ns3::Socket::GetIpTos() const [member function] cls.add_method('GetIpTos', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTtl() const [member function] cls.add_method('GetIpTtl', 'uint8_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function] cls.add_method('GetIpv6HopLimit', 'uint8_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function] cls.add_method('GetIpv6Tclass', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetPeerName(ns3::Address & address) const [member function] cls.add_method('GetPeerName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::Socket::GetPriority() const [member function] cls.add_method('GetPriority', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function] cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function] cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function] cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function] 
cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): static uint8_t ns3::Socket::IpTos2Priority(uint8_t ipTos) [member function] cls.add_method('IpTos2Priority', 'uint8_t', [param('uint8_t', 'ipTos')], is_static=True) ## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address, ns3::Socket::Ipv6MulticastFilterMode filterMode, std::vector<ns3::Ipv6Address,std::allocator<ns3::Ipv6Address> > sourceAddresses) [member function] cls.add_method('Ipv6JoinGroup', 'void', [param('ns3::Ipv6Address', 'address'), param('ns3::Socket::Ipv6MulticastFilterMode', 'filterMode'), param('std::vector< ns3::Ipv6Address >', 'sourceAddresses')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address) [member function] cls.add_method('Ipv6JoinGroup', 'void', [param('ns3::Ipv6Address', 'address')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::Ipv6LeaveGroup() [member function] cls.add_method('Ipv6LeaveGroup', 'void', [], is_virtual=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function] cls.add_method('IsIpRecvTos', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function] cls.add_method('IsIpRecvTtl', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function] cls.add_method('IsIpv6RecvHopLimit', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function] cls.add_method('IsIpv6RecvTclass', 'bool', [], is_const=True) ## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function] cls.add_method('IsRecvPktInfo', 'bool', 
[], is_const=True) ## socket.h (module 'network'): int ns3::Socket::Listen() [member function] cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', []) ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int 
# Auto-generated pybindgen API definitions for the ns-3 'network' module bindings.
# Each register_Ns3*_methods(root_module, cls) function below declares the
# constructors, methods and operators of one wrapped C++ class so pybindgen can
# emit the corresponding Python extension code.  The '##' comments preceding
# every call are emitted by the generator itself and quote the original C++
# declaration (header file, owning module, and full signature).
# NOTE(review): this file is machine generated by the ns-3 API scanner —
# regenerate it rather than editing these registrations by hand.
ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function] cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function] cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')]) ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function] cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')]) ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function] 
cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function] cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')]) ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function] cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')]) ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function] cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< 
void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')]) ## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function] cls.add_method('SetIpRecvTos', 'void', [param('bool', 'ipv4RecvTos')]) ## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function] cls.add_method('SetIpRecvTtl', 'void', [param('bool', 'ipv4RecvTtl')]) ## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function] cls.add_method('SetIpTos', 'void', [param('uint8_t', 'ipTos')]) ## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function] cls.add_method('SetIpTtl', 'void', [param('uint8_t', 'ipTtl')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function] cls.add_method('SetIpv6HopLimit', 'void', [param('uint8_t', 'ipHopLimit')], is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function] cls.add_method('SetIpv6RecvHopLimit', 'void', [param('bool', 'ipv6RecvHopLimit')]) ## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function] cls.add_method('SetIpv6RecvTclass', 'void', [param('bool', 'ipv6RecvTclass')]) ## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function] cls.add_method('SetIpv6Tclass', 'void', [param('int', 'ipTclass')]) ## socket.h (module 'network'): void ns3::Socket::SetPriority(uint8_t priority) [member function] cls.add_method('SetPriority', 'void', [param('uint8_t', 'priority')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function] cls.add_method('SetRecvCallback', 'void', 
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function] cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')]) ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function] cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')]) ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function] cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function] cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function] cls.add_method('IsManualIpTtl', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function] cls.add_method('IsManualIpv6HopLimit', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function] cls.add_method('IsManualIpv6Tclass', 'bool', [], is_const=True, visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function] cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected') ## socket.h (module 'network'): 
bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function] cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function] cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function] cls.add_method('NotifyDataRecv', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function] cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function] cls.add_method('NotifyErrorClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function] cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function] cls.add_method('NotifyNormalClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function] cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected') return def register_Ns3SocketIpTosTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag(ns3::SocketIpTosTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTosTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void 
ns3::SocketIpTosTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTosTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTosTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTosTag::GetTos() const [member function] cls.add_method('GetTos', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTosTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTosTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTosTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTosTag::SetTos(uint8_t tos) [member function] cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')]) return def register_Ns3SocketIpTtlTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h 
(module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag(ns3::SocketIpv6HopLimitTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpv6HopLimitTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpv6HopLimitTag::GetHopLimit() const [member function] 
cls.add_method('GetHopLimit', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6HopLimitTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpv6HopLimitTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6HopLimitTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::SetHopLimit(uint8_t hopLimit) [member function] cls.add_method('SetHopLimit', 'void', [param('uint8_t', 'hopLimit')]) return def register_Ns3SocketIpv6TclassTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag(ns3::SocketIpv6TclassTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpv6TclassTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6TclassTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 
'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpv6TclassTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpv6TclassTag::GetTclass() const [member function] cls.add_method('GetTclass', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6TclassTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::SetTclass(uint8_t tclass) [member function] cls.add_method('SetTclass', 'void', [param('uint8_t', 'tclass')]) return def register_Ns3SocketPriorityTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketPriorityTag::SocketPriorityTag(ns3::SocketPriorityTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketPriorityTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketPriorityTag::SocketPriorityTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketPriorityTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketPriorityTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t 
ns3::SocketPriorityTag::GetPriority() const [member function] cls.add_method('GetPriority', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): uint32_t ns3::SocketPriorityTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketPriorityTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketPriorityTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketPriorityTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketPriorityTag::SetPriority(uint8_t priority) [member function] cls.add_method('SetPriority', 'void', [param('uint8_t', 'priority')]) return def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function] cls.add_method('Disable', 'void', []) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function] cls.add_method('Enable', 'void', []) ## socket.h 
(module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right')) 
# ns3::Time (nstime.h, module 'core'): remaining operators, then constructors
# accepting every C++ integral/floating type plus int64x64_t and string, and
# the unit-accessor/conversion member functions registered below.
cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time 
ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() 
const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): 
static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', 
transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 
'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] 
# ns3::AttributeChecker: the interface methods below are registered with
# is_pure_virtual=True (CreateValidValue is a plain const member).
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, 
is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase 
const &', 'arg0')]) ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] 
cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3EmptyAttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True) return def register_Ns3EmptyAttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')]) ## 
attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): 
ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3Ipv4_methods(root_module, cls): ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')]) ## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor] cls.add_constructor([]) ## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('AddAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddInterface', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4::CreateRawSocket() [member function] cls.add_method('CreateRawSocket', 'ns3::Ptr< ns3::Socket >', [], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::DeleteRawSocket(ns3::Ptr<ns3::Socket> 
socket) [member function] cls.add_method('DeleteRawSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function] cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function] cls.add_method('GetInterfaceForAddress', 'int32_t', [param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function] cls.add_method('GetInterfaceForDevice', 'int32_t', [param('ns3::Ptr< ns3::NetDevice const >', 'device')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function] cls.add_method('GetInterfaceForPrefix', 'int32_t', [param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function] cls.add_method('GetMetric', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function] cls.add_method('GetMtu', 'uint16_t', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function] cls.add_method('GetNAddresses', 'uint32_t', [param('uint32_t', 
'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function] cls.add_method('GetNInterfaces', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function] cls.add_method('GetNetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber) const [member function] cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function] cls.add_method('GetProtocol', 'ns3::Ptr< ns3::IpL4Protocol >', [param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member function] cls.add_method('GetRoutingProtocol', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function] cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function] cls.add_method('Insert', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), 
param('uint32_t', 'interfaceIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function] cls.add_method('IsDestinationAddress', 'bool', [param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function] cls.add_method('IsForwarding', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function] cls.add_method('IsUp', 'bool', [param('uint32_t', 'interface')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function] cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function] cls.add_method('Remove', 'void', [param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function] cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function] cls.add_method('RemoveAddress', 'bool', [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const 
ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function] cls.add_method('SelectSourceAddress', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function] cls.add_method('SendWithHeader', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function] cls.add_method('SetDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function] cls.add_method('SetForwarding', 'void', [param('uint32_t', 'interface'), param('bool', 'val')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function] cls.add_method('SetMetric', 'void', [param('uint32_t', 'interface'), param('uint16_t', 'metric')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void 
ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function] cls.add_method('SetRoutingProtocol', 'void', [param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function] cls.add_method('SetUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SourceAddressSelection(uint32_t interface, ns3::Ipv4Address dest) [member function] cls.add_method('SourceAddressSelection', 'ns3::Ipv4Address', [param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'dest')], is_pure_virtual=True, is_virtual=True) ## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable] cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member function] cls.add_method('GetIpForward', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function] cls.add_method('GetWeakEsModel', 'bool', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function] cls.add_method('SetIpForward', 'void', [param('bool', 'forward')], is_pure_virtual=True, visibility='private', is_virtual=True) ## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function] cls.add_method('SetWeakEsModel', 'void', [param('bool', 'model')], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): 
ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def 
register_Ns3Ipv4Interface_methods(root_module, cls): ## ipv4-interface.h (module 'internet'): static ns3::TypeId ns3::Ipv4Interface::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-interface.h (module 'internet'): ns3::Ipv4Interface::Ipv4Interface() [constructor] cls.add_constructor([]) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetTrafficControl(ns3::Ptr<ns3::TrafficControlLayer> tc) [member function] cls.add_method('SetTrafficControl', 'void', [param('ns3::Ptr< ns3::TrafficControlLayer >', 'tc')]) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetArpCache(ns3::Ptr<ns3::ArpCache> arpCache) [member function] cls.add_method('SetArpCache', 'void', [param('ns3::Ptr< ns3::ArpCache >', 'arpCache')]) ## ipv4-interface.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Interface::GetDevice() const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True) ## ipv4-interface.h (module 'internet'): ns3::Ptr<ns3::ArpCache> ns3::Ipv4Interface::GetArpCache() const [member function] cls.add_method('GetArpCache', 'ns3::Ptr< ns3::ArpCache >', [], is_const=True) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetMetric(uint16_t metric) [member function] cls.add_method('SetMetric', 'void', [param('uint16_t', 'metric')]) ## ipv4-interface.h (module 'internet'): uint16_t ns3::Ipv4Interface::GetMetric() const [member function] cls.add_method('GetMetric', 'uint16_t', [], is_const=True) ## ipv4-interface.h (module 'internet'): bool 
ns3::Ipv4Interface::IsUp() const [member function] cls.add_method('IsUp', 'bool', [], is_const=True) ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsDown() const [member function] cls.add_method('IsDown', 'bool', [], is_const=True) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetUp() [member function] cls.add_method('SetUp', 'void', []) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetDown() [member function] cls.add_method('SetDown', 'void', []) ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::IsForwarding() const [member function] cls.add_method('IsForwarding', 'bool', [], is_const=True) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::SetForwarding(bool val) [member function] cls.add_method('SetForwarding', 'void', [param('bool', 'val')]) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::Send(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & hdr, ns3::Ipv4Address dest) [member function] cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'hdr'), param('ns3::Ipv4Address', 'dest')]) ## ipv4-interface.h (module 'internet'): bool ns3::Ipv4Interface::AddAddress(ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('AddAddress', 'bool', [param('ns3::Ipv4InterfaceAddress', 'address')]) ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::GetAddress(uint32_t index) const [member function] cls.add_method('GetAddress', 'ns3::Ipv4InterfaceAddress', [param('uint32_t', 'index')], is_const=True) ## ipv4-interface.h (module 'internet'): uint32_t ns3::Ipv4Interface::GetNAddresses() const [member function] cls.add_method('GetNAddresses', 'uint32_t', [], is_const=True) ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::RemoveAddress(uint32_t index) [member function] cls.add_method('RemoveAddress', 'ns3::Ipv4InterfaceAddress', 
[param('uint32_t', 'index')]) ## ipv4-interface.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4Interface::RemoveAddress(ns3::Ipv4Address address) [member function] cls.add_method('RemoveAddress', 'ns3::Ipv4InterfaceAddress', [param('ns3::Ipv4Address', 'address')]) ## ipv4-interface.h (module 'internet'): void ns3::Ipv4Interface::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3Ipv4L3ClickProtocol_methods(root_module, cls): ## ipv4-l3-click-protocol.h (module 'click'): ns3::Ipv4L3ClickProtocol::Ipv4L3ClickProtocol() [constructor] cls.add_constructor([]) ## ipv4-l3-click-protocol.h (module 'click'): ns3::Ipv4L3ClickProtocol::Ipv4L3ClickProtocol(ns3::Ipv4L3ClickProtocol const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4L3ClickProtocol const &', 'arg0')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, 
is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv4MulticastRoute_methods(root_module, cls): ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor] cls.add_constructor([]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function] cls.add_method('GetGroup', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function] cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<unsigned int const, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function] 
cls.add_method('GetOutputTtlMap', 'std::map< unsigned int, unsigned int >', [], is_const=True) ## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function] cls.add_method('GetParent', 'uint32_t', [], is_const=True) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function] cls.add_method('SetGroup', 'void', [param('ns3::Ipv4Address const', 'group')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function] cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address const', 'origin')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function] cls.add_method('SetOutputTtl', 'void', [param('uint32_t', 'oif'), param('uint32_t', 'ttl')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function] cls.add_method('SetParent', 'void', [param('uint32_t', 'iif')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable] cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable] cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True) return def register_Ns3Ipv4Route_methods(root_module, cls): cls.add_output_stream_operator() ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor] cls.add_constructor([]) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function] cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address 
ns3::Ipv4Route::GetGateway() const [member function] cls.add_method('GetGateway', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function] cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True) ## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function] cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'dest')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function] cls.add_method('SetGateway', 'void', [param('ns3::Ipv4Address', 'gw')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function] cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')]) ## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function] cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'src')]) return def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls): ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor] cls.add_constructor([]) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')]) ## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t 
interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyAddAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function] cls.add_method('NotifyInterfaceDown', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function] cls.add_method('NotifyInterfaceUp', 'void', [param('uint32_t', 'interface')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function] cls.add_method('NotifyRemoveAddress', 'void', [param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::S) const [member function] cls.add_method('PrintRoutingTable', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::S')], is_pure_virtual=True, is_const=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Callback<void,ns3::Ptr<ns3::Ipv4Route>,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ucb, ns3::Callback<void,ns3::Ptr<ns3::Ipv4MulticastRoute>,ns3::Ptr<const ns3::Packet>,const 
ns3::Ipv4Header&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> mcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,unsigned int,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> lcb, ns3::Callback<void,ns3::Ptr<const ns3::Packet>,const ns3::Ipv4Header&,ns3::Socket::SocketErrno,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ecb) [member function] cls.add_method('RouteInput', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function] cls.add_method('RouteOutput', 'ns3::Ptr< ns3::Ipv4Route >', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')], is_pure_virtual=True, is_virtual=True) ## ipv4-routing-protocol.h (module 'internet'): 
void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')], is_pure_virtual=True, is_virtual=True) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string 
ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): 
ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3Mac48AddressChecker_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')]) return def register_Ns3Mac48AddressValue_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'value')]) ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> 
checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True) ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')]) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] 
    # NOTE(review): auto-generated pybindgen bindings (ns-3). For permanent
    # changes, regenerate via the bindings scanner rather than hand-editing.
    cls.add_method('GetBroadcast', 
                   'ns3::Address', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 
                   'ns3::Ptr< ns3::Channel >', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 
                   'uint32_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 
                   'uint16_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 
                   'ns3::Address', 
                   [param('ns3::Ipv4Address', 'multicastGroup')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 
                   'ns3::Address', 
                   [param('ns3::Ipv6Address', 'addr')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 
                   'ns3::Ptr< ns3::Node >', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 
                   'bool', 
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 
                   'bool', 
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 
                   'void', 
                   [param('ns3::Address', 'address')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 
                   'void', 
                   [param('uint32_t const', 'index')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 
                   'bool', 
                   [param('uint16_t const', 'mtu')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 
                   'void', 
                   [param('ns3::Ptr< ns3::Node >', 'node')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 
                   'void', 
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 
                   'void', 
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    """Register ns3::NixVector constructors/methods on the pybindgen wrapper (generated)."""
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 
                   'void', 
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 
                   'uint32_t', 
                   [param('uint32_t', 'numberOfNeighbors')], 
                   is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 
                   'ns3::Ptr< ns3::NixVector >', 
                   [], 
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 
                   'uint32_t', 
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 
                   'uint32_t', 
                   [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 
                   'uint32_t', 
                   [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 
                   'uint32_t', 
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], 
                   is_const=True)
    return

def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register ns3::OutputStreamWrapper constructors/methods on the pybindgen wrapper (generated)."""
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor]
    cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')])
    ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
    cls.add_constructor([param('std::ostream *', 'os')])
    ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
    cls.add_method('GetStream', 
                   'std::ostream *', 
                   [])
    return

def register_Ns3Packet_methods(root_module, cls):
    """Register ns3::Packet constructors/methods on the pybindgen wrapper (generated)."""
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd', 
                   'void', 
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 
                   'void', 
                   [param('ns3::Tag const &', 'tag')], 
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 
                   'void', 
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 
                   'void', 
                   [param('ns3::Tag const &', 'tag')], 
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 
                   'void', 
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 
                   'void', 
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 
                   'ns3::PacketMetadata::ItemIterator', 
                   [], 
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 
                   'ns3::Ptr< ns3::Packet >', 
                   [], 
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 
                   'uint32_t', 
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], 
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 
                   'void', 
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')], 
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 
                   'ns3::Ptr< ns3::Packet >', 
                   [param('uint32_t', 'start'), param('uint32_t', 'length')], 
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', 
[param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function] cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t 
ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function] cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')]) ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function] cls.add_method('ToString', 'std::string', [], is_const=True) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h 
(module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): 
void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, 
is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3Ipv4ClickRouting_methods(root_module, cls): ## ipv4-click-routing.h (module 'click'): ns3::Ipv4ClickRouting::Ipv4ClickRouting() [constructor] cls.add_constructor([]) ## ipv4-click-routing.h (module 'click'): ns3::Ipv4ClickRouting::Ipv4ClickRouting(ns3::Ipv4ClickRouting const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4ClickRouting const &', 'arg0')]) return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 
'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): 
ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module 
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
import * as React from "react"; import * as ReactDOM from "react-dom/client"; import App from "./app"; ReactDOM.createRoot(document.getElementById("root")).render( <React.StrictMode> <App /> </React.StrictMode>, );
typescript
github
https://github.com/remix-run/react-router
examples/error-boundaries/src/main.tsx
import decimal import json import re from django.core import serializers from django.core.serializers.base import DeserializationError from django.db import models from django.test import TestCase, TransactionTestCase from django.test.utils import isolate_apps from .models import Score from .tests import SerializersTestBase, SerializersTransactionTestBase class JsonlSerializerTestCase(SerializersTestBase, TestCase): serializer_name = "jsonl" pkless_str = [ '{"pk": null,"model": "serializers.category","fields": {"name": "Reference"}}', '{"model": "serializers.category","fields": {"name": "Non-fiction"}}', ] pkless_str = "\n".join([s.replace("\n", "") for s in pkless_str]) mapping_ordering_str = ( '{"model": "serializers.article","pk": %(article_pk)s,' '"fields": {' '"author": %(author_pk)s,' '"headline": "Poker has no place on ESPN",' '"pub_date": "2006-06-16T11:00:00",' '"categories": [%(first_category_pk)s,%(second_category_pk)s],' '"meta_data": [],' '"topics": []}}\n' ) @staticmethod def _validate_output(serial_str): try: for line in serial_str.split("\n"): if line: json.loads(line) except Exception: return False else: return True @staticmethod def _get_pk_values(serial_str): serial_list = [json.loads(line) for line in serial_str.split("\n") if line] return [obj_dict["pk"] for obj_dict in serial_list] @staticmethod def _get_field_values(serial_str, field_name): serial_list = [json.loads(line) for line in serial_str.split("\n") if line] return [ obj_dict["fields"][field_name] for obj_dict in serial_list if field_name in obj_dict["fields"] ] def test_no_indentation(self): s = serializers.jsonl.Serializer() json_data = s.serialize([Score(score=5.0), Score(score=6.0)], indent=2) for line in json_data.splitlines(): self.assertIsNone(re.search(r".+,\s*$", line)) @isolate_apps("serializers") def test_custom_encoder(self): class ScoreDecimal(models.Model): score = models.DecimalField() class CustomJSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, 
decimal.Decimal): return str(o) return super().default(o) s = serializers.jsonl.Serializer() json_data = s.serialize( [ScoreDecimal(score=decimal.Decimal(1.0))], cls=CustomJSONEncoder, ) self.assertIn('"fields": {"score": "1"}', json_data) def test_json_deserializer_exception(self): with self.assertRaises(DeserializationError): for obj in serializers.deserialize("jsonl", """[{"pk":1}"""): pass def test_helpful_error_message_invalid_pk(self): """ If there is an invalid primary key, the error message contains the model associated with it. """ test_string = ( '{"pk": "badpk","model": "serializers.player",' '"fields": {"name": "Bob","rank": 1,"team": "Team"}}' ) with self.assertRaisesMessage( DeserializationError, "(serializers.player:pk=badpk)" ): list(serializers.deserialize("jsonl", test_string)) def test_helpful_error_message_invalid_field(self): """ If there is an invalid field value, the error message contains the model associated with it. """ test_string = ( '{"pk": "1","model": "serializers.player",' '"fields": {"name": "Bob","rank": "invalidint","team": "Team"}}' ) expected = "(serializers.player:pk=1) field_value was 'invalidint'" with self.assertRaisesMessage(DeserializationError, expected): list(serializers.deserialize("jsonl", test_string)) def test_helpful_error_message_for_foreign_keys(self): """ Invalid foreign keys with a natural key throws a helpful error message, such as what the failing key is. """ test_string = ( '{"pk": 1, "model": "serializers.category",' '"fields": {' '"name": "Unknown foreign key",' '"meta_data": ["doesnotexist","metadata"]}}' ) key = ["doesnotexist", "metadata"] expected = "(serializers.category:pk=1) field_value was '%r'" % key with self.assertRaisesMessage(DeserializationError, expected): list(serializers.deserialize("jsonl", test_string)) def test_helpful_error_message_for_many2many_non_natural(self): """ Invalid many-to-many keys throws a helpful error message. 
""" test_strings = [ """{ "pk": 1, "model": "serializers.article", "fields": { "author": 1, "headline": "Unknown many to many", "pub_date": "2014-09-15T10:35:00", "categories": [1, "doesnotexist"] } }""", """{ "pk": 1, "model": "serializers.author", "fields": {"name": "Agnes"} }""", """{ "pk": 1, "model": "serializers.category", "fields": {"name": "Reference"} }""", ] test_string = "\n".join([s.replace("\n", "") for s in test_strings]) expected = "(serializers.article:pk=1) field_value was 'doesnotexist'" with self.assertRaisesMessage(DeserializationError, expected): list(serializers.deserialize("jsonl", test_string)) def test_helpful_error_message_for_many2many_natural1(self): """ Invalid many-to-many keys throws a helpful error message where one of a list of natural keys is invalid. """ test_strings = [ """{ "pk": 1, "model": "serializers.categorymetadata", "fields": {"kind": "author","name": "meta1","value": "Agnes"} }""", """{ "pk": 1, "model": "serializers.article", "fields": { "author": 1, "headline": "Unknown many to many", "pub_date": "2014-09-15T10:35:00", "meta_data": [ ["author", "meta1"], ["doesnotexist", "meta1"], ["author", "meta1"] ] } }""", """{ "pk": 1, "model": "serializers.author", "fields": {"name": "Agnes"} }""", ] test_string = "\n".join([s.replace("\n", "") for s in test_strings]) key = ["doesnotexist", "meta1"] expected = "(serializers.article:pk=1) field_value was '%r'" % key with self.assertRaisesMessage(DeserializationError, expected): for obj in serializers.deserialize("jsonl", test_string): obj.save() def test_helpful_error_message_for_many2many_natural2(self): """ Invalid many-to-many keys throws a helpful error message where a natural many-to-many key has only a single value. 
""" test_strings = [ """{ "pk": 1, "model": "serializers.article", "fields": { "author": 1, "headline": "Unknown many to many", "pub_date": "2014-09-15T10:35:00", "meta_data": [1, "doesnotexist"] } }""", """{ "pk": 1, "model": "serializers.categorymetadata", "fields": {"kind": "author","name": "meta1","value": "Agnes"} }""", """{ "pk": 1, "model": "serializers.author", "fields": {"name": "Agnes"} }""", ] test_string = "\n".join([s.replace("\n", "") for s in test_strings]) expected = "(serializers.article:pk=1) field_value was 'doesnotexist'" with self.assertRaisesMessage(DeserializationError, expected): for obj in serializers.deserialize("jsonl", test_string, ignore=False): obj.save() def test_helpful_error_message_for_many2many_not_iterable(self): """ Not iterable many-to-many field value throws a helpful error message. """ test_string = ( '{"pk": 1,"model": "serializers.m2mdata","fields": {"data": null}}' ) expected = "(serializers.m2mdata:pk=1) field_value was 'None'" with self.assertRaisesMessage(DeserializationError, expected): next(serializers.deserialize("jsonl", test_string, ignore=False)) class JsonSerializerTransactionTestCase( SerializersTransactionTestBase, TransactionTestCase ): serializer_name = "jsonl" fwd_ref_str = [ """{ "pk": 1, "model": "serializers.article", "fields": { "headline": "Forward references pose no problem", "pub_date": "2006-06-16T15:00:00", "categories": [1], "author": 1 } }""", """{ "pk": 1, "model": "serializers.category", "fields": {"name": "Reference"} }""", """{ "pk": 1, "model": "serializers.author", "fields": {"name": "Agnes"} }""", ] fwd_ref_str = "\n".join([s.replace("\n", "") for s in fwd_ref_str])
python
github
https://github.com/django/django
tests/serializers/test_jsonl.py
/* * Copyright (C) 2016 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.graph; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.graph.Graphs.checkNonNegative; import com.google.common.annotations.Beta; import com.google.common.base.Optional; import com.google.errorprone.annotations.CanIgnoreReturnValue; /** * A builder for constructing instances of {@link MutableNetwork} or {@link ImmutableNetwork} with * user-defined properties. * * <p>A {@code Network} built by this class has the following default properties: * * <ul> * <li>does not allow parallel edges * <li>does not allow self-loops * <li>orders {@link Network#nodes()} and {@link Network#edges()} in the order in which the * elements were added (insertion order) * </ul> * * <p>{@code Network}s built by this class also guarantee that each collection-returning accessor * returns a <b>(live) unmodifiable view</b>; see <a * href="https://github.com/google/guava/wiki/GraphsExplained#accessor-behavior">the external * documentation</a> for details. 
* * <p>Examples of use: * * {@snippet : * // Building a mutable network * MutableNetwork<String, Integer> network = * NetworkBuilder.directed().allowsParallelEdges(true).build(); * flightNetwork.addEdge("LAX", "ATL", 3025); * flightNetwork.addEdge("LAX", "ATL", 1598); * flightNetwork.addEdge("ATL", "LAX", 2450); * * // Building a immutable network * ImmutableNetwork<String, Integer> immutableNetwork = * NetworkBuilder.directed() * .allowsParallelEdges(true) * .<String, Integer>immutable() * .addEdge("LAX", "ATL", 3025) * .addEdge("LAX", "ATL", 1598) * .addEdge("ATL", "LAX", 2450) * .build(); * } * * @author James Sexton * @author Joshua O'Madadhain * @param <N> The most general node type this builder will support. This is normally {@code Object} * unless it is constrained by using a method like {@link #nodeOrder}, or the builder is * constructed based on an existing {@code Network} using {@link #from(Network)}. * @param <E> The most general edge type this builder will support. This is normally {@code Object} * unless it is constrained by using a method like {@link #edgeOrder}, or the builder is * constructed based on an existing {@code Network} using {@link #from(Network)}. * @since 20.0 */ @Beta public final class NetworkBuilder<N, E> extends AbstractGraphBuilder<N> { boolean allowsParallelEdges = false; ElementOrder<? super E> edgeOrder = ElementOrder.insertion(); Optional<Integer> expectedEdgeCount = Optional.absent(); /** Creates a new instance with the specified edge directionality. */ private NetworkBuilder(boolean directed) { super(directed); } /** Returns a {@link NetworkBuilder} for building directed networks. */ public static NetworkBuilder<Object, Object> directed() { return new NetworkBuilder<>(true); } /** Returns a {@link NetworkBuilder} for building undirected networks. 
*/ public static NetworkBuilder<Object, Object> undirected() { return new NetworkBuilder<>(false); } /** * Returns a {@link NetworkBuilder} initialized with all properties queryable from {@code * network}. * * <p>The "queryable" properties are those that are exposed through the {@link Network} interface, * such as {@link Network#isDirected()}. Other properties, such as {@link * #expectedNodeCount(int)}, are not set in the new builder. */ public static <N, E> NetworkBuilder<N, E> from(Network<N, E> network) { return new NetworkBuilder<N, E>(network.isDirected()) .allowsParallelEdges(network.allowsParallelEdges()) .allowsSelfLoops(network.allowsSelfLoops()) .nodeOrder(network.nodeOrder()) .edgeOrder(network.edgeOrder()); } /** * Returns an {@link ImmutableNetwork.Builder} with the properties of this {@link NetworkBuilder}. * * <p>The returned builder can be used for populating an {@link ImmutableNetwork}. * * @since 28.0 */ public <N1 extends N, E1 extends E> ImmutableNetwork.Builder<N1, E1> immutable() { NetworkBuilder<N1, E1> castBuilder = cast(); return new ImmutableNetwork.Builder<>(castBuilder); } /** * Specifies whether the network will allow parallel edges. Attempting to add a parallel edge to a * network that does not allow them will throw an {@link UnsupportedOperationException}. * * <p>The default value is {@code false}. */ @CanIgnoreReturnValue public NetworkBuilder<N, E> allowsParallelEdges(boolean allowsParallelEdges) { this.allowsParallelEdges = allowsParallelEdges; return this; } /** * Specifies whether the network will allow self-loops (edges that connect a node to itself). * Attempting to add a self-loop to a network that does not allow them will throw an {@link * UnsupportedOperationException}. * * <p>The default value is {@code false}. */ @CanIgnoreReturnValue public NetworkBuilder<N, E> allowsSelfLoops(boolean allowsSelfLoops) { this.allowsSelfLoops = allowsSelfLoops; return this; } /** * Specifies the expected number of nodes in the network. 
* * @throws IllegalArgumentException if {@code expectedNodeCount} is negative */ @CanIgnoreReturnValue public NetworkBuilder<N, E> expectedNodeCount(int expectedNodeCount) { this.expectedNodeCount = Optional.of(checkNonNegative(expectedNodeCount)); return this; } /** * Specifies the expected number of edges in the network. * * @throws IllegalArgumentException if {@code expectedEdgeCount} is negative */ @CanIgnoreReturnValue public NetworkBuilder<N, E> expectedEdgeCount(int expectedEdgeCount) { this.expectedEdgeCount = Optional.of(checkNonNegative(expectedEdgeCount)); return this; } /** * Specifies the order of iteration for the elements of {@link Network#nodes()}. * * <p>The default value is {@link ElementOrder#insertion() insertion order}. */ public <N1 extends N> NetworkBuilder<N1, E> nodeOrder(ElementOrder<N1> nodeOrder) { NetworkBuilder<N1, E> newBuilder = cast(); newBuilder.nodeOrder = checkNotNull(nodeOrder); return newBuilder; } /** * Specifies the order of iteration for the elements of {@link Network#edges()}. * * <p>The default value is {@link ElementOrder#insertion() insertion order}. */ public <E1 extends E> NetworkBuilder<N, E1> edgeOrder(ElementOrder<E1> edgeOrder) { NetworkBuilder<N, E1> newBuilder = cast(); newBuilder.edgeOrder = checkNotNull(edgeOrder); return newBuilder; } /** Returns an empty {@link MutableNetwork} with the properties of this {@link NetworkBuilder}. */ public <N1 extends N, E1 extends E> MutableNetwork<N1, E1> build() { return new StandardMutableNetwork<>(this); } @SuppressWarnings("unchecked") private <N1 extends N, E1 extends E> NetworkBuilder<N1, E1> cast() { return (NetworkBuilder<N1, E1>) this; } }
java
github
https://github.com/google/guava
android/guava/src/com/google/common/graph/NetworkBuilder.java
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ ## @file """ from PIL import ImageOps from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter class Flip(BaseFilter): """ Flips the image. """ def __init__(self, difficulty = 0.5, seed=None, reproducible=False): """ @param seed -- Seed value for random number generator, to produce reproducible results. @param reproducible -- Whether to seed the random number generator based on a hash of the image pixels upon each call to process(). 'seed' and 'reproducible' cannot be used together. """ BaseFilter.__init__(self, seed, reproducible) def process(self, image): """ @param image -- The image to process. Returns a single image, or a list containing one or more images. """ BaseFilter.process(self, image) newImage = ImageOps.flip(image) return newImage
unknown
codeparrot/codeparrot-clean
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for image ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests.xla_test import XLATestCase from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.platform import test class ResizeBilinearTest(XLATestCase): def _assertForwardOpMatchesExpected(self, image_np, target_shape, expected=None): if expected is None: self.fail("expected must be specified") with self.test_session() as sess, self.test_scope(): image = array_ops.placeholder(image_np.dtype) resized = gen_image_ops.resize_bilinear( image, target_shape, align_corners=True) out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]}) self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out) def _assertBackwardOpMatchesExpected(self, grads_np, input_shape=None, dtype=None, expected=None): if input_shape is None: self.fail("input_shape must be specified") if expected is None: self.fail("expected must be specified") with self.test_session() as sess, self.test_scope(): dtype = dtype or np.float32 grads = array_ops.placeholder(np.float32) resized = gen_image_ops._resize_bilinear_grad( grads, np.zeros([1, input_shape[0], input_shape[1], 1], 
dtype=dtype), align_corners=True) out = sess.run(resized, {grads: grads_np[np.newaxis, :, :, np.newaxis]}) self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out) def testAlignCorners1x2To3x2(self): for dtype in self.float_types: self._assertForwardOpMatchesExpected( np.array([[1, 2]], dtype=dtype), [3, 3], expected=np.array( [[1, 1.5, 2], [1, 1.5, 2], [1, 1.5, 2]], dtype=np.float32)) def testAlignCorners1x2To3x2Grad(self): for dtype in self.float_types: self._assertBackwardOpMatchesExpected( np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32), input_shape=[1, 2], dtype=dtype, expected=np.array([[9, 12]], dtype=np.float32)) def testAlignCorners2x2To1x1(self): for dtype in self.float_types: self._assertForwardOpMatchesExpected( np.array([[1, 2], [3, 4]], dtype=dtype), [1, 1], expected=np.array([[1]], dtype=np.float32)) def testAlignCorners2x2To1x1Grad(self): for dtype in self.float_types: self._assertBackwardOpMatchesExpected( np.array([[7]], dtype=np.float32), input_shape=[2, 2], dtype=dtype, expected=np.array([[7, 0], [0, 0]], dtype=np.float32)) def testAlignCorners2x2To3x3(self): for dtype in self.float_types: self._assertForwardOpMatchesExpected( np.array([[1, 2], [3, 4]], dtype=dtype), [3, 3], expected=np.array( [[1, 1.5, 2], [2, 2.5, 3], [3, 3.5, 4]], dtype=np.float32)) def testAlignCorners2x2To3x3Grad(self): self._assertBackwardOpMatchesExpected( np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), input_shape=[2, 2], expected=np.array([[5.25, 8.25], [14.25, 17.25]], dtype=np.float32)) def testAlignCorners3x3To2x2(self): for dtype in self.float_types: self._assertForwardOpMatchesExpected( np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype), [2, 2], expected=np.array([[1, 3], [7, 9]], dtype=np.float32)) def testAlignCorners3x3To2x2Grad(self): for dtype in self.float_types: self._assertBackwardOpMatchesExpected( np.array([[7, 13], [22, 4]], dtype=np.float32), input_shape=[3, 3], dtype=dtype, expected=np.array( [[7, 0, 13], [0, 0, 0], 
[22, 0, 4]], dtype=np.float32)) def testAlignCorners4x4To3x3(self): for dtype in self.float_types: self._assertForwardOpMatchesExpected( np.array( [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], dtype=dtype), [3, 3], expected=np.array( [[1, 2.5, 4], [7, 8.5, 10], [13, 14.5, 16]], dtype=np.float32)) def testAlignCorners4x4To3x3Grad(self): for dtype in self.float_types: self._assertBackwardOpMatchesExpected( np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), input_shape=[4, 4], dtype=dtype, expected=np.array( [[1, 1, 1, 3], [2, 1.25, 1.25, 3], [2, 1.25, 1.25, 3], [7, 4, 4, 9]], dtype=np.float32)) if __name__ == "__main__": test.main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Afterburn <https://github.com/afterburn> # (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com> # (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: pear short_description: Manage pear/pecl packages description: - Manage PHP packages with the pear package manager. version_added: 2.0 author: - "'jonathan.lestrelin' <jonathan.lestrelin@gmail.com>" options: name: description: - Name of the package to install, upgrade, or remove. required: true state: description: - Desired state of the package. default: "present" choices: ["present", "absent", "latest"] executable: description: - Path to the pear executable version_added: "2.4" ''' EXAMPLES = ''' # Install pear package - pear: name: Net_URL2 state: present # Install pecl package - pear: name: pecl/json_post state: present # Upgrade package - pear: name: Net_URL2 state: latest # Remove packages - pear: name: Net_URL2,pecl/json_post state: absent ''' import os from ansible.module_utils.basic import AnsibleModule def get_local_version(pear_output): """Take pear remoteinfo output and get the installed version""" lines = pear_output.split('\n') for line in lines: if 'Installed ' in line: installed = line.rsplit(None, 1)[-1].strip() if installed == '-': continue return installed return None def _get_pear_path(module): if module.params['executable'] and os.path.isfile(module.params['executable']): result = module.params['executable'] else: result = module.get_bin_path('pear', True, [module.params['executable']]) return result def get_repository_version(pear_output): """Take pear remote-info output and get the latest version""" lines = 
pear_output.split('\n') for line in lines: if 'Latest ' in line: return line.rsplit(None, 1)[-1].strip() return None def query_package(module, name, state="present"): """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date.""" if state == "present": lcmd = "%s info %s" % (_get_pear_path(module), name) lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False) if lrc != 0: # package is not installed locally return False, False rcmd = "%s remote-info %s" % (_get_pear_path(module), name) rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False) # get the version installed locally (if any) lversion = get_local_version(rstdout) # get the version in the repository rversion = get_repository_version(rstdout) if rrc == 0: # Return True to indicate that the package is installed locally, # and the result of the version number comparison # to determine if the package is up-to-date. 
return True, (lversion == rversion) return False, False def remove_packages(module, packages): remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: # Query the package first, to see if we even need to remove installed, updated = query_package(module, package) if not installed: continue cmd = "%s uninstall %s" % (_get_pear_path(module), package) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to remove %s" % (package)) remove_c += 1 if remove_c > 0: module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) module.exit_json(changed=False, msg="package(s) already absent") def install_packages(module, state, packages): install_c = 0 for i, package in enumerate(packages): # if the package is installed and state == present # or state == latest and is up-to-date then skip installed, updated = query_package(module, package) if installed and (state == 'present' or (state == 'latest' and updated)): continue if state == 'present': command = 'install' if state == 'latest': command = 'upgrade' cmd = "%s %s %s" % (_get_pear_path(module), command, package) rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="failed to install %s" % (package)) install_c += 1 if install_c > 0: module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) module.exit_json(changed=False, msg="package(s) already installed") def check_packages(module, packages, state): would_be_changed = [] for package in packages: installed, updated = query_package(module, package) if ((state in ["present", "latest"] and not installed) or (state == "absent" and installed) or (state == "latest" and not updated)): would_be_changed.append(package) if would_be_changed: if state == "absent": state = "removed" module.exit_json(changed=True, msg="%s package(s) would be %s" % ( len(would_be_changed), state)) else: module.exit_json(change=False, 
msg="package(s) already %s" % state) def main(): module = AnsibleModule( argument_spec=dict( name=dict(aliases=['pkg'], required=True), state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']), executable=dict(default=None, required=False, type='path')), supports_check_mode=True) p = module.params # normalize the state parameter if p['state'] in ['present', 'installed']: p['state'] = 'present' elif p['state'] in ['absent', 'removed']: p['state'] = 'absent' if p['name']: pkgs = p['name'].split(',') pkg_files = [] for i, pkg in enumerate(pkgs): pkg_files.append(None) if module.check_mode: check_packages(module, pkgs, p['state']) if p['state'] in ['present', 'latest']: install_packages(module, p['state'], pkgs) elif p['state'] == 'absent': remove_packages(module, pkgs) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.

"""Frontends for the message extraction functionality."""

# NOTE(review): this module targets Python 2 only (ConfigParser, StringIO,
# ``except E, e`` syntax, print statements later in the file).
from ConfigParser import RawConfigParser
from datetime import datetime
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from locale import getpreferredencoding
import logging
from optparse import OptionParser
import os
import re
import shutil
from StringIO import StringIO
import sys
import tempfile

from babel import __version__ as VERSION
from babel import Locale, localedata
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
                                   DEFAULT_MAPPING
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.messages.plurals import PLURALS
from babel.util import odict, LOCALTZ

__all__ = ['CommandLineInterface', 'compile_catalog', 'extract_messages',
           'init_catalog', 'check_message_extractors', 'update_catalog']
__docformat__ = 'restructuredtext en'


class compile_catalog(Command):
    """Catalog compilation command for use in ``setup.py`` scripts.

    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::

        from babel.messages.frontend import compile_catalog

        setup(
            ...
            cmdclass = {'compile_catalog': compile_catalog}
        )

    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """

    description = 'compile message catalogs to binary MO files'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('directory=', 'd',
         'path to base directory containing the catalogs'),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('use-fuzzy', 'f',
         'also include fuzzy translations'),
        ('statistics', None,
         'print statistics about translations')
    ]
    boolean_options = ['use-fuzzy', 'statistics']

    def initialize_options(self):
        # distutils hook: set defaults for all declared options.
        self.domain = 'messages'
        self.directory = None
        self.input_file = None
        self.output_file = None
        self.locale = None
        self.use_fuzzy = False
        self.statistics = False

    def finalize_options(self):
        # distutils hook: validate option combinations after parsing.
        if not self.input_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')
        if not self.output_file and not self.directory:
            # NOTE(review): this message says "input file" but the condition
            # tests output_file — the wording is probably wrong; left as-is
            # since it is a runtime string.
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')

    def run(self):
        # Build parallel lists of (locale, .po path) inputs and .mo outputs,
        # then compile each PO catalog to a binary MO file.
        po_files = []
        mo_files = []
        if not self.input_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.directory, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))
            else:
                # No locale given: scan the base directory for every locale
                # that has a catalog for this domain.
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale,
                                           'LC_MESSAGES', self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(self.directory, locale,
                                                     'LC_MESSAGES',
                                                     self.domain + '.mo'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                mo_files.append(self.output_file)
            else:
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))

        if not po_files:
            raise DistutilsOptionError('no message catalogs found')

        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()

            if self.statistics:
                # Entry [0] is the PO header; skip it when counting.
                translated = 0
                for message in list(catalog)[1:]:
                    if message.string:
                        translated +=1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                log.info('%d of %d messages (%d%%) translated in %r',
                         translated, len(catalog), percentage, po_file)

            if catalog.fuzzy and not self.use_fuzzy:
                log.warn('catalog %r is marked as fuzzy, skipping', po_file)
                continue

            # Report catalog problems but continue compiling regardless.
            for message, errors in catalog.check():
                for error in errors:
                    log.error('error: %s:%d: %s', po_file, message.lineno,
                              error)

            log.info('compiling catalog %r to %r', po_file, mo_file)

            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
            finally:
                outfile.close()


class extract_messages(Command):
    """Message extraction command for use in ``setup.py`` scripts.

    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::

        from babel.messages.frontend import extract_messages

        setup(
            ...
            cmdclass = {'extract_messages': extract_messages}
        )

    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """

    description = 'extract localizable strings from the project code'
    user_options = [
        ('charset=', None,
         'charset to use in the output file'),
        ('keywords=', 'k',
         'space-separated list of keywords to look for in addition to the '
         'defaults'),
        ('no-default-keywords', None,
         'do not include the default keywords'),
        ('mapping-file=', 'F',
         'path to the mapping configuration file'),
        ('no-location', None,
         'do not include location comments with filename and line number'),
        ('omit-header', None,
         'do not include msgid "" entry in header'),
        ('output-file=', 'o',
         'name of the output file'),
        ('width=', 'w',
         'set output line width (default 76)'),
        ('no-wrap', None,
         'do not break long message lines, longer than the output line width, '
         'into several lines'),
        ('sort-output', None,
         'generate sorted output (default False)'),
        ('sort-by-file', None,
         'sort output by file location (default False)'),
        ('msgid-bugs-address=', None,
         'set report address for msgid'),
        ('copyright-holder=', None,
         'set copyright holder in output'),
        # NOTE(review): "Seperate" typo below is in the user-visible help
        # string; left untouched since it is runtime text.
        ('add-comments=', 'c',
         'place comment block with TAG (or those preceding keyword lines) in '
         'output file. Seperate multiple TAGs with commas(,)'),
        ('strip-comments', None,
         'strip the comment TAGs from the comments.'),
        ('input-dirs=', None,
         'directories that should be scanned for messages'),
    ]
    boolean_options = [
        'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
        'sort-output', 'sort-by-file', 'strip-comments'
    ]

    def initialize_options(self):
        # distutils hook: set defaults. Attributes with a leading underscore
        # (_keywords, _add_comments) hold the parsed form of the raw option.
        self.charset = 'utf-8'
        self.keywords = ''
        self._keywords = DEFAULT_KEYWORDS.copy()
        self.no_default_keywords = False
        self.mapping_file = None
        self.no_location = False
        self.omit_header = False
        self.output_file = None
        self.input_dirs = None
        self.width = None
        self.no_wrap = False
        self.sort_output = False
        self.sort_by_file = False
        self.msgid_bugs_address = None
        self.copyright_holder = None
        self.add_comments = None
        self._add_comments = []
        self.strip_comments = False

    def finalize_options(self):
        # distutils hook: validate options and derive parsed values.
        if self.no_default_keywords and not self.keywords:
            raise DistutilsOptionError('you must specify new keywords if you '
                                       'disable the default ones')
        if self.no_default_keywords:
            self._keywords = {}
        if self.keywords:
            self._keywords.update(parse_keywords(self.keywords.split()))

        if not self.output_file:
            raise DistutilsOptionError('no output file specified')

        if self.no_wrap and self.width:
            raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
                                       "exclusive")
        if not self.no_wrap and not self.width:
            self.width = 76
        elif self.width is not None:
            self.width = int(self.width)

        if self.sort_output and self.sort_by_file:
            raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
                                       "are mutually exclusive")

        if not self.input_dirs:
            # Default to the top-level packages declared in setup().
            self.input_dirs = dict.fromkeys([k.split('.',1)[0]
                for k in self.distribution.packages
            ]).keys()

        if self.add_comments:
            self._add_comments = self.add_comments.split(',')

    def run(self):
        # Extract messages from every mapped directory and write one POT file.
        mappings = self._get_mappings()
        outfile = open(self.output_file, 'w')
        try:
            catalog = Catalog(project=self.distribution.get_name(),
                              version=self.distribution.get_version(),
                              msgid_bugs_address=self.msgid_bugs_address,
                              copyright_holder=self.copyright_holder,
                              charset=self.charset)

            for dirname, (method_map, options_map) in mappings.items():
                def callback(filename, method, options):
                    # Progress logging only; closes over `dirname`.
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    log.info('extracting messages from %s%s', filepath, optstr)

                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords=self._keywords,
                                             comment_tags=self._add_comments,
                                             callback=callback,
                                             strip_comment_tags=
                                                 self.strip_comments)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)

            log.info('writing PO template file to %s' % self.output_file)
            write_po(outfile, catalog, width=self.width,
                     no_location=self.no_location,
                     omit_header=self.omit_header,
                     sort_output=self.sort_output,
                     sort_by_file=self.sort_by_file)
        finally:
            outfile.close()

    def _get_mappings(self):
        # Resolve the directory -> (method_map, options_map) configuration,
        # preferring an explicit mapping file, then setup()'s
        # message_extractors keyword, then the built-in default mapping.
        mappings = {}

        if self.mapping_file:
            fileobj = open(self.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
                for dirname in self.input_dirs:
                    mappings[dirname] = method_map, options_map
            finally:
                fileobj.close()

        elif getattr(self.distribution, 'message_extractors', None):
            message_extractors = self.distribution.message_extractors
            for dirname, mapping in message_extractors.items():
                if isinstance(mapping, basestring):
                    method_map, options_map = parse_mapping(StringIO(mapping))
                else:
                    method_map, options_map = [], {}
                    for pattern, method, options in mapping:
                        method_map.append((pattern, method))
                        options_map[pattern] = options or {}
                mappings[dirname] = method_map, options_map

        else:
            for dirname in self.input_dirs:
                mappings[dirname] = DEFAULT_MAPPING, {}

        return mappings


def check_message_extractors(dist, name, value):
    """Validate the ``message_extractors`` keyword argument to ``setup()``.
    :param dist: the distutils/setuptools ``Distribution`` object
    :param name: the name of the keyword argument (should always be
                 "message_extractors")
    :param value: the value of the keyword argument
    :raise `DistutilsSetupError`: if the value is not valid
    :see: `Adding setup() arguments
          <http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments>`_
    """
    assert name == 'message_extractors'
    if not isinstance(value, dict):
        raise DistutilsSetupError('the value of the "message_extractors" '
                                  'parameter must be a dictionary')


class init_catalog(Command):
    """New catalog initialization command for use in ``setup.py`` scripts.

    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::

        from babel.messages.frontend import init_catalog

        setup(
            ...
            cmdclass = {'init_catalog': init_catalog}
        )

    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """

    description = 'create a new catalog based on a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to output directory'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale for the new localized catalog'),
    ]

    def initialize_options(self):
        # distutils hook: set defaults for all declared options.
        self.output_dir = None
        self.output_file = None
        self.input_file = None
        self.locale = None
        self.domain = 'messages'

    def finalize_options(self):
        # distutils hook: validate options, resolve the output path, and
        # create the output directory tree if it does not exist yet.
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')

        if not self.locale:
            raise DistutilsOptionError('you must provide a locale for the '
                                       'new catalog')
        try:
            # Python 2 exception syntax; this module is Python 2 only.
            self._locale = Locale.parse(self.locale)
        except UnknownLocaleError, e:
            raise DistutilsOptionError(e)

        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output directory')
        if not self.output_file:
            self.output_file = os.path.join(self.output_dir, self.locale,
                                            'LC_MESSAGES', self.domain + '.po')

        if not os.path.exists(os.path.dirname(self.output_file)):
            os.makedirs(os.path.dirname(self.output_file))

    def run(self):
        # Read the POT template and write a fresh, non-fuzzy locale catalog.
        log.info('creating catalog %r based on %r', self.output_file,
                 self.input_file)

        infile = open(self.input_file, 'r')
        try:
            # Although reading from the catalog template, read_po must be fed
            # the locale in order to correcly calculate plurals
            catalog = read_po(infile, locale=self.locale)
        finally:
            infile.close()

        catalog.locale = self._locale
        catalog.fuzzy = False

        outfile = open(self.output_file, 'w')
        try:
            write_po(outfile, catalog)
        finally:
            outfile.close()


class update_catalog(Command):
    """Catalog merging command for use in ``setup.py`` scripts.

    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::

        from babel.messages.frontend import update_catalog

        setup(
            ...
            cmdclass = {'update_catalog': update_catalog}
        )

    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """

    description = 'update message catalogs from a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to base directory containing the catalogs'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        # NOTE(review): 'ignore-obsolete=' declares an argument-taking option
        # (trailing '=') yet is treated as boolean below — looks inconsistent
        # with compile_catalog; confirm against distutils option handling.
        ('ignore-obsolete=', None,
         'whether to omit obsolete messages from the output'),
        ('no-fuzzy-matching', 'N',
         'do not use fuzzy matching'),
        ('previous', None,
         'keep previous msgids of translated messages')
    ]
    # NOTE(review): these names use underscores while compile_catalog's
    # boolean_options use the hyphenated option names; verify intent.
    boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous']

    def initialize_options(self):
        # distutils hook: set defaults for all declared options.
        self.domain = 'messages'
        self.input_file = None
        self.output_dir = None
        self.output_file = None
        self.locale = None
        self.ignore_obsolete = False
        self.no_fuzzy_matching = False
        self.previous = False

    def finalize_options(self):
        # distutils hook: validate option combinations after parsing.
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output file or '
                                       'directory')
        if self.output_file and not self.locale:
            raise DistutilsOptionError('you must specify the locale')
        if self.no_fuzzy_matching and self.previous:
            # --previous only makes sense together with fuzzy matching.
            self.previous = False

    def run(self):
        # Collect the target PO catalogs, read the POT template, merge, and
        # write each updated catalog back via a temporary file.
        po_files = []
        if not self.output_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.output_dir, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
            else:
                for locale in os.listdir(self.output_dir):
                    po_file = os.path.join(self.output_dir, locale,
                                           'LC_MESSAGES',
                                           self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
        else:
            po_files.append((self.locale, self.output_file))

        domain = self.domain
        if not domain:
            domain = os.path.splitext(os.path.basename(self.input_file))[0]

        infile = open(self.input_file, 'U')
        try:
            template = read_po(infile)
        finally:
            infile.close()

        if not po_files:
            raise DistutilsOptionError('no message catalogs found')

        for locale, filename in po_files:
            log.info('updating catalog %r based on %r', filename,
                     self.input_file)
            infile = open(filename, 'U')
            try:
                catalog = read_po(infile, locale=locale, domain=domain)
            finally:
                infile.close()

            catalog.update(template, self.no_fuzzy_matching)

            # Write to a temp file next to the target, then swap it in, so a
            # failed write never destroys the existing catalog.
            tmpname = os.path.join(os.path.dirname(filename),
                                   tempfile.gettempprefix() +
                                   os.path.basename(filename))
            tmpfile = open(tmpname, 'w')
            try:
                try:
                    write_po(tmpfile, catalog,
                             ignore_obsolete=self.ignore_obsolete,
                             include_previous=self.previous)
                finally:
                    tmpfile.close()
            except:
                # Bare except is deliberate here: clean up the temp file for
                # any failure, then re-raise.
                os.remove(tmpname)
                raise

            try:
                os.rename(tmpname, filename)
            except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python
                # If the error is in fact due to a permissions problem, that
                # same error is going to be raised from one of the following
                # operations
                os.remove(filename)
                shutil.copy(tmpname, filename)
                os.remove(tmpname)


class CommandLineInterface(object):
    """Command-line interface.

    This class provides a simple command-line interface to the message
    extraction and PO file generation functionality.
    """

    usage = '%%prog %s [options] %s'
    version = '%%prog %s' % VERSION
    commands = {
        'compile': 'compile message catalogs to MO files',
        'extract': 'extract messages from source files and generate a POT file',
        'init': 'create new message catalogs from a POT file',
        'update': 'update existing message catalogs from a POT file'
    }

    def run(self, argv=sys.argv):
        """Main entry point of the command-line interface.
:param argv: list of arguments passed on the command-line """ self.parser = OptionParser(usage=self.usage % ('command', '[args]'), version=self.version) self.parser.disable_interspersed_args() self.parser.print_help = self._help self.parser.add_option('--list-locales', dest='list_locales', action='store_true', help="print all known locales and exit") self.parser.add_option('-v', '--verbose', action='store_const', dest='loglevel', const=logging.DEBUG, help='print as much as possible') self.parser.add_option('-q', '--quiet', action='store_const', dest='loglevel', const=logging.ERROR, help='print as little as possible') self.parser.set_defaults(list_locales=False, loglevel=logging.INFO) options, args = self.parser.parse_args(argv[1:]) # Configure logging self.log = logging.getLogger('babel') self.log.setLevel(options.loglevel) handler = logging.StreamHandler() handler.setLevel(options.loglevel) formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) self.log.addHandler(handler) if options.list_locales: identifiers = localedata.list() longest = max([len(identifier) for identifier in identifiers]) identifiers.sort() format = u'%%-%ds %%s' % (longest + 1) for identifier in identifiers: locale = Locale.parse(identifier) output = format % (identifier, locale.english_name) print output.encode(sys.stdout.encoding or getpreferredencoding() or 'ascii', 'replace') return 0 if not args: self.parser.error('no valid command or option passed. 
' 'Try the -h/--help option for more information.') cmdname = args[0] if cmdname not in self.commands: self.parser.error('unknown command "%s"' % cmdname) return getattr(self, cmdname)(args[1:]) def _help(self): print self.parser.format_help() print "commands:" longest = max([len(command) for command in self.commands]) format = " %%-%ds %%s" % max(8, longest + 1) commands = self.commands.items() commands.sort() for name, description in commands: print format % (name, description) def compile(self, argv): """Subcommand for compiling a message catalog to a MO file. :param argv: the command arguments :since: version 0.9 """ parser = OptionParser(usage=self.usage % ('compile', ''), description=self.commands['compile']) parser.add_option('--domain', '-D', dest='domain', help="domain of MO and PO files (default '%default')") parser.add_option('--directory', '-d', dest='directory', metavar='DIR', help='base directory of catalog files') parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE', help='locale of the catalog') parser.add_option('--input-file', '-i', dest='input_file', metavar='FILE', help='name of the input file') parser.add_option('--output-file', '-o', dest='output_file', metavar='FILE', help="name of the output file (default " "'<output_dir>/<locale>/LC_MESSAGES/" "<domain>.mo')") parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy', action='store_true', help='also include fuzzy translations (default ' '%default)') parser.add_option('--statistics', dest='statistics', action='store_true', help='print statistics about translations') parser.set_defaults(domain='messages', use_fuzzy=False, compile_all=False, statistics=False) options, args = parser.parse_args(argv) po_files = [] mo_files = [] if not options.input_file: if not options.directory: parser.error('you must specify either the input file or the ' 'base directory') if options.locale: po_files.append((options.locale, os.path.join(options.directory, options.locale, 'LC_MESSAGES', 
options.domain + '.po'))) mo_files.append(os.path.join(options.directory, options.locale, 'LC_MESSAGES', options.domain + '.mo')) else: for locale in os.listdir(options.directory): po_file = os.path.join(options.directory, locale, 'LC_MESSAGES', options.domain + '.po') if os.path.exists(po_file): po_files.append((locale, po_file)) mo_files.append(os.path.join(options.directory, locale, 'LC_MESSAGES', options.domain + '.mo')) else: po_files.append((options.locale, options.input_file)) if options.output_file: mo_files.append(options.output_file) else: if not options.directory: parser.error('you must specify either the input file or ' 'the base directory') mo_files.append(os.path.join(options.directory, options.locale, 'LC_MESSAGES', options.domain + '.mo')) if not po_files: parser.error('no message catalogs found') for idx, (locale, po_file) in enumerate(po_files): mo_file = mo_files[idx] infile = open(po_file, 'r') try: catalog = read_po(infile, locale) finally: infile.close() if options.statistics: translated = 0 for message in list(catalog)[1:]: if message.string: translated +=1 percentage = 0 if len(catalog): percentage = translated * 100 // len(catalog) self.log.info("%d of %d messages (%d%%) translated in %r", translated, len(catalog), percentage, po_file) if catalog.fuzzy and not options.use_fuzzy: self.log.warn('catalog %r is marked as fuzzy, skipping', po_file) continue for message, errors in catalog.check(): for error in errors: self.log.error('error: %s:%d: %s', po_file, message.lineno, error) self.log.info('compiling catalog %r to %r', po_file, mo_file) outfile = open(mo_file, 'wb') try: write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy) finally: outfile.close() def extract(self, argv): """Subcommand for extracting messages from source files and generating a POT file. 
:param argv: the command arguments """ parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'), description=self.commands['extract']) parser.add_option('--charset', dest='charset', help='charset to use in the output (default ' '"%default")') parser.add_option('-k', '--keyword', dest='keywords', action='append', help='keywords to look for in addition to the ' 'defaults. You can specify multiple -k flags on ' 'the command line.') parser.add_option('--no-default-keywords', dest='no_default_keywords', action='store_true', help="do not include the default keywords") parser.add_option('--mapping', '-F', dest='mapping_file', help='path to the extraction mapping file') parser.add_option('--no-location', dest='no_location', action='store_true', help='do not include location comments with filename ' 'and line number') parser.add_option('--omit-header', dest='omit_header', action='store_true', help='do not include msgid "" entry in header') parser.add_option('-o', '--output', dest='output', help='path to the output POT file') parser.add_option('-w', '--width', dest='width', type='int', help="set output line width (default 76)") parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true', help='do not break long message lines, longer than ' 'the output line width, into several lines') parser.add_option('--sort-output', dest='sort_output', action='store_true', help='generate sorted output (default False)') parser.add_option('--sort-by-file', dest='sort_by_file', action='store_true', help='sort output by file location (default False)') parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address', metavar='EMAIL@ADDRESS', help='set report address for msgid') parser.add_option('--copyright-holder', dest='copyright_holder', help='set copyright holder in output') parser.add_option('--project', dest='project', help='set project name in output') parser.add_option('--version', dest='version', help='set project version in output') 
parser.add_option('--add-comments', '-c', dest='comment_tags', metavar='TAG', action='append', help='place comment block with TAG (or those ' 'preceding keyword lines) in output file. One ' 'TAG per argument call') parser.add_option('--strip-comment-tags', '-s', dest='strip_comment_tags', action='store_true', help='Strip the comment tags from the comments.') parser.set_defaults(charset='utf-8', keywords=[], no_default_keywords=False, no_location=False, omit_header = False, width=None, no_wrap=False, sort_output=False, sort_by_file=False, comment_tags=[], strip_comment_tags=False) options, args = parser.parse_args(argv) if not args: parser.error('incorrect number of arguments') if options.output not in (None, '-'): outfile = open(options.output, 'w') else: outfile = sys.stdout keywords = DEFAULT_KEYWORDS.copy() if options.no_default_keywords: if not options.keywords: parser.error('you must specify new keywords if you disable the ' 'default ones') keywords = {} if options.keywords: keywords.update(parse_keywords(options.keywords)) if options.mapping_file: fileobj = open(options.mapping_file, 'U') try: method_map, options_map = parse_mapping(fileobj) finally: fileobj.close() else: method_map = DEFAULT_MAPPING options_map = {} if options.width and options.no_wrap: parser.error("'--no-wrap' and '--width' are mutually exclusive.") elif not options.width and not options.no_wrap: options.width = 76 if options.sort_output and options.sort_by_file: parser.error("'--sort-output' and '--sort-by-file' are mutually " "exclusive") try: catalog = Catalog(project=options.project, version=options.version, msgid_bugs_address=options.msgid_bugs_address, copyright_holder=options.copyright_holder, charset=options.charset) for dirname in args: if not os.path.isdir(dirname): parser.error('%r is not a directory' % dirname) def callback(filename, method, options): if method == 'ignore': return filepath = os.path.normpath(os.path.join(dirname, filename)) optstr = '' if options: optstr = ' 
(%s)' % ', '.join(['%s="%s"' % (k, v) for k, v in options.items()]) self.log.info('extracting messages from %s%s', filepath, optstr) extracted = extract_from_dir(dirname, method_map, options_map, keywords, options.comment_tags, callback=callback, strip_comment_tags= options.strip_comment_tags) for filename, lineno, message, comments in extracted: filepath = os.path.normpath(os.path.join(dirname, filename)) catalog.add(message, None, [(filepath, lineno)], auto_comments=comments) if options.output not in (None, '-'): self.log.info('writing PO template file to %s' % options.output) write_po(outfile, catalog, width=options.width, no_location=options.no_location, omit_header=options.omit_header, sort_output=options.sort_output, sort_by_file=options.sort_by_file) finally: if options.output: outfile.close() def init(self, argv): """Subcommand for creating new message catalogs from a template. :param argv: the command arguments """ parser = OptionParser(usage=self.usage % ('init', ''), description=self.commands['init']) parser.add_option('--domain', '-D', dest='domain', help="domain of PO file (default '%default')") parser.add_option('--input-file', '-i', dest='input_file', metavar='FILE', help='name of the input file') parser.add_option('--output-dir', '-d', dest='output_dir', metavar='DIR', help='path to output directory') parser.add_option('--output-file', '-o', dest='output_file', metavar='FILE', help="name of the output file (default " "'<output_dir>/<locale>/LC_MESSAGES/" "<domain>.po')") parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE', help='locale for the new localized catalog') parser.set_defaults(domain='messages') options, args = parser.parse_args(argv) if not options.locale: parser.error('you must provide a locale for the new catalog') try: locale = Locale.parse(options.locale) except UnknownLocaleError, e: parser.error(e) if not options.input_file: parser.error('you must specify the input file') if not options.output_file and not 
options.output_dir: parser.error('you must specify the output file or directory') if not options.output_file: options.output_file = os.path.join(options.output_dir, options.locale, 'LC_MESSAGES', options.domain + '.po') if not os.path.exists(os.path.dirname(options.output_file)): os.makedirs(os.path.dirname(options.output_file)) infile = open(options.input_file, 'r') try: # Although reading from the catalog template, read_po must be fed # the locale in order to correcly calculate plurals catalog = read_po(infile, locale=options.locale) finally: infile.close() catalog.locale = locale catalog.revision_date = datetime.now(LOCALTZ) self.log.info('creating catalog %r based on %r', options.output_file, options.input_file) outfile = open(options.output_file, 'w') try: write_po(outfile, catalog) finally: outfile.close() def update(self, argv): """Subcommand for updating existing message catalogs from a template. :param argv: the command arguments :since: version 0.9 """ parser = OptionParser(usage=self.usage % ('update', ''), description=self.commands['update']) parser.add_option('--domain', '-D', dest='domain', help="domain of PO file (default '%default')") parser.add_option('--input-file', '-i', dest='input_file', metavar='FILE', help='name of the input file') parser.add_option('--output-dir', '-d', dest='output_dir', metavar='DIR', help='path to output directory') parser.add_option('--output-file', '-o', dest='output_file', metavar='FILE', help="name of the output file (default " "'<output_dir>/<locale>/LC_MESSAGES/" "<domain>.po')") parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE', help='locale of the translations catalog') parser.add_option('--ignore-obsolete', dest='ignore_obsolete', action='store_true', help='do not include obsolete messages in the output ' '(default %default)') parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching', action='store_true', help='do not use fuzzy matching (default %default)') 
parser.add_option('--previous', dest='previous', action='store_true', help='keep previous msgids of translated messages ' '(default %default)') parser.set_defaults(domain='messages', ignore_obsolete=False, no_fuzzy_matching=False, previous=False) options, args = parser.parse_args(argv) if not options.input_file: parser.error('you must specify the input file') if not options.output_file and not options.output_dir: parser.error('you must specify the output file or directory') if options.output_file and not options.locale: parser.error('you must specify the locale') if options.no_fuzzy_matching and options.previous: options.previous = False po_files = [] if not options.output_file: if options.locale: po_files.append((options.locale, os.path.join(options.output_dir, options.locale, 'LC_MESSAGES', options.domain + '.po'))) else: for locale in os.listdir(options.output_dir): po_file = os.path.join(options.output_dir, locale, 'LC_MESSAGES', options.domain + '.po') if os.path.exists(po_file): po_files.append((locale, po_file)) else: po_files.append((options.locale, options.output_file)) domain = options.domain if not domain: domain = os.path.splitext(os.path.basename(options.input_file))[0] infile = open(options.input_file, 'U') try: template = read_po(infile) finally: infile.close() if not po_files: parser.error('no message catalogs found') for locale, filename in po_files: self.log.info('updating catalog %r based on %r', filename, options.input_file) infile = open(filename, 'U') try: catalog = read_po(infile, locale=locale, domain=domain) finally: infile.close() catalog.update(template, options.no_fuzzy_matching) tmpname = os.path.join(os.path.dirname(filename), tempfile.gettempprefix() + os.path.basename(filename)) tmpfile = open(tmpname, 'w') try: try: write_po(tmpfile, catalog, ignore_obsolete=options.ignore_obsolete, include_previous=options.previous) finally: tmpfile.close() except: os.remove(tmpname) raise try: os.rename(tmpname, filename) except OSError: # We're 
probably on Windows, which doesn't support atomic # renames, at least not through Python # If the error is in fact due to a permissions problem, that # same error is going to be raised from one of the following # operations os.remove(filename) shutil.copy(tmpname, filename) os.remove(tmpname) def main(): return CommandLineInterface().run(sys.argv) def parse_mapping(fileobj, filename=None): """Parse an extraction method mapping from a file-like object. >>> buf = StringIO(''' ... [extractors] ... custom = mypackage.module:myfunc ... ... # Python source files ... [python: **.py] ... ... # Genshi templates ... [genshi: **/templates/**.html] ... include_attrs = ... [genshi: **/templates/**.txt] ... template_class = genshi.template:TextTemplate ... encoding = latin-1 ... ... # Some custom extractor ... [custom: **/custom/*.*] ... ''') >>> method_map, options_map = parse_mapping(buf) >>> len(method_map) 4 >>> method_map[0] ('**.py', 'python') >>> options_map['**.py'] {} >>> method_map[1] ('**/templates/**.html', 'genshi') >>> options_map['**/templates/**.html']['include_attrs'] '' >>> method_map[2] ('**/templates/**.txt', 'genshi') >>> options_map['**/templates/**.txt']['template_class'] 'genshi.template:TextTemplate' >>> options_map['**/templates/**.txt']['encoding'] 'latin-1' >>> method_map[3] ('**/custom/*.*', 'mypackage.module:myfunc') >>> options_map['**/custom/*.*'] {} :param fileobj: a readable file-like object containing the configuration text to parse :return: a `(method_map, options_map)` tuple :rtype: `tuple` :see: `extract_from_directory` """ extractors = {} method_map = [] options_map = {} parser = RawConfigParser() parser._sections = odict(parser._sections) # We need ordered sections parser.readfp(fileobj, filename) for section in parser.sections(): if section == 'extractors': extractors = dict(parser.items(section)) else: method, pattern = [part.strip() for part in section.split(':', 1)] method_map.append((pattern, method)) options_map[pattern] = 
dict(parser.items(section)) if extractors: for idx, (pattern, method) in enumerate(method_map): if method in extractors: method = extractors[method] method_map[idx] = (pattern, method) return (method_map, options_map) def parse_keywords(strings=[]): """Parse keywords specifications from the given list of strings. >>> kw = parse_keywords(['_', 'dgettext:2', 'dngettext:2,3']).items() >>> kw.sort() >>> for keyword, indices in kw: ... print (keyword, indices) ('_', None) ('dgettext', (2,)) ('dngettext', (2, 3)) """ keywords = {} for string in strings: if ':' in string: funcname, indices = string.split(':') else: funcname, indices = string, None if funcname not in keywords: if indices: indices = tuple([(int(x)) for x in indices.split(',')]) keywords[funcname] = indices return keywords if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: UTF-8 -* import os from app import create_app, db from app.models import User, Role, Permission, Coupon, SecKill, Datemark from flask_script import Manager, Shell from flask_migrate import Migrate, MigrateCommand app = create_app(os.getenv('ATH_CONFIG') or 'default') manager = Manager(app) def make_shell_context(): return dict(app=app, db=db, User=User, Role=Role) manager.add_command("shell", Shell(make_context=make_shell_context)) manager.add_command("db", MigrateCommand) @manager.command def test(): """Run the unit tests.""" import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests) if __name__ == '__main__': manager.run() def init_app_data(): db.drop_all() db.create_all() Coupon.insert_coupons() Role.insert_roles() u = User(ip="127.0.0.1", name="Administrator", role=Role.query.filter_by(permissions=0xff).first()) db.session.add(u) db.session.commit() def calculated(): sk = SecKill.query.filter_by(win=False).filter_by(datemark=Datemark.today()).order_by(SecKill.kill_time).all() sk1 = sk[0] db.session.add(sk1) db.session.commit()
unknown
codeparrot/codeparrot-clean
function foo() { let x = 1; let y = 2; if (y === 2) { x = 3; } if (y === 3) { x = 5; } y = x; } export const FIXTURE_ENTRYPOINT = { fn: foo, params: [], isComponent: false, };
javascript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/ssa-complex-multiple-if.js
#! /usr/bin/env python # -*- coding: utf-8 -*- ''' Manage your Nagios on-call roster with Google apps. Usage: googios --help googios setup googios <roster> current [start end name email phone] [--echo] googios <roster> query [--start=<start> --end=<end> | --at=<at>] [--echo] googios <roster> report [<fuzzy> | <start> <end>] [--echo] googios <roster> update [--echo] googios <roster> runway [--echo] googios <roster> status [--echo] Options: -h --help Show this screen. --version Show version. -e --echo Log to stdout/stderr rather than to the usual file. -a --at=<at> Moment (UTC) in time -f --start=<start> Minimum ending (UTC) of a shift. -t --end=<end> Maximum starting (UTC) of a shift. The <roster> parameter: The roster can either be the roster's human-friendly name (if the script is ran from the directory with its configuration file) or the full path to the configuration file. Sub-commands: setup Run a wizard for generating a configuration file. current Information on the current person on duty. It is possible to limit what information is given by white-listing any number of the 5 fields (start, end, name, email, phone). If no white-list is provided, all info are printed. query All shifts between <start> and <end>, or at the <at> moment. <start>, <end> and <at> accept a variety of formats, some of whick may be inherently ambiguous (see examples below). When running a query programmatically, is safer to use the ISO 8601 format (e.g.: 2014-12-09T07:39:22+00:00) report Similar to query, but meant for human consumption and with shifts grouped by working day. By default it outputs the report of the previous month, but this can be altered with either the <start> and <end> parameters or with <fuzzy>, which try to fuzzy-match expressions like for example "october" or "apr 2012". `report` groups shifts by day, taking in account the "roster.time_shift" parameter in the configuration file. update Force to rebuild the cache with live data. 
runway Return the number of full days for which shifts have been *cached* from now onwards. Note that this subcommand operates on the cache (i.e.: not on the live data), the rationale being that `runaway` should tell you what you can rely on, even in case of loss of connectivity. If the time series has "holes" in it, `runway` will return the number of full cached days until the first hole, even if more shifts have been scheduled afterwards. status Perform a sanity check of the roster. Print stats and - in case of problems - exit with a non-zero status. Examples: googios setup googios dev update --echo googios dev current name phone googios /var/googios/dev.conf current googios dev query --at='12:30' googios dev query --start='1 nov' --end='5 nov' googios dev query --at='2013-12-11T10:09:08+02:00' googios dev report googios dev report august googios dev runway googios dev status ''' import os import json import logging import datetime from random import choice from functools import partial from collections import defaultdict import pytz from dateutil.relativedelta import relativedelta from docopt import docopt from roster import Roster, Shift, NA_TOKEN from wizard.wizard import Wizard from utils import ( log, log_format, log_stream_handler, get_calendar_service, get_people_client, dtfy ) def load_config(string_): '''Load configuration from file.''' try: with open('{}.config'.format(string_)) as file_: config = json.load(file_) config['oauth.directory'] = os.path.dirname(string_) return config except IOError: pass try: with open(string_) as file_: config = json.load(file_) config['oauth.directory'] = os.getcwd() return config except IOError: # The following will always be logged on screen, obviously... 
log.critical('Could not open configuration for "{}"'.format(string_)) exit(os.EX_DATAERR) def modify_logger(cli, config): '''Modify the logger so as ''' if cli['--echo']: return log.removeHandler(log_stream_handler) log_level = config['log.level'] log_dir = config['log.directory'] log_fname = os.path.join(log_dir, '{}.log'.format(config['roster.name'])) log_file_handler = logging.FileHandler(log_fname) log_file_handler.setFormatter(log_format) log.addHandler(log_file_handler) log.setLevel(log_level) def get_roster(config): '''Return the roster to perform script operations on.''' now = datetime.datetime.now(tz=pytz.UTC) min_end = (now - datetime.timedelta(days=config['cache.past'])).isoformat() if config['cache.future'] is not None: max_start = now + datetime.timedelta(days=config['cache.future']) max_start = max_start.isoformat() else: max_start = None cal_clbk = partial(get_calendar_service, oauth_dir=config['oauth.directory']) ppl_clbk = partial(get_people_client, oauth_dir=config['oauth.directory']) return Roster( name=config['roster.name'], cid=config['roster.cid'], cal_service_clbk=cal_clbk, ppl_client_clbk=ppl_clbk, min_end=min_end, max_start=max_start, all_day_offset=config['roster.time_shift'], cache_timeout=config['cache.timeout'], cache_directory=config['cache.directory'] ) def current(roster, cli, config): '''Print information on the current shift in the roster.''' # roster.current return a *list* of all the people on duty shifts = roster.current if len(shifts) == 1: [current] = shifts elif len(shifts) == 0: log.error('Nobody is on duty.') now = datetime.datetime.now(tz=pytz.UTC) current = Shift(now, now, None, None, None) else: log.error('Several people where on duty, picking a random one.') for counter, shift in enumerate(shifts, 1): log.error('On duty #{}: {}'.format(counter, shift)) current = choice(shifts) # Replace missing fields with fallback ones if not current.email: current.email = config['fallback.email'] if current.name is not None: 
log.error('Missing email address for "{}"'.format(current.name)) if not current.phone: current.phone = config['fallback.phone'] if current.name is not None: log.error('Missing phone number for "{}"'.format(current.name)) current.name = current.name or 'Fallback Contact Details' # Compute what fields to output fields = ('start', 'end', 'name', 'email', 'phone') mask = [] for attr_name in fields: mask.append(cli[attr_name]) if not any(mask): mask = [True] * 5 # No explicit field, means all fields bits = [val for val, flag in zip(current.as_string_tuple, mask) if flag] print('\t'.join(bits)) def query(roster, cli, config): '''Print a roster query result.''' start = cli['--start'] or cli['--at'] end = cli['--end'] or cli['--at'] if end < start: msg = 'Tried to query roster for a negative timespan ({} to {})' log.critical(msg.format(start, end)) exit(os.EX_DATAERR) for shift in roster.query(start, end): print '\t'.join(shift.as_string_tuple) def report(roster, cli, config): '''Print a human-friendly report about a time-slice of the roster.''' time_zone = config['roster.time_zone'] # We use datetimes even if the ultimate goal is operate at date level as # we need to preserve the timezone information all along fuzzy = cli['<fuzzy>'] # Fuzzy should be interpreted as always indicating a month. 
if fuzzy: try: start = fuzzy.replace(day=1) end = start + relativedelta(months=1, days=-1) except Exception as e: log.critical('Cannot parse <fuzzy> parameter "{}"'.format(fuzzy)) log.exception(e.message) raise # A range can be whatever elif cli['<start>']: start = cli['<start>'] end = cli['<end>'] if start > end: msg = 'Tried to generate a report for negative timespan ({} to {})' log.critical(msg.format(start, end)) exit(os.EX_DATAERR) else: now = datetime.datetime.now(tz=pytz.timezone(time_zone)) start = now.replace(day=1) + relativedelta(months=-1) end = start + relativedelta(months=1, days=-1) data = roster.report(start, end) weekdays = defaultdict(int) weekends = defaultdict(int) for day, people in data: target = weekdays if day.weekday() < 5 else weekends for person in people: target[person] += 1 print('\n O N - C A L L R O S T E R') print('=====================================================') print(' {} - {}\n\n'.format(start.strftime('%d %b %Y'), end.strftime('%d %b %Y'))) for row in data: print(' {:<20}{}'.format(row[0].strftime('%d %b %Y, %a'), ', '.join(row[1]))) print('\n\n SUMMARY') print('-----------------------------------------------------') print(' Name Weekdays Weekends Total') print('-----------------------------------------------------') names = sorted(list(set(weekends.keys() + weekdays.keys()))) template = ' {:<26}{:>3}{:>10}{:>8}' for name in names: wd = weekdays[name] we = weekends[name] print(template.format(name, wd or '-', we or '-', wd + we)) print('-----------------------------------------------------\n') def runway(roster, cli, config): '''Print the number of days in the future before a shift-less moment.''' print (roster.runway - datetime.datetime.now(tz=pytz.UTC)).days def status(roster, cli, config): '''Print statistics on the roster. 
Exit with error code if problems.''' human_friendly = lambda td: (None if td is None else td.isoformat()[:16].replace('T', ' ')) stats = roster.stats() exit_status = os.EX_OK # Generation of human-readable statistics min_end = human_friendly(stats['roster.min_end']) max_start = human_friendly(stats['roster.max_start']) cache_age = datetime.datetime.now(tz=pytz.UTC) - stats['cache.timestamp'] cache_age = int(cache_age.total_seconds() / 60) cache_size = stats['cache.size'] cache_end = human_friendly(stats['cache.end']) num_overlaps = len(stats['cache.overlaps']) num_holes = len(stats['cache.holes']) if num_overlaps: first_overlap = map(human_friendly, stats['cache.overlaps'][0]) exit_status = os.EX_DATAERR else: first_overlap = NA_TOKEN if num_holes: first_hole = map(human_friendly, stats['cache.holes'][0]) exit_status = os.EX_DATAERR else: first_hole = NA_TOKEN print('\n R O S T E R S T A T I S T I C S') print('=====================================================\n\n') print(' `min_end` query parameter : {}'.format(min_end)) print(' `max_start` query parameter : {}'.format(max_start)) print(' Cache age : {} mins'.format(cache_age)) print(' Cache size : {} shifts'.format(cache_size)) print(' Cache upper limit : {}'.format(cache_end)) print(' Number of roster holes : {}'.format(num_holes)) print(' Roster first hole : {}'.format(first_hole)) print(' Number of roster overlaps : {}'.format(num_overlaps)) print(' Cache first overlap : {}\n'.format(first_overlap)) exit(exit_status) def main(): cli = docopt(__doc__, version='0.1') if cli['setup']: # Given that the wizard is always run by a human, and that log messages # would interfere with the wizard output, we disable logging for it. 
logging.disable(logging) wizard = Wizard() wizard.run() logging.disable(logging.NOTSET) exit(os.EX_OK) config = load_config(cli['<roster>']) modify_logger(cli, config) for key in ('--start', '--end', '--at', '<start>', '<end>', '<fuzzy>'): if cli[key] is not None: cli[key] = dtfy(cli[key], tz=config['roster.time_zone']) roster = get_roster(config) if cli['current'] is True: current(roster, cli, config) elif cli['query'] is True: query(roster, cli, config) elif cli['report'] is True: report(roster, cli, config) elif cli['update']: roster.update_cache() elif cli['runway'] is True: runway(roster, cli, config) elif cli['status'] is True: status(roster, cli, config) else: log.critical('Something is odd, you should never hit this point...') exit(os.EX_SOFTWARE) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
from numpy import meshgrid, ndarray, array_equal, allclose, array, sqrt, zeros, asarray, where, ones from test_utils import LocalTestCase from thunder.extraction.source import Source, SourceModel class TestSourceConstruction(LocalTestCase): def test_source(self): """ (SourceConstruction) create """ s = Source([[10, 10], [10, 20]]) assert(isinstance(s.coordinates, ndarray)) assert(array_equal(s.coordinates, array([[10, 10], [10, 20]]))) def test_source_with_values(self): """ (SourceConstruction) create with values """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(array_equal(s.coordinates, array([[10, 10], [10, 20]]))) assert(array_equal(s.values, array([1.0, 2.0]))) def test_source_fromMask_binary(self): """ (SourceConstruction) from mask """ mask = zeros((10, 10)) mask[5, 5] = 1 mask[5, 6] = 1 mask[5, 7] = 1 s = Source.fromMask(mask) assert(isinstance(s, Source)) assert(isinstance(s.coordinates, ndarray)) assert(array_equal(s.coordinates, array([[5, 5], [5, 6], [5, 7]]))) assert(array_equal(s.mask((10, 10), binary=True), mask)) assert(array_equal(s.mask((10, 10), binary=False), mask)) def test_source_fromMask_values(self): """ (SourceConstruction) from mask with values """ mask = zeros((10, 10)) mask[5, 5] = 0.5 mask[5, 6] = 0.6 mask[5, 7] = 0.7 s = Source.fromMask(mask) assert(isinstance(s, Source)) assert(isinstance(s.coordinates, ndarray)) assert(isinstance(s.values, ndarray)) assert(array_equal(s.coordinates, array([[5, 5], [5, 6], [5, 7]]))) assert(array_equal(s.values, array([0.5, 0.6, 0.7]))) assert(array_equal(s.mask((10, 10), binary=False), mask)) def test_source_fromCoordinates(self): """ (SourceConstruction) from coordinates """ s = Source.fromCoordinates([[10, 10], [10, 20]]) assert(isinstance(s.coordinates, ndarray)) assert(array_equal(s.coordinates, array([[10, 10], [10, 20]]))) class TestSourceProperties(LocalTestCase): def test_center(self): """ (SourceProperties) center """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) 
assert(array_equal(s.center, [10, 15])) def test_bbox(self): """ (SourceProperties) bounding box """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(array_equal(s.bbox, [10, 10, 10, 20])) def test_area(self): """ (SourceProperties) area """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(s.area == 2.0) def test_polygon(self): """ (SourceProperties) polygon """ x, y = meshgrid(range(0, 10), range(0, 10)) coords = zip(x.flatten(), y.flatten()) s = Source(coords) assert(array_equal(s.polygon, [[0, 0], [9, 0], [9, 9], [0, 9]])) def test_restore(self): """ (SourceProperties) remove lazy attributes """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(array_equal(s.center, [10, 15])) assert("center" in s.__dict__.keys()) s.restore() assert("center" not in s.__dict__.keys()) assert(array_equal(s.center, [10, 15])) assert("center" in s.__dict__.keys()) s.restore(skip="center") assert("center" in s.__dict__.keys()) class TestSourceMethods(LocalTestCase): def test_merge(self): """ (SourceMethods) merge """ s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) s2 = Source([[10, 30], [10, 40]], values=[4.0, 5.0]) s1.merge(s2) assert(array_equal(s1.coordinates, [[10, 10], [10, 20], [10, 30], [10, 40]])) assert(array_equal(s1.values, [1.0, 2.0, 4.0, 5.0])) s1 = Source([[10, 10], [10, 20]]) s2 = Source([[10, 30], [10, 40]]) s1.merge(s2) assert(array_equal(s1.coordinates, [[10, 10], [10, 20], [10, 30], [10, 40]])) def test_inbounds(self): """ (SourceMethods) in bounds """ # two dimensional s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(s.inbounds([0, 0], [20, 20]) == 1) assert(s.inbounds([0, 0], [10, 10]) == 0.5) assert(s.inbounds([15, 15], [20, 20]) == 0) # three dimensional s = Source([[10, 10, 10], [10, 20, 20]], values=[1.0, 2.0]) assert(s.inbounds([0, 0, 0], [20, 20, 20]) == 1) assert(s.inbounds([0, 0, 0], [10, 10, 20]) == 0.5) assert(s.inbounds([15, 15, 15], [20, 20, 20]) == 0) def test_crop(self): """ (SourceMethods) crop """ # 
without values s = Source([[10, 10], [10, 20]]) assert(array_equal(s.crop([0, 0], [21, 21]).coordinates, s.coordinates)) assert(array_equal(s.crop([0, 0], [11, 11]).coordinates, [[10, 10]])) assert(array_equal(s.crop([0, 0], [5, 5]).coordinates, [])) # with values (two dimensional) s = Source([[10, 10], [10, 20]]) assert(array_equal(s.crop([0, 0], [21, 21]).coordinates, s.coordinates)) assert(array_equal(s.crop([0, 0], [11, 11]).coordinates, [[10, 10]])) assert(array_equal(s.crop([0, 0], [5, 5]).coordinates, [])) def test_exclude(self): """ (SourceMethods) exclude """ # without values s = Source([[10, 10], [10, 20]]) o = Source([[10, 20]]) assert(array_equal(s.exclude(o).coordinates, [[10, 10]])) # with values (two dimensional) s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) o = Source([[10, 20]]) assert(array_equal(s.exclude(o).coordinates, [[10, 10]])) assert(array_equal(s.exclude(o).values, [1])) # with values (three dimensional) s = Source([[10, 10, 10], [10, 20, 20]], values=[1.0, 2.0]) o = Source([[10, 20, 20]]) assert(array_equal(s.exclude(o).coordinates, [[10, 10, 10]])) assert(array_equal(s.exclude(o).values, [1.0])) def test_dilate(self): """ (SourceMethods) dilate """ # make base source m = zeros((10, 10)) m[5, 5] = 1 m[5, 6] = 1 m[6, 5] = 1 m[4, 5] = 1 m[5, 4] = 1 coords = asarray(where(m)).T s = Source(coords) # dilating by 0 doesn't change anything assert(array_equal(s.dilate(0).coordinates, s.coordinates)) assert(array_equal(s.dilate(0).bbox, [4, 4, 6, 6])) # dilating by 1 expands region but doesn't affect center assert(array_equal(s.dilate(1).center, s.center)) assert(array_equal(s.dilate(1).area, 21)) assert(array_equal(s.dilate(1).bbox, [3, 3, 7, 7])) assert(array_equal(s.dilate(1).mask().shape, [5, 5])) # manually construct expected shape of dilated source mask truth = ones((5, 5)) truth[0, 0] = 0 truth[4, 4] = 0 truth[0, 4] = 0 truth[4, 0] = 0 assert(array_equal(s.dilate(1).mask(), truth)) def test_outline(self): """ (SourceMethods) outline 
""" # make base source m = zeros((10, 10)) m[5, 5] = 1 m[5, 6] = 1 m[6, 5] = 1 m[4, 5] = 1 m[5, 4] = 1 coords = asarray(where(m)).T s = Source(coords) # compare outlines to manual results o1 = s.outline(0, 1).mask((10, 10)) o2 = s.dilate(1).mask((10, 10)) - s.mask((10, 10)) assert(array_equal(o1, o2)) o1 = s.outline(1, 2).mask((10, 10)) o2 = s.dilate(2).mask((10, 10)) - s.dilate(1).mask((10, 10)) assert(array_equal(o1, o2)) def test_overlap(self): """ (SourceMethods) overlap """ s1 = Source([[0, 0], [0, 1], [0, 2]], values=[0, 1, 2]) s2 = Source([[0, 1], [0, 2], [0, 3]], values=[1, 2, 3]) assert(s1.overlap(s2, 'fraction') == 0.5) assert(allclose(s1.overlap(s2, 'rates'), [2.0/3.0, 2.0/3.0])) assert(s1.overlap(s2, 'correlation') == 1.0) class TestSourceConversion(LocalTestCase): def test_to_list(self): """ (SourceConversion) to list """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) getattr(s, "center") assert(isinstance(s.tolist().center, list)) getattr(s, "bbox") assert(isinstance(s.tolist().bbox, list)) def test_to_array(self): """ (SourceConversion) to array """ s = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(isinstance(s.toarray().center, ndarray)) assert(isinstance(s.tolist().toarray().center, ndarray)) assert(isinstance(s.tolist().toarray().bbox, ndarray)) class TestSourceComparison(LocalTestCase): def test_distance_source(self): """ (SourceComparison) distance to source """ s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) s2 = Source([[20, 20], [20, 30]], values=[1.0, 2.0]) assert(s1.distance(s2) == sqrt(200)) def test_distance_array(self): """ (SourceComparison) distance to array """ s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0]) assert(s1.distance([20, 25]) == sqrt(200)) assert(s1.distance(array([20, 25])) == sqrt(200)) class TestSourceModelComparison(LocalTestCase): def test_match_sources(self): """ (SourceModelComparison) matching sources """ s1 = Source([[10, 10], [10, 20]]) s2 = Source([[20, 20], [20, 30]]) s3 = Source([[20, 
20], [20, 30]]) s4 = Source([[10, 10], [10, 20]]) s5 = Source([[15, 15], [15, 20]]) sm1 = SourceModel([s1, s2]) sm2 = SourceModel([s3, s4, s5]) assert(sm1.match(sm2) == [1, 0]) assert(sm2.match(sm1) == [1, 0, 0])
unknown
codeparrot/codeparrot-clean
from django.forms.widgets import Textarea from django.template import loader, Context from django.templatetags.static import static from django.utils import translation from django.contrib.gis.gdal import OGRException from django.contrib.gis.geos import GEOSGeometry, GEOSException # Creating a template context that contains Django settings # values needed by admin map templates. geo_context = Context({'LANGUAGE_BIDI' : translation.get_language_bidi()}) class OpenLayersWidget(Textarea): """ Renders an OpenLayers map using the WKT of the geometry. """ def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs) # Defaulting the WKT value to a blank string -- this # will be tested in the JavaScript and the appropriate # interface will be constructed. self.params['wkt'] = '' # If a string reaches here (via a validation error on another # field) then just reconstruct the Geometry. if isinstance(value, basestring): try: value = GEOSGeometry(value) except (GEOSException, ValueError): value = None if value and value.geom_type.upper() != self.geom_type: value = None # Constructing the dictionary of the map options. self.params['map_options'] = self.map_options() # Constructing the JavaScript module name using the name of # the GeometryField (passed in via the `attrs` keyword). # Use the 'name' attr for the field name (rather than 'field') self.params['name'] = name # note: we must switch out dashes for underscores since js # functions are created using the module variable js_safe_name = self.params['name'].replace('-','_') self.params['module'] = 'geodjango_%s' % js_safe_name if value: # Transforming the geometry to the projection used on the # OpenLayers map. srid = self.params['srid'] if value.srid != srid: try: ogr = value.ogr ogr.transform(srid) wkt = ogr.wkt except OGRException: wkt = '' else: wkt = value.wkt # Setting the parameter WKT with that of the transformed # geometry. 
self.params['wkt'] = wkt return loader.render_to_string(self.template, self.params, context_instance=geo_context) def map_options(self): "Builds the map options hash for the OpenLayers template." # JavaScript construction utilities for the Bounds and Projection. def ol_bounds(extent): return 'new OpenLayers.Bounds(%s)' % str(extent) def ol_projection(srid): return 'new OpenLayers.Projection("EPSG:%s")' % srid # An array of the parameter name, the name of their OpenLayers # counterpart, and the type of variable they are. map_types = [('srid', 'projection', 'srid'), ('display_srid', 'displayProjection', 'srid'), ('units', 'units', str), ('max_resolution', 'maxResolution', float), ('max_extent', 'maxExtent', 'bounds'), ('num_zoom', 'numZoomLevels', int), ('max_zoom', 'maxZoomLevels', int), ('min_zoom', 'minZoomLevel', int), ] # Building the map options hash. map_options = {} for param_name, js_name, option_type in map_types: if self.params.get(param_name, False): if option_type == 'srid': value = ol_projection(self.params[param_name]) elif option_type == 'bounds': value = ol_bounds(self.params[param_name]) elif option_type in (float, int): value = self.params[param_name] elif option_type in (str,): value = '"%s"' % self.params[param_name] else: raise TypeError map_options[js_name] = value return map_options
unknown
codeparrot/codeparrot-clean
from typing import Dict, Type from mlagents.trainers.exception import UnityTrainerException from mlagents.trainers.settings import RewardSignalSettings, RewardSignalType from mlagents.trainers.torch.components.reward_providers.base_reward_provider import ( BaseRewardProvider, ) from mlagents.trainers.torch.components.reward_providers.extrinsic_reward_provider import ( ExtrinsicRewardProvider, ) from mlagents.trainers.torch.components.reward_providers.curiosity_reward_provider import ( CuriosityRewardProvider, ) from mlagents.trainers.torch.components.reward_providers.gail_reward_provider import ( GAILRewardProvider, ) from mlagents.trainers.torch.components.reward_providers.rnd_reward_provider import ( RNDRewardProvider, ) from mlagents_envs.base_env import BehaviorSpec NAME_TO_CLASS: Dict[RewardSignalType, Type[BaseRewardProvider]] = { RewardSignalType.EXTRINSIC: ExtrinsicRewardProvider, RewardSignalType.CURIOSITY: CuriosityRewardProvider, RewardSignalType.GAIL: GAILRewardProvider, RewardSignalType.RND: RNDRewardProvider, } def create_reward_provider( name: RewardSignalType, specs: BehaviorSpec, settings: RewardSignalSettings ) -> BaseRewardProvider: """ Creates a reward provider class based on the name and config entry provided as a dict. :param name: The name of the reward signal :param specs: The BehaviorSpecs of the policy :param settings: The RewardSignalSettings for that reward signal :return: The reward signal class instantiated """ rcls = NAME_TO_CLASS.get(name) if not rcls: raise UnityTrainerException(f"Unknown reward signal type {name}") class_inst = rcls(specs, settings) return class_inst
unknown
codeparrot/codeparrot-clean
(function(QUnit) { var view; QUnit.module('Backbone.View', { beforeEach: function() { $('#qunit-fixture').append( '<div id="testElement"><h1>Test</h1></div>' ); view = new Backbone.View({ id: 'test-view', className: 'test-view', other: 'non-special-option' }); }, afterEach: function() { $('#testElement').remove(); $('#test-view').remove(); } }); QUnit.test('constructor', function(assert) { assert.expect(3); assert.equal(view.el.id, 'test-view'); assert.equal(view.el.className, 'test-view'); assert.equal(view.el.other, void 0); }); QUnit.test('$', function(assert) { assert.expect(2); var myView = new Backbone.View; myView.setElement('<p><a><b>test</b></a></p>'); var result = myView.$('a b'); assert.strictEqual(result[0].innerHTML, 'test'); assert.ok(result.length === +result.length); }); QUnit.test('$el', function(assert) { assert.expect(3); var myView = new Backbone.View; myView.setElement('<p><a><b>test</b></a></p>'); assert.strictEqual(myView.el.nodeType, 1); assert.ok(myView.$el instanceof Backbone.$); assert.strictEqual(myView.$el[0], myView.el); }); QUnit.test('initialize', function(assert) { assert.expect(1); var View = Backbone.View.extend({ initialize: function() { this.one = 1; } }); assert.strictEqual(new View().one, 1); }); QUnit.test('preinitialize', function(assert) { assert.expect(1); var View = Backbone.View.extend({ preinitialize: function() { this.one = 1; } }); assert.strictEqual(new View().one, 1); }); QUnit.test('preinitialize occurs before the view is set up', function(assert) { assert.expect(2); var View = Backbone.View.extend({ preinitialize: function() { assert.equal(this.el, undefined); } }); var _view = new View({}); assert.notEqual(_view.el, undefined); }); QUnit.test('render', function(assert) { assert.expect(1); var myView = new Backbone.View; assert.equal(myView.render(), myView, '#render returns the view instance'); }); QUnit.test('delegateEvents', function(assert) { assert.expect(6); var counter1 = 0, counter2 = 0; var myView = new 
Backbone.View({el: '#testElement'}); myView.increment = function() { counter1++; }; myView.$el.on('click', function() { counter2++; }); var events = {'click h1': 'increment'}; myView.delegateEvents(events); myView.$('h1').trigger('click'); assert.equal(counter1, 1); assert.equal(counter2, 1); myView.$('h1').trigger('click'); assert.equal(counter1, 2); assert.equal(counter2, 2); myView.delegateEvents(events); myView.$('h1').trigger('click'); assert.equal(counter1, 3); assert.equal(counter2, 3); }); QUnit.test('delegate', function(assert) { assert.expect(3); var myView = new Backbone.View({el: '#testElement'}); myView.delegate('click', 'h1', function() { assert.ok(true); }); myView.delegate('click', function() { assert.ok(true); }); myView.$('h1').trigger('click'); assert.equal(myView.delegate(), myView, '#delegate returns the view instance'); }); QUnit.test('delegateEvents allows functions for callbacks', function(assert) { assert.expect(3); var myView = new Backbone.View({el: '<p></p>'}); myView.counter = 0; var events = { click: function() { this.counter++; } }; myView.delegateEvents(events); myView.$el.trigger('click'); assert.equal(myView.counter, 1); myView.$el.trigger('click'); assert.equal(myView.counter, 2); myView.delegateEvents(events); myView.$el.trigger('click'); assert.equal(myView.counter, 3); }); QUnit.test('delegateEvents ignore undefined methods', function(assert) { assert.expect(0); var myView = new Backbone.View({el: '<p></p>'}); myView.delegateEvents({click: 'undefinedMethod'}); myView.$el.trigger('click'); }); QUnit.test('undelegateEvents', function(assert) { assert.expect(7); var counter1 = 0, counter2 = 0; var myView = new Backbone.View({el: '#testElement'}); myView.increment = function() { counter1++; }; myView.$el.on('click', function() { counter2++; }); var events = {'click h1': 'increment'}; myView.delegateEvents(events); myView.$('h1').trigger('click'); assert.equal(counter1, 1); assert.equal(counter2, 1); myView.undelegateEvents(); 
myView.$('h1').trigger('click'); assert.equal(counter1, 1); assert.equal(counter2, 2); myView.delegateEvents(events); myView.$('h1').trigger('click'); assert.equal(counter1, 2); assert.equal(counter2, 3); assert.equal(myView.undelegateEvents(), myView, '#undelegateEvents returns the view instance'); }); QUnit.test('undelegate', function(assert) { assert.expect(1); var myView = new Backbone.View({el: '#testElement'}); myView.delegate('click', function() { assert.ok(false); }); myView.delegate('click', 'h1', function() { assert.ok(false); }); myView.undelegate('click'); myView.$('h1').trigger('click'); myView.$el.trigger('click'); assert.equal(myView.undelegate(), myView, '#undelegate returns the view instance'); }); QUnit.test('undelegate with passed handler', function(assert) { assert.expect(1); var myView = new Backbone.View({el: '#testElement'}); var listener = function() { assert.ok(false); }; myView.delegate('click', listener); myView.delegate('click', function() { assert.ok(true); }); myView.undelegate('click', listener); myView.$el.trigger('click'); }); QUnit.test('undelegate with selector', function(assert) { assert.expect(2); var myView = new Backbone.View({el: '#testElement'}); myView.delegate('click', function() { assert.ok(true); }); myView.delegate('click', 'h1', function() { assert.ok(false); }); myView.undelegate('click', 'h1'); myView.$('h1').trigger('click'); myView.$el.trigger('click'); }); QUnit.test('undelegate with handler and selector', function(assert) { assert.expect(2); var myView = new Backbone.View({el: '#testElement'}); myView.delegate('click', function() { assert.ok(true); }); var handler = function() { assert.ok(false); }; myView.delegate('click', 'h1', handler); myView.undelegate('click', 'h1', handler); myView.$('h1').trigger('click'); myView.$el.trigger('click'); }); QUnit.test('tagName can be provided as a string', function(assert) { assert.expect(1); var View = Backbone.View.extend({ tagName: 'span' }); assert.equal(new 
View().el.tagName, 'SPAN'); }); QUnit.test('tagName can be provided as a function', function(assert) { assert.expect(1); var View = Backbone.View.extend({ tagName: function() { return 'p'; } }); assert.ok(new View().$el.is('p')); }); QUnit.test('_ensureElement with DOM node el', function(assert) { assert.expect(1); var View = Backbone.View.extend({ el: document.body }); assert.equal(new View().el, document.body); }); QUnit.test('_ensureElement with string el', function(assert) { assert.expect(3); var View = Backbone.View.extend({ el: 'body' }); assert.strictEqual(new View().el, document.body); View = Backbone.View.extend({ el: '#testElement > h1' }); assert.strictEqual(new View().el, $('#testElement > h1').get(0)); View = Backbone.View.extend({ el: '#nonexistent' }); assert.ok(!new View().el); }); QUnit.test('with className and id functions', function(assert) { assert.expect(2); var View = Backbone.View.extend({ className: function() { return 'className'; }, id: function() { return 'id'; } }); assert.strictEqual(new View().el.className, 'className'); assert.strictEqual(new View().el.id, 'id'); }); QUnit.test('with attributes', function(assert) { assert.expect(2); var View = Backbone.View.extend({ attributes: { 'id': 'id', 'class': 'class' } }); assert.strictEqual(new View().el.className, 'class'); assert.strictEqual(new View().el.id, 'id'); }); QUnit.test('with attributes as a function', function(assert) { assert.expect(1); var View = Backbone.View.extend({ attributes: function() { return {'class': 'dynamic'}; } }); assert.strictEqual(new View().el.className, 'dynamic'); }); QUnit.test('should default to className/id properties', function(assert) { assert.expect(4); var View = Backbone.View.extend({ className: 'backboneClass', id: 'backboneId', attributes: { 'class': 'attributeClass', 'id': 'attributeId' } }); var myView = new View; assert.strictEqual(myView.el.className, 'backboneClass'); assert.strictEqual(myView.el.id, 'backboneId'); 
assert.strictEqual(myView.$el.attr('class'), 'backboneClass'); assert.strictEqual(myView.$el.attr('id'), 'backboneId'); }); QUnit.test('multiple views per element', function(assert) { assert.expect(3); var count = 0; var $el = $('<p></p>'); var View = Backbone.View.extend({ el: $el, events: { click: function() { count++; } } }); var view1 = new View; $el.trigger('click'); assert.equal(1, count); var view2 = new View; $el.trigger('click'); assert.equal(3, count); view1.delegateEvents(); $el.trigger('click'); assert.equal(5, count); }); QUnit.test('custom events', function(assert) { assert.expect(2); var View = Backbone.View.extend({ el: $('body'), events: { fake$event: function() { assert.ok(true); } } }); var myView = new View; $('body').trigger('fake$event').trigger('fake$event'); $('body').off('fake$event'); $('body').trigger('fake$event'); }); QUnit.test('#1048 - setElement uses provided object.', function(assert) { assert.expect(2); var $el = $('body'); var myView = new Backbone.View({el: $el}); assert.ok(myView.$el === $el); myView.setElement($el = $($el)); assert.ok(myView.$el === $el); }); QUnit.test('#986 - Undelegate before changing element.', function(assert) { assert.expect(1); var button1 = $('<button></button>'); var button2 = $('<button></button>'); var View = Backbone.View.extend({ events: { click: function(e) { assert.ok(myView.el === e.target); } } }); var myView = new View({el: button1}); myView.setElement(button2); button1.trigger('click'); button2.trigger('click'); }); QUnit.test('#1172 - Clone attributes object', function(assert) { assert.expect(2); var View = Backbone.View.extend({ attributes: {foo: 'bar'} }); var view1 = new View({id: 'foo'}); assert.strictEqual(view1.el.id, 'foo'); var view2 = new View(); assert.ok(!view2.el.id); }); QUnit.test('views stopListening', function(assert) { assert.expect(0); var View = Backbone.View.extend({ initialize: function() { this.listenTo(this.model, 'all x', function() { assert.ok(false); }); 
this.listenTo(this.collection, 'all x', function() { assert.ok(false); }); } }); var myView = new View({ model: new Backbone.Model, collection: new Backbone.Collection }); myView.stopListening(); myView.model.trigger('x'); myView.collection.trigger('x'); }); QUnit.test('Provide function for el.', function(assert) { assert.expect(2); var View = Backbone.View.extend({ el: function() { return '<p><a></a></p>'; } }); var myView = new View; assert.ok(myView.$el.is('p')); assert.ok(myView.$el.has('a')); }); QUnit.test('events passed in options', function(assert) { assert.expect(1); var counter = 0; var View = Backbone.View.extend({ el: '#testElement', increment: function() { counter++; } }); var myView = new View({ events: { 'click h1': 'increment' } }); myView.$('h1').trigger('click').trigger('click'); assert.equal(counter, 2); }); QUnit.test('remove', function(assert) { assert.expect(2); var myView = new Backbone.View; document.body.appendChild(view.el); myView.delegate('click', function() { assert.ok(false); }); myView.listenTo(myView, 'all x', function() { assert.ok(false); }); assert.equal(myView.remove(), myView, '#remove returns the view instance'); myView.$el.trigger('click'); myView.trigger('x'); // In IE8 and below, parentNode still exists but is not document.body. assert.notEqual(myView.el.parentNode, document.body); }); QUnit.test('setElement', function(assert) { assert.expect(3); var myView = new Backbone.View({ events: { click: function() { assert.ok(false); } } }); myView.events = { click: function() { assert.ok(true); } }; var oldEl = myView.el; var $oldEl = myView.$el; myView.setElement(document.createElement('div')); $oldEl.click(); myView.$el.click(); assert.notEqual(oldEl, myView.el); assert.notEqual($oldEl, myView.$el); }); })(QUnit);
javascript
github
https://github.com/lodash/lodash
vendor/backbone/test/view.js
// Copyright 2017 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package cli import ( "encoding/json" "fmt" "os" "path/filepath" "regexp" "strings" "testing" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/log/logpb" "github.com/cockroachdb/cockroach/pkg/util/protoutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/stretchr/testify/require" ) // TestSendKVBatchExample is a simple example of generating Protobuf-compatible // JSON for a BatchRequest doing a Put and then Get of a key. func TestSendKVBatchExample(t *testing.T) { defer leaktest.AfterTest(t)() var ba kvpb.BatchRequest ba.Add(kvpb.NewPut(roachpb.Key("foo"), roachpb.MakeValueFromString("bar"))) ba.Add(kvpb.NewGet(roachpb.Key("foo"))) // NOTE: This cannot be marshaled using the standard Go JSON marshaler, // since it does not correctly (un)marshal the JSON as mandated by the // Protobuf spec. Instead, use the JSON marshaler shipped with Protobuf. jsonpb := protoutil.JSONPb{} jsonProto, err := jsonpb.Marshal(&ba) require.NoError(t, err) fmt.Println(string(jsonProto)) } func TestSendKVBatch(t *testing.T) { defer leaktest.AfterTest(t)() // This sets the key "foo" to the value "bar", and reads back the result. jsonManual := []byte(` {"requests": [ {"put": { "header": {"key": "Zm9v"}, "value": {"raw_bytes": "DMEB5ANiYXI="} }}, {"get": { "header": {"key": "Zm9v"} }} ]}`) // Alternatively, build a BatchRequest and marshal it. This might be // preferable for more complex requests. // // NOTE: This cannot be marshaled using the standard Go JSON marshaler, // since it does not correctly (un)marshal the JSON as mandated by the // Protobuf spec. 
Instead, use the JSON marshaler shipped with Protobuf. var ba kvpb.BatchRequest ba.Add(kvpb.NewPut(roachpb.Key("foo"), roachpb.MakeValueFromString("bar"))) ba.Add(kvpb.NewGet(roachpb.Key("foo"))) jsonpb := protoutil.JSONPb{} jsonProto, err := jsonpb.Marshal(&ba) require.NoError(t, err) // This is the expected response. We zero out any HLC timestamps before comparing. jsonResponse := ` { "header": { "Timestamp": {}, "now": {}, "cpuTime": {} }, "responses": [ {"put": { "header": {} }}, {"get": { "header": {"numKeys": "1", "numBytes": "8"}, "value": {"rawBytes": "DMEB5ANiYXI=", "timestamp": {}} }} ] }` // Run test both with manual and Protobuf-generated JSON. testutils.RunTrueAndFalse(t, "fromProto", func(t *testing.T, fromProto bool) { defer log.Scope(t).Close(t) start := timeutil.Now() // Save the JSON BatchRequest to batch.json. jsonRequest := jsonManual if fromProto { jsonRequest = jsonProto } path := filepath.Join(t.TempDir(), "batch.json") require.NoError(t, os.WriteFile(path, jsonRequest, 0644)) // Start a CLI test server and run 'debug send-kv-batch batch.json'. c := NewCLITest(TestCLIParams{T: t}) defer c.Cleanup() output, err := c.RunWithCapture("debug send-kv-batch " + path) require.NoError(t, err) // Clean and check the BatchResponse output, by removing first line // (contains input command), emptying out all HLC timestamp objects, // and zeroing out the cpuTime value. output = strings.SplitN(output, "\n", 2)[1] output = regexp.MustCompile(`(?s)\{\s*"wallTime":.*?\}`).ReplaceAllString(output, "{}") output = regexp.MustCompile(`"cpuTime"\s*:\s*("[^"]*"|\d+)`).ReplaceAllString(output, `"cpuTime": {}`) require.JSONEq(t, jsonResponse, output) // Check that a structured log event was emitted. 
log.FlushFiles() entries, err := log.FetchEntriesFromFiles(start.UnixNano(), timeutil.Now().UnixNano(), 1, regexp.MustCompile("debug_send_kv_batch"), log.WithFlattenedSensitiveData) require.NoError(t, err) require.Len(t, entries, 1) entry := entries[0] require.Equal(t, logpb.Severity_INFO, entry.Severity) require.Equal(t, logpb.Channel_OPS, entry.Channel) event := map[string]interface{}{} require.NoError(t, json.Unmarshal([]byte(entry.Message[entry.StructuredStart:]), &event)) require.EqualValues(t, "debug_send_kv_batch", event["EventType"]) require.EqualValues(t, "root", event["User"]) require.EqualValues(t, 1, event["NodeID"]) // Check that the log entry contains the BatchRequest as JSON, following // a Protobuf marshaling roundtrip (for normalization). var ba kvpb.BatchRequest require.NoError(t, jsonpb.Unmarshal(jsonRequest, &ba)) expectLogJSON, err := jsonpb.Marshal(&ba) require.NoError(t, err) require.JSONEq(t, string(expectLogJSON), event["BatchRequest"].(string), "structured log entry contains unexpected BatchRequest") }) } func TestSendKVBatchTrace(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) c := NewCLITest(TestCLIParams{T: t}) defer c.Cleanup() reqJSON := `{"requests": [{"get": {"header": {"key": "Zm9v"}}}]}` path := filepath.Join(t.TempDir(), "batch.json") require.NoError(t, os.WriteFile(path, []byte(reqJSON), 0644)) // text mode, output to stderr. output, err := c.RunWithCapture("debug send-kv-batch --trace=text " + path) require.NoError(t, err) require.Contains(t, output, "=== operation:/cockroach.roachpb.Internal/Batch") // jaeger mode, output to stderr. output, err = c.RunWithCapture("debug send-kv-batch --trace=jaeger " + path) require.NoError(t, err) require.Contains(t, output, `"operationName": "/cockroach.roachpb.Internal/Batch",`) traceOut := filepath.Join(t.TempDir(), "trace.out") // text mode, output to file. 
_, err = c.RunWithCapture("debug send-kv-batch --trace=text --trace-output=" + traceOut + " " + path) require.NoError(t, err) b, err := os.ReadFile(traceOut) require.NoError(t, err) require.Contains(t, string(b), "=== operation:/cockroach.roachpb.Internal/Batch") // jaeger mode, output to file. _, err = c.RunWithCapture("debug send-kv-batch --trace=jaeger --trace-output=" + traceOut + " " + path) require.NoError(t, err) b, err = os.ReadFile(traceOut) require.NoError(t, err) require.Contains(t, string(b), `"operationName": "/cockroach.roachpb.Internal/Batch",`) } func TestSendKVBatchErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) c := NewCLITest(TestCLIParams{T: t}) defer c.Cleanup() reqJSON := `{"requests": [{"get": {"header": {"key": "Zm9v"}}}]}` path := filepath.Join(t.TempDir(), "batch.json") require.NoError(t, os.WriteFile(path, []byte(reqJSON), 0644)) // Insecure connection should error. output, err := c.RunWithCapture("debug send-kv-batch --insecure " + path) require.NoError(t, err) require.Contains(t, output, "ERROR: failed to connect") // Invalid trace mode should error. output, err = c.RunWithCapture("debug send-kv-batch --trace=unknown " + path) require.NoError(t, err) require.Contains(t, output, "ERROR: unknown --trace value") // Invalid trace output file should error. output, err = c.RunWithCapture("debug send-kv-batch --trace=on --trace-output=invalid/. " + path) require.NoError(t, err) require.Contains(t, output, "ERROR: open invalid/.: no such file or directory") // Invalid JSON should error. require.NoError(t, os.WriteFile(path, []byte("{invalid"), 0644)) output, err = c.RunWithCapture("debug send-kv-batch " + path) require.NoError(t, err) require.Contains(t, output, "ERROR: invalid JSON") // Unknown JSON field should error. 
require.NoError(t, os.WriteFile(path, []byte(`{"unknown": null}`), 0644)) output, err = c.RunWithCapture("debug send-kv-batch " + path) require.NoError(t, err) require.Contains(t, output, "ERROR: invalid JSON") }
go
github
https://github.com/cockroachdb/cockroach
pkg/cli/debug_send_kv_batch_test.go
"""Test that sys.modules is used properly by import.""" from .. import util from . import util as import_util import sys from types import MethodType import unittest class UseCache(unittest.TestCase): """When it comes to sys.modules, import prefers it over anything else. Once a name has been resolved, sys.modules is checked to see if it contains the module desired. If so, then it is returned [use cache]. If it is not found, then the proper steps are taken to perform the import, but sys.modules is still used to return the imported module (e.g., not what a loader returns) [from cache on return]. This also applies to imports of things contained within a package and thus get assigned as an attribute [from cache to attribute] or pulled in thanks to a fromlist import [from cache for fromlist]. But if sys.modules contains None then ImportError is raised [None in cache]. """ def test_using_cache(self): # [use cache] module_to_use = "some module found!" with util.uncache(module_to_use): sys.modules['some_module'] = module_to_use module = import_util.import_('some_module') self.assertEqual(id(module_to_use), id(module)) def test_None_in_cache(self): #[None in cache] name = 'using_None' with util.uncache(name): sys.modules[name] = None with self.assertRaises(ImportError): import_util.import_(name) def create_mock(self, *names, return_=None): mock = util.mock_modules(*names) original_load = mock.load_module def load_module(self, fullname): original_load(fullname) return return_ mock.load_module = MethodType(load_module, mock) return mock # __import__ inconsistent between loaders and built-in import when it comes # to when to use the module in sys.modules and when not to. 
@import_util.importlib_only def test_using_cache_after_loader(self): # [from cache on return] with self.create_mock('module') as mock: with util.import_state(meta_path=[mock]): module = import_util.import_('module') self.assertEqual(id(module), id(sys.modules['module'])) # See test_using_cache_after_loader() for reasoning. @import_util.importlib_only def test_using_cache_for_assigning_to_attribute(self): # [from cache to attribute] with self.create_mock('pkg.__init__', 'pkg.module') as importer: with util.import_state(meta_path=[importer]): module = import_util.import_('pkg.module') self.assertTrue(hasattr(module, 'module')) self.assertTrue(id(module.module), id(sys.modules['pkg.module'])) # See test_using_cache_after_loader() for reasoning. @import_util.importlib_only def test_using_cache_for_fromlist(self): # [from cache for fromlist] with self.create_mock('pkg.__init__', 'pkg.module') as importer: with util.import_state(meta_path=[importer]): module = import_util.import_('pkg', fromlist=['module']) self.assertTrue(hasattr(module, 'module')) self.assertEqual(id(module.module), id(sys.modules['pkg.module'])) def test_main(): from test.support import run_unittest run_unittest(UseCache) if __name__ == '__main__': test_main()
unknown
codeparrot/codeparrot-clean
from __future__ import absolute_import, unicode_literals from django.template import engines from django.template.loader import render_to_string from django.test import TestCase from wagtail import __version__ from wagtail.tests.testapp.blocks import SectionBlock from wagtail.wagtailcore import blocks from wagtail.wagtailcore.models import Page, Site class TestCoreGlobalsAndFilters(TestCase): def setUp(self): self.engine = engines['jinja2'] def render(self, string, context=None, request_context=True): if context is None: context = {} # Add a request to the template, to simulate a RequestContext if request_context: site = Site.objects.get(is_default_site=True) request = self.client.get('/test/', HTTP_HOST=site.hostname) request.site = site context['request'] = request template = self.engine.from_string(string) return template.render(context) def test_richtext(self): richtext = '<p>Merry <a linktype="page" id="2">Christmas</a>!</p>' self.assertEqual( self.render('{{ text|richtext }}', {'text': richtext}), '<div class="rich-text"><p>Merry <a href="/">Christmas</a>!</p></div>') def test_pageurl(self): page = Page.objects.get(pk=2) self.assertEqual( self.render('{{ pageurl(page) }}', {'page': page}), page.url) def test_slugurl(self): page = Page.objects.get(pk=2) self.assertEqual( self.render('{{ slugurl(page.slug) }}', {'page': page}), page.url) def test_wagtail_version(self): self.assertEqual( self.render('{{ wagtail_version() }}'), __version__) class TestJinjaEscaping(TestCase): fixtures = ['test.json'] def test_block_render_result_is_safe(self): """ Ensure that any results of template rendering in block.render are marked safe so that they don't get double-escaped when inserted into a parent template (#2541) """ stream_block = blocks.StreamBlock([ ('paragraph', blocks.CharBlock(template='tests/jinja2/paragraph.html')) ]) stream_value = stream_block.to_python([ {'type': 'paragraph', 'value': 'hello world'}, ]) result = render_to_string('tests/jinja2/stream.html', { 
'value': stream_value, }) self.assertIn('<p>hello world</p>', result) def test_rich_text_is_safe(self): """ Ensure that RichText values are marked safe so that they don't get double-escaped when inserted into a parent template (#2542) """ stream_block = blocks.StreamBlock([ ('paragraph', blocks.RichTextBlock(template='tests/jinja2/rich_text.html')) ]) stream_value = stream_block.to_python([ {'type': 'paragraph', 'value': '<p>Merry <a linktype="page" id="4">Christmas</a>!</p>'}, ]) result = render_to_string('tests/jinja2/stream.html', { 'value': stream_value, }) self.assertIn('<div class="rich-text"><p>Merry <a href="/events/christmas/">Christmas</a>!</p></div>', result) class TestIncludeBlockTag(TestCase): def test_include_block_tag_with_boundblock(self): """ The include_block tag should be able to render a BoundBlock's template while keeping the parent template's context """ block = blocks.CharBlock(template='tests/jinja2/heading_block.html') bound_block = block.bind('bonjour') result = render_to_string('tests/jinja2/include_block_test.html', { 'test_block': bound_block, 'language': 'fr', }) self.assertIn('<body><h1 lang="fr">bonjour</h1></body>', result) def test_include_block_tag_with_structvalue(self): """ The include_block tag should be able to render a StructValue's template while keeping the parent template's context """ block = SectionBlock() struct_value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'}) result = render_to_string('tests/jinja2/include_block_test.html', { 'test_block': struct_value, 'language': 'fr', }) self.assertIn( """<body><h1 lang="fr">Bonjour</h1><div class="rich-text">monde <i>italique</i></div></body>""", result ) def test_include_block_tag_with_streamvalue(self): """ The include_block tag should be able to render a StreamValue's template while keeping the parent template's context """ block = blocks.StreamBlock([ ('heading', blocks.CharBlock(template='tests/jinja2/heading_block.html')), ('paragraph', 
blocks.CharBlock()), ], template='tests/jinja2/stream_with_language.html') stream_value = block.to_python([ {'type': 'heading', 'value': 'Bonjour'} ]) result = render_to_string('tests/jinja2/include_block_test.html', { 'test_block': stream_value, 'language': 'fr', }) self.assertIn('<div class="heading" lang="fr"><h1 lang="fr">Bonjour</h1></div>', result) def test_include_block_tag_with_plain_value(self): """ The include_block tag should be able to render a value without a render_as_block method by just rendering it as a string """ result = render_to_string('tests/jinja2/include_block_test.html', { 'test_block': 42, }) self.assertIn('<body>42</body>', result) def test_include_block_tag_with_filtered_value(self): """ The block parameter on include_block tag should support complex values including filters, e.g. {% include_block foo|default:123 %} """ block = blocks.CharBlock(template='tests/jinja2/heading_block.html') bound_block = block.bind('bonjour') result = render_to_string('tests/jinja2/include_block_test_with_filter.html', { 'test_block': bound_block, 'language': 'fr', }) self.assertIn('<body><h1 lang="fr">bonjour</h1></body>', result) result = render_to_string('tests/jinja2/include_block_test_with_filter.html', { 'test_block': None, 'language': 'fr', }) self.assertIn('<body>999</body>', result)
unknown
codeparrot/codeparrot-clean
import pytest

from httpie.compat import is_windows
from httpie.output.streams import BINARY_SUPPRESSED_NOTICE
from utils import http, TestEnvironment
from fixtures import BIN_FILE_CONTENT, BIN_FILE_PATH


class TestStream:
    # GET because httpbin 500s with binary POST body.

    @pytest.mark.skipif(is_windows,
                        reason='Pretty redirect not supported under Windows')
    def test_pretty_redirected_stream(self, httpbin):
        """--stream combined with prettified, redirected output suppresses
        the binary body and prints a notice instead."""
        with open(BIN_FILE_PATH, 'rb') as stdin_file:
            environment = TestEnvironment(
                colors=256,
                stdin=stdin_file,
                stdin_isatty=False,
                stdout_isatty=False,
            )
            response = http('--verbose', '--pretty=all', '--stream', 'GET',
                            httpbin.url + '/get', env=environment)
        assert BINARY_SUPPRESSED_NOTICE.decode() in response

    def test_encoded_stream(self, httpbin):
        """--stream with encoded (non-prettified) terminal output still
        suppresses the binary body."""
        with open(BIN_FILE_PATH, 'rb') as stdin_file:
            environment = TestEnvironment(stdin=stdin_file, stdin_isatty=False)
            response = http('--pretty=none', '--stream', '--verbose', 'GET',
                            httpbin.url + '/get', env=environment)
        assert BINARY_SUPPRESSED_NOTICE.decode() in response

    def test_redirected_stream(self, httpbin):
        """--stream with output redirected away from a terminal passes the
        binary body through unmodified."""
        with open(BIN_FILE_PATH, 'rb') as stdin_file:
            environment = TestEnvironment(
                stdout_isatty=False,
                stdin_isatty=False,
                stdin=stdin_file,
            )
            response = http('--pretty=none', '--stream', '--verbose', 'GET',
                            httpbin.url + '/get', env=environment)
        assert BIN_FILE_CONTENT in response
unknown
codeparrot/codeparrot-clean
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import java.io.DataInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.util.EnumSet; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Options.CreateOpts.BlockSize; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; /** * Helper class for unit tests. 
 */
public final class FileSystemTestWrapper extends FSTestWrapper {

  // The wrapped FileSystem instance; every operation delegates to it.
  private final FileSystem fs;

  public FileSystemTestWrapper(FileSystem fs) {
    this(fs, null);
  }

  public FileSystemTestWrapper(FileSystem fs, String rootDir) {
    super(rootDir);
    this.fs = fs;
  }

  /** Build a wrapper around the local file system sharing this one's conf. */
  public FSTestWrapper getLocalFSWrapper() throws IOException {
    return new FileSystemTestWrapper(FileSystem.getLocal(fs.getConf()));
  }

  /** Qualified test-root path for the current user's home directory. */
  public Path getDefaultWorkingDirectory() throws IOException {
    return getTestRootPath("/user/" + System.getProperty("user.name"))
        .makeQualified(fs.getUri(), fs.getWorkingDirectory());
  }

  /*
   * Create files with numBlocks blocks each with block size blockSize.
   */
  public long createFile(Path path, int numBlocks, CreateOpts... options)
      throws IOException {
    BlockSize blockSizeOpt =
        CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
    // Fall back to the wrapper-wide default when no BlockSize opt was given.
    long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
        : DEFAULT_BLOCK_SIZE;
    FSDataOutputStream out =
        create(path, EnumSet.of(CreateFlag.CREATE), options);
    byte[] data = getFileData(numBlocks, blockSize);
    out.write(data, 0, data.length);
    out.close();
    // Returns the number of bytes written.
    return data.length;
  }

  public long createFile(Path path, int numBlocks, int blockSize)
      throws IOException {
    return createFile(path, numBlocks, CreateOpts.blockSize(blockSize),
        CreateOpts.createParent());
  }

  public long createFile(Path path) throws IOException {
    return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
  }

  public long createFile(String name) throws IOException {
    Path path = getTestRootPath(name);
    return createFile(path);
  }

  public long createFileNonRecursive(String name) throws IOException {
    Path path = getTestRootPath(name);
    return createFileNonRecursive(path);
  }

  /** Create a file whose missing parent directories are NOT created. */
  public long createFileNonRecursive(Path path) throws IOException {
    return createFile(path, DEFAULT_NUM_BLOCKS,
        CreateOpts.donotCreateParent());
  }

  /** Append numBlocks blocks of generated data to an existing file. */
  public void appendToFile(Path path, int numBlocks, CreateOpts... options)
      throws IOException {
    BlockSize blockSizeOpt =
        CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
    long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
        : DEFAULT_BLOCK_SIZE;
    FSDataOutputStream out;
    out = fs.append(path);
    byte[] data = getFileData(numBlocks, blockSize);
    out.write(data, 0, data.length);
    out.close();
  }

  public boolean exists(Path p) throws IOException {
    return fs.exists(p);
  }

  // The is* predicates return false (rather than throwing) for missing paths.
  public boolean isFile(Path p) throws IOException {
    try {
      return fs.getFileStatus(p).isFile();
    } catch (FileNotFoundException e) {
      return false;
    }
  }

  public boolean isDir(Path p) throws IOException {
    try {
      return fs.getFileStatus(p).isDirectory();
    } catch (FileNotFoundException e) {
      return false;
    }
  }

  public boolean isSymlink(Path p) throws IOException {
    try {
      return fs.getFileLinkStatus(p).isSymlink();
    } catch (FileNotFoundException e) {
      return false;
    }
  }

  /** Write the given bytes as the complete contents of a new file. */
  public void writeFile(Path path, byte b[]) throws IOException {
    FSDataOutputStream out =
        create(path, EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
    out.write(b);
    out.close();
  }

  /** Read exactly len bytes from the start of the file at path. */
  public byte[] readFile(Path path, int len) throws IOException {
    DataInputStream dis = fs.open(path);
    byte[] buffer = new byte[len];
    IOUtils.readFully(dis, buffer, 0, len);
    dis.close();
    return buffer;
  }

  /** Return the entry in dirList with the given path, or null if absent. */
  public FileStatus containsPath(Path path, FileStatus[] dirList)
    throws IOException {
    for(int i = 0; i < dirList.length; i ++) {
      if (path.equals(dirList[i].getPath()))
        return dirList[i];
      }
    return null;
  }

  public FileStatus containsPath(String path, FileStatus[] dirList)
     throws IOException {
    return containsPath(new Path(path), dirList);
  }

  /** Assert that the status of path matches the expected file type. */
  public void checkFileStatus(String path, fileType expectedType)
      throws IOException {
    FileStatus s = fs.getFileStatus(new Path(path));
    assertNotNull(s);
    if (expectedType == fileType.isDir) {
      assertTrue(s.isDirectory());
    } else if (expectedType == fileType.isFile) {
      assertTrue(s.isFile());
    } else if (expectedType == fileType.isSymlink) {
      assertTrue(s.isSymlink());
    }
    assertEquals(fs.makeQualified(new Path(path)), s.getPath());
  }

  /** Like checkFileStatus but using the link status (does not follow links). */
  public void checkFileLinkStatus(String path, fileType expectedType)
      throws IOException {
    FileStatus s = fs.getFileLinkStatus(new Path(path));
    assertNotNull(s);
    if (expectedType == fileType.isDir) {
      assertTrue(s.isDirectory());
    } else if (expectedType == fileType.isFile) {
      assertTrue(s.isFile());
    } else if (expectedType == fileType.isSymlink) {
      assertTrue(s.isSymlink());
    }
    assertEquals(fs.makeQualified(new Path(path)), s.getPath());
  }

  //
  // FileContext wrappers
  //

  @Override
  public Path makeQualified(Path path) {
    return fs.makeQualified(path);
  }

  @SuppressWarnings("deprecation")
  @Override
  public void mkdir(Path dir, FsPermission permission, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, IOException {
    fs.primitiveMkdir(dir, permission, createParent);
  }

  @Override
  public boolean delete(Path f, boolean recursive)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return fs.delete(f, recursive);
  }

  @Override
  public FileStatus getFileLinkStatus(Path f) throws AccessControlException,
      FileNotFoundException, UnsupportedFileSystemException, IOException {
    return fs.getFileLinkStatus(f);
  }

  @Override
  public void createSymlink(Path target, Path link, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, IOException {
    fs.createSymlink(target, link, createParent);
  }

  @Override
  public void setWorkingDirectory(Path newWDir) throws IOException {
    fs.setWorkingDirectory(newWDir);
  }

  @Override
  public Path getWorkingDirectory() {
    return fs.getWorkingDirectory();
  }

  @Override
  public FileStatus getFileStatus(Path f) throws AccessControlException,
      FileNotFoundException, UnsupportedFileSystemException, IOException {
    return fs.getFileStatus(f);
  }

  @Override
  public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
      CreateOpts... opts) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnsupportedFileSystemException,
      IOException {
    // Need to translate the FileContext-style options into FileSystem-style

    // Permissions with umask
    CreateOpts.Perms permOpt = CreateOpts.getOpt(
        CreateOpts.Perms.class, opts);
    FsPermission umask = FsPermission.getUMask(fs.getConf());
    FsPermission permission = (permOpt != null) ? permOpt.getValue()
        : FsPermission.getFileDefault().applyUMask(umask);
    permission = permission.applyUMask(umask);
    // Overwrite
    boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
    // bufferSize
    int bufferSize = fs.getConf().getInt(
        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
    CreateOpts.BufferSize bufOpt = CreateOpts.getOpt(
        CreateOpts.BufferSize.class, opts);
    bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
    // replication
    short replication = fs.getDefaultReplication(f);
    CreateOpts.ReplicationFactor repOpt =
        CreateOpts.getOpt(CreateOpts.ReplicationFactor.class, opts);
    replication = (repOpt != null) ? repOpt.getValue() : replication;
    // blockSize
    long blockSize = fs.getDefaultBlockSize(f);
    CreateOpts.BlockSize blockOpt = CreateOpts.getOpt(
        CreateOpts.BlockSize.class, opts);
    blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
    // Progressable
    Progressable progress = null;
    CreateOpts.Progress progressOpt = CreateOpts.getOpt(
        CreateOpts.Progress.class, opts);
    progress = (progressOpt != null) ? progressOpt.getValue() : progress;
    return fs.create(f, permission, overwrite, bufferSize, replication,
        blockSize, progress);
  }

  @Override
  public FSDataInputStream open(Path f) throws AccessControlException,
      FileNotFoundException, UnsupportedFileSystemException, IOException {
    return fs.open(f);
  }

  @Override
  public Path getLinkTarget(Path f) throws AccessControlException,
      FileNotFoundException, UnsupportedFileSystemException, IOException {
    return fs.getLinkTarget(f);
  }

  @Override
  public boolean setReplication(final Path f, final short replication)
      throws AccessControlException, FileNotFoundException, IOException {
    return fs.setReplication(f, replication);
  }

  @SuppressWarnings("deprecation")
  @Override
  public void rename(Path src, Path dst, Rename... options)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, IOException {
    fs.rename(src, dst, options);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return fs.getFileBlockLocations(f, start, len);
  }

  @Override
  public FileChecksum getFileChecksum(Path f) throws AccessControlException,
      FileNotFoundException, IOException {
    return fs.getFileChecksum(f);
  }

  @Override
  public RemoteIterator<FileStatus> listStatusIterator(Path f)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return fs.listStatusIterator(f);
  }

  @Override
  public void setPermission(final Path f, final FsPermission permission)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    fs.setPermission(f, permission);
  }

  @Override
  public void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      UnsupportedFileSystemException, FileNotFoundException, IOException {
    fs.setOwner(f, username, groupname);
  }

  @Override
  public void setTimes(Path f, long mtime, long atime)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    fs.setTimes(f, mtime, atime);
  }

  @Override
  public FileStatus[] listStatus(Path f) throws AccessControlException,
      FileNotFoundException, UnsupportedFileSystemException, IOException {
    return fs.listStatus(f);
  }

  @Override
  public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
      throws IOException {
    return fs.globStatus(pathPattern, filter);
  }
}
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java
# frozen_string_literal: true ActiveJob::Base.queue_adapter = :resque Resque.inline = true
ruby
github
https://github.com/rails/rails
activejob/test/adapters/resque.rb
{ // The version of the config file format. Do not change, unless // you know what you are doing. "version": 1, // The name of the project being benchmarked "project": "numpy", // The project's homepage "project_url": "https://numpy.org", // The URL or local path of the source code repository for the // project being benchmarked "repo": "..", // List of branches to benchmark. If not provided, defaults to "master" // (for git) or "tip" (for mercurial). "branches": ["HEAD"], "build_command": [ "python -m build --wheel -o {build_cache_dir} {build_dir}" ], // The DVCS being used. If not set, it will be automatically // determined from "repo" by looking at the protocol in the URL // (if remote), or by looking for special directories, such as // ".git" (if local). "dvcs": "git", // The tool to use to create environments. May be "conda", // "virtualenv" or other value depending on the plugins in use. // If missing or the empty string, the tool will be automatically // determined by looking for tools on the PATH environment // variable. "environment_type": "virtualenv", // the base URL to show a commit for the project. "show_commit_url": "https://github.com/numpy/numpy/commit/", // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. // "pythons": ["3.9"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty // list indicates to just test against the default (latest) // version. "matrix": { "Cython": [], "build": [], "packaging": [] }, // The directory (relative to the current directory) that benchmarks are // stored in. If not provided, defaults to "benchmarks" "benchmark_dir": "benchmarks", // The directory (relative to the current directory) to cache the Python // environments in. 
If not provided, defaults to "env" "env_dir": "env", // The directory (relative to the current directory) that raw benchmark // results are stored in. If not provided, defaults to "results". "results_dir": "results", // The directory (relative to the current directory) that the html tree // should be written to. If not provided, defaults to "html". "html_dir": "html", // The number of characters to retain in the commit hashes. // "hash_length": 8, // `asv` will cache wheels of the recent builds in each // environment, making them faster to install next time. This is // number of builds to keep, per environment. "build_cache_size": 8, // The commits after which the regression search in `asv publish` // should start looking for regressions. Dictionary whose keys are // regexps matching to benchmark names, and values corresponding to // the commit (exclusive) after which to start looking for // regressions. The default is to start from the first commit // with results. If the commit is `null`, regression detection is // skipped for the matching benchmark. // // "regressions_first_commits": { // "some_benchmark": "352cdf", // Consider regressions only after this commit // "another_benchmark": null, // Skip regression detection altogether // } }
json
github
https://github.com/numpy/numpy
benchmarks/asv.conf.json