code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
#!/bin/python3 """ Primitive calculator Given ops *3, *2, +1, what is the fewest ops to reach n from 1? """ import os import sys def main(): sequence = optimal_sequence_linear(int(input())) print(len(sequence) - 1) print(*sequence) def optimal_sequence_linear(n): """ Solve by calculating min-steps for each i in 1..n """ steps = [0] * (n + 1) # calculate array for i in range(1, n + 1): prev = [ steps[i - 1] ] if not i % 3: prev.append(steps[i // 3]) if not i % 2: prev.append(steps[i // 2]) steps[i] = min(prev) + 1 # now work backwards to find the solution seq = [] while n: seq.append(n) prev = [ (steps[n-1], n-1) ] if not n % 3: prev.append((steps[n // 3], n // 3)) if not n % 2: prev.append((steps[n // 2], n // 2)) prev.sort() n = prev[0][1] seq.reverse() return seq class SolutionFound(Exception): pass def optimal_sequence_bfs_nested(n): """ Solving this as BFS of a math ops DAG, each vertex with 3 edges. Whichever branch gets n to 1 first is the keeper. TOO SLOW. """ if n == 1: return [1] ops = [ lambda n: 0 if n % 3 else n // 3, lambda n: 0 if n % 2 else n // 2, lambda n: n - 1 ] solution = None queue = [ (n,) ] try: while True: previous = queue queue = [] for steps in previous: for new in [ op(steps[0]) for op in ops ]: if new == 1: solution = steps raise SolutionFound if new > 1: queue.append((new, steps)) except SolutionFound: pass sequence = [1] try: while True: sequence.append(solution[0]) solution = solution[1] except IndexError: pass return sequence if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
""" Defines miscellaneous Qt-related helper classes and functions. """ # Standard library imports. import inspect # System library imports. from IPython.external.qt import QtCore, QtGui # IPython imports. from IPython.utils.traitlets import HasTraits, TraitType #----------------------------------------------------------------------------- # Metaclasses #----------------------------------------------------------------------------- MetaHasTraits = type(HasTraits) MetaQObject = type(QtCore.QObject) class MetaQObjectHasTraits(MetaQObject, MetaHasTraits): """ A metaclass that inherits from the metaclasses of HasTraits and QObject. Using this metaclass allows a class to inherit from both HasTraits and QObject. Using SuperQObject instead of QObject is highly recommended. See QtKernelManager for an example. """ def __new__(mcls, name, bases, classdict): # FIXME: this duplicates the code from MetaHasTraits. # I don't think a super() call will help me here. for k,v in classdict.iteritems(): if isinstance(v, TraitType): v.name = k elif inspect.isclass(v): if issubclass(v, TraitType): vinst = v() vinst.name = k classdict[k] = vinst cls = MetaQObject.__new__(mcls, name, bases, classdict) return cls def __init__(mcls, name, bases, classdict): # Note: super() did not work, so we explicitly call these. MetaQObject.__init__(mcls, name, bases, classdict) MetaHasTraits.__init__(mcls, name, bases, classdict) #----------------------------------------------------------------------------- # Classes #----------------------------------------------------------------------------- class SuperQObject(QtCore.QObject): """ Permits the use of super() in class hierarchies that contain QObject. Unlike QObject, SuperQObject does not accept a QObject parent. If it did, super could not be emulated properly (all other classes in the heierarchy would have to accept the parent argument--they don't, of course, because they don't inherit QObject.) 
This class is primarily useful for attaching signals to existing non-Qt classes. See QtKernelManager for an example. """ def __new__(cls, *args, **kw): # We initialize QObject as early as possible. Without this, Qt complains # if SuperQObject is not the first class in the super class list. inst = QtCore.QObject.__new__(cls) QtCore.QObject.__init__(inst) return inst def __init__(self, *args, **kw): # Emulate super by calling the next method in the MRO, if there is one. mro = self.__class__.mro() for qt_class in QtCore.QObject.mro(): mro.remove(qt_class) next_index = mro.index(SuperQObject) + 1 if next_index < len(mro): mro[next_index].__init__(self, *args, **kw) #----------------------------------------------------------------------------- # Functions #----------------------------------------------------------------------------- def get_font(family, fallback=None): """Return a font of the requested family, using fallback as alternative. If a fallback is provided, it is used in case the requested family isn't found. If no fallback is given, no alternative is chosen and Qt's internal algorithms may automatically choose a fallback font. Parameters ---------- family : str A font name. fallback : str A font name. Returns ------- font : QFont object """ font = QtGui.QFont(family) # Check whether we got what we wanted using QFontInfo, since exactMatch() # is overly strict and returns false in too many cases. font_info = QtGui.QFontInfo(font) if fallback is not None and font_info.family() != family: font = QtGui.QFont(fallback) return font
unknown
codeparrot/codeparrot-clean
# Script Name : modifyOBIEEHOLUsers.py # Created by : Art of BI Software (artofbi.com) # Author : C.Screen # Date : 2013/10/01 ##--------------------------------------------------------- # NOTES ##--------------------------------------------------------- # 1. ... # ##--------------------------------------------------------- ##--------------------------------------------------------- # DO NOT EDIT BELOW THIS LINE UNLESS ADVISED BY SUPPORT ##--------------------------------------------------------- # -- Usage of arguments per getopt import (http://davidmichaelkarr.blogspot.com/2008/10/make-wlst-scripts-more-flexible-with.html) -- def usage(): print "Usage:" print "Must be used from the install command only" # -- Connect to WLS Server -- def connectToWLSAdmin(): try: connect(adminUserName, adminPassword, adminURL) print('Successfully connected') except: print 'Unable to find admin server...' exit() # -- Generate Users Application -- def modifyUserLoop() : try: i = int(userNamePrefixNumberStart) #for i in range(1000): while (i <= int(userNamePrefixNumberFinish)): userToModify = userNamePrefix + "%s" % str(i).zfill(int(useZeroPaddingCount)) print userToModify dauth=cmo.getSecurityConfiguration().getDefaultRealm().lookupAuthenticationProvider("DefaultAuthenticator") dauth.removeMemberFromGroup(groupToAssignUsers, userToModify) print "modify user at group " + groupToAssignUsers + "..." dauth.removeUser(userToModify) print "deleted user " + userToModify ### // if adding to a application role directly ###if len(appRoleToAssignUsers) > 1 ### grantAppRole("obi", groupToAssignUsers, "weblogic.security.principal.WLSUserImpl", userToModify) i = i + 1 print "User Modified" except: print "Exception Occurred. The user showing above may have some issues. Check the security realm." 
############### Main Script ##################################### #if __name__ == "__main__": # import sys import sys import getopt adminURL=sys.argv[1] adminUserName=sys.argv[2] adminPassword=sys.argv[3] createOrRemove=sys.argv[4] userNamePrefix=sys.argv[5] userPassword=sys.argv[6] userNamePrefixNumberStart=sys.argv[7] userNamePrefixNumberFinish=sys.argv[8] groupToAssignUsers=sys.argv[9] useZeroPaddingCount=sys.argv[10] print "[INFO] Starting Script" connectToWLSAdmin() print "[INFO] Modifying Users..." modifyUserLoop() print "[INFO] Disconnecting..." disconnect() print "[INFO] End WLST Work" ####################################
unknown
codeparrot/codeparrot-clean
import inspect import os import re from importlib import import_module from django.apps import apps from django.conf import settings from django.contrib import admin from django.contrib.admin.views.decorators import staff_member_required from django.contrib.admindocs import utils from django.core import urlresolvers from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.db import models from django.http import Http404 from django.template.engine import Engine from django.utils.decorators import method_decorator from django.utils.inspect import func_has_no_args from django.utils.translation import ugettext as _ from django.views.generic import TemplateView # Exclude methods starting with these strings from documentation MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_') class BaseAdminDocsView(TemplateView): """ Base view for admindocs views. """ @method_decorator(staff_member_required) def dispatch(self, request, *args, **kwargs): if not utils.docutils_is_available: # Display an error message for people without docutils self.template_name = 'admin_doc/missing_docutils.html' return self.render_to_response(admin.site.each_context(request)) return super(BaseAdminDocsView, self).dispatch(request, *args, **kwargs) def get_context_data(self, **kwargs): kwargs.update({'root_path': urlresolvers.reverse('admin:index')}) kwargs.update(admin.site.each_context(self.request)) return super(BaseAdminDocsView, self).get_context_data(**kwargs) class BookmarkletsView(BaseAdminDocsView): template_name = 'admin_doc/bookmarklets.html' def get_context_data(self, **kwargs): context = super(BookmarkletsView, self).get_context_data(**kwargs) context.update({ 'admin_url': "%s://%s%s" % ( self.request.scheme, self.request.get_host(), context['root_path']) }) return context class TemplateTagIndexView(BaseAdminDocsView): template_name = 'admin_doc/template_tag_index.html' def get_context_data(self, **kwargs): tags = [] try: engine = 
Engine.get_default() except ImproperlyConfigured: # Non-trivial TEMPLATES settings aren't supported (#24125). pass else: app_libs = sorted(engine.template_libraries.items()) builtin_libs = [('', lib) for lib in engine.template_builtins] for module_name, library in builtin_libs + app_libs: for tag_name, tag_func in library.tags.items(): title, body, metadata = utils.parse_docstring(tag_func.__doc__) if title: title = utils.parse_rst(title, 'tag', _('tag:') + tag_name) if body: body = utils.parse_rst(body, 'tag', _('tag:') + tag_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name) tag_library = module_name.split('.')[-1] tags.append({ 'name': tag_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) kwargs.update({'tags': tags}) return super(TemplateTagIndexView, self).get_context_data(**kwargs) class TemplateFilterIndexView(BaseAdminDocsView): template_name = 'admin_doc/template_filter_index.html' def get_context_data(self, **kwargs): filters = [] try: engine = Engine.get_default() except ImproperlyConfigured: # Non-trivial TEMPLATES settings aren't supported (#24125). 
pass else: app_libs = sorted(engine.template_libraries.items()) builtin_libs = [('', lib) for lib in engine.template_builtins] for module_name, library in builtin_libs + app_libs: for filter_name, filter_func in library.filters.items(): title, body, metadata = utils.parse_docstring(filter_func.__doc__) if title: title = utils.parse_rst(title, 'filter', _('filter:') + filter_name) if body: body = utils.parse_rst(body, 'filter', _('filter:') + filter_name) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name) tag_library = module_name.split('.')[-1] filters.append({ 'name': filter_name, 'title': title, 'body': body, 'meta': metadata, 'library': tag_library, }) kwargs.update({'filters': filters}) return super(TemplateFilterIndexView, self).get_context_data(**kwargs) class ViewIndexView(BaseAdminDocsView): template_name = 'admin_doc/view_index.html' def get_context_data(self, **kwargs): views = [] urlconf = import_module(settings.ROOT_URLCONF) view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns) for (func, regex, namespace, name) in view_functions: views.append({ 'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)), 'url': simplify_regex(regex), 'url_name': ':'.join((namespace or []) + (name and [name] or [])), 'namespace': ':'.join((namespace or [])), 'name': name, }) kwargs.update({'views': views}) return super(ViewIndexView, self).get_context_data(**kwargs) class ViewDetailView(BaseAdminDocsView): template_name = 'admin_doc/view_detail.html' def get_context_data(self, **kwargs): view = self.kwargs['view'] urlconf = urlresolvers.get_urlconf() if urlresolvers.get_resolver(urlconf)._is_callback(view): mod, func = urlresolvers.get_mod_func(view) view_func = getattr(import_module(mod), func) else: raise Http404 title, body, metadata = utils.parse_docstring(view_func.__doc__) if title: title = utils.parse_rst(title, 'view', _('view:') + view) if body: body = 
utils.parse_rst(body, 'view', _('view:') + view) for key in metadata: metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view) kwargs.update({ 'name': view, 'summary': title, 'body': body, 'meta': metadata, }) return super(ViewDetailView, self).get_context_data(**kwargs) class ModelIndexView(BaseAdminDocsView): template_name = 'admin_doc/model_index.html' def get_context_data(self, **kwargs): m_list = [m._meta for m in apps.get_models()] kwargs.update({'models': m_list}) return super(ModelIndexView, self).get_context_data(**kwargs) class ModelDetailView(BaseAdminDocsView): template_name = 'admin_doc/model_detail.html' def get_context_data(self, **kwargs): model_name = self.kwargs['model_name'] # Get the model class. try: app_config = apps.get_app_config(self.kwargs['app_label']) except LookupError: raise Http404(_("App %(app_label)r not found") % self.kwargs) try: model = app_config.get_model(model_name) except LookupError: raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs) opts = model._meta title, body, metadata = utils.parse_docstring(model.__doc__) if title: title = utils.parse_rst(title, 'model', _('model:') + model_name) if body: body = utils.parse_rst(body, 'model', _('model:') + model_name) # Gather fields/field descriptions. fields = [] for field in opts.fields: # ForeignKey is a special case since the field will actually be a # descriptor that returns the other object if isinstance(field, models.ForeignKey): data_type = field.remote_field.model.__name__ app_label = field.remote_field.model._meta.app_label verbose = utils.parse_rst( (_("the related `%(app_label)s.%(data_type)s` object") % { 'app_label': app_label, 'data_type': data_type, }), 'model', _('model:') + data_type, ) else: data_type = get_readable_field_data_type(field) verbose = field.verbose_name fields.append({ 'name': field.name, 'data_type': data_type, 'verbose': verbose, 'help_text': field.help_text, }) # Gather many-to-many fields. 
for field in opts.many_to_many: data_type = field.remote_field.model.__name__ app_label = field.remote_field.model._meta.app_label verbose = _("related `%(app_label)s.%(object_name)s` objects") % { 'app_label': app_label, 'object_name': data_type, } fields.append({ 'name': "%s.all" % field.name, "data_type": 'List', 'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name), }) fields.append({ 'name': "%s.count" % field.name, 'data_type': 'Integer', 'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name), }) # Gather model methods. for func_name, func in model.__dict__.items(): if inspect.isfunction(func) and func_has_no_args(func): try: for exclude in MODEL_METHODS_EXCLUDE: if func_name.startswith(exclude): raise StopIteration except StopIteration: continue verbose = func.__doc__ if verbose: verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name) fields.append({ 'name': func_name, 'data_type': get_return_data_type(func_name), 'verbose': verbose, }) # Gather related objects for rel in opts.related_objects: verbose = _("related `%(app_label)s.%(object_name)s` objects") % { 'app_label': rel.related_model._meta.app_label, 'object_name': rel.related_model._meta.object_name, } accessor = rel.get_accessor_name() fields.append({ 'name': "%s.all" % accessor, 'data_type': 'List', 'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name), }) fields.append({ 'name': "%s.count" % accessor, 'data_type': 'Integer', 'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name), }) kwargs.update({ 'name': '%s.%s' % (opts.app_label, opts.object_name), 'summary': title, 'description': body, 'fields': fields, }) return super(ModelDetailView, self).get_context_data(**kwargs) class TemplateDetailView(BaseAdminDocsView): template_name = 'admin_doc/template_detail.html' def get_context_data(self, **kwargs): 
template = self.kwargs['template'] templates = [] try: default_engine = Engine.get_default() except ImproperlyConfigured: # Non-trivial TEMPLATES settings aren't supported (#24125). pass else: # This doesn't account for template loaders (#24128). for index, directory in enumerate(default_engine.dirs): template_file = os.path.join(directory, template) templates.append({ 'file': template_file, 'exists': os.path.exists(template_file), 'contents': lambda: open(template_file).read() if os.path.exists(template_file) else '', 'order': index, }) kwargs.update({ 'name': template, 'templates': templates, }) return super(TemplateDetailView, self).get_context_data(**kwargs) #################### # Helper functions # #################### def get_return_data_type(func_name): """Return a somewhat-helpful data type given a function name""" if func_name.startswith('get_'): if func_name.endswith('_list'): return 'List' elif func_name.endswith('_count'): return 'Integer' return '' def get_readable_field_data_type(field): """Returns the description for a given field type, if it exists, Fields' descriptions can contain format strings, which will be interpolated against the values of field.__dict__ before being output.""" return field.description % field.__dict__ def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None): """ Return a list of views from a list of urlpatterns. 
Each object in the returned list is a two-tuple: (view_func, regex) """ views = [] for p in urlpatterns: if hasattr(p, 'url_patterns'): try: patterns = p.url_patterns except ImportError: continue views.extend(extract_views_from_urlpatterns( patterns, base + p.regex.pattern, (namespace or []) + (p.namespace and [p.namespace] or []) )) elif hasattr(p, 'callback'): try: views.append((p.callback, base + p.regex.pattern, namespace, p.name)) except ViewDoesNotExist: continue else: raise TypeError(_("%s does not appear to be a urlpattern object") % p) return views named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)') non_named_group_matcher = re.compile(r'\(.*?\)') def simplify_regex(pattern): """ Clean up urlpattern regexes into something somewhat readable by Mere Humans: turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$" into "<sport_slug>/athletes/<athlete_slug>/" """ # handle named groups first pattern = named_group_matcher.sub(lambda m: m.group(1), pattern) # handle non-named groups pattern = non_named_group_matcher.sub("<var>", pattern) # clean up any outstanding regex-y characters. pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '') if not pattern.startswith('/'): pattern = '/' + pattern return pattern
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python ''' Small script to rewrite McStas trace output to CSV data for plotting ''' import argparse import sys import numpy as np import x3d from util import parse_multiline, rotate, get_line, debug, draw_circle UC_COMP = 'COMPONENT:' MC_COMP = 'MCDISPLAY: component' MC_COMP_SHORT = 'COMP: ' MC_LINE = 'MCDISPLAY: multiline' MC_CIRCLE = 'MCDISPLAY: circle' MC_ENTER = 'ENTER:' MC_LEAVE = 'LEAVE:' MC_STATE = 'STATE:' MC_SCATTER = 'SCATTER:' MC_ABSORB = 'ABSORB:' colors = ["1.0, 0.0, 0.0","0.0, 1.0, 0.0","0.0, 0.0, 1.0", "1.0, 1.0, 0.0","1.0, 0.0, 1.0","0.0, 1.0, 1.0", "1.0, 1.0, 1.0","0.5, 1.0, 1.0","1.0, 0.5, 1.0", "1.0, 1.0, 0.5","0.5, 0.0, 1.0","0.0, 0.5, 1.0", "0.0, 1.0, 0.5","0.5, 1.0, 0.0","1.0, 0.5, 0.0", "1.0, 0.0, 0.5","0.5, 0.0, 0.0","0.0, 0.5, 0.0", "0.0, 0.0, 0.5","0.5, 0.5, 1.0","0.5, 1.0, 0.5", "1.0, 0.5, 0.5","0.5, 0.0, 0.5","0.0, 0.5, 0.5", "0.5, 0.5, 0.0","0.5, 0.5, 0.5"] def getColor(n): return colors[n % len(colors)] def parse_trace(world, fp=sys.stdin, inspectComp=None): ''' Prase McStas trace output from stdin and write result to output ''' color = 0 # def out_point((p_x, p_y, p_z)): # ''' Write a line to csv_lines ''' # csv_lines.write('%s, %s, %s, %s\n' % (p_x, p_y, p_z, color)) # print headers # csv_comps.write('name, x, y, z\n') # csv_lines.write('x, y, z, c\n') # map from component name to (position, rotation matrix) comps = {} # active (position, rotation matrix) comp = (np.array([0, 0, 0]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).reshape(3,3)) compName = "" # we are following a neutron active = False # we need to draw the neutron (it passed the "check-point"/inspect component) inspect = False # list of observed neutron positions neutron = [] # skip next neutron position skip = False # total count of drawed neutrons neutrons_drawed = 0 while True: # read line line = get_line(fp) if line is None: break # register components if line.startswith(UC_COMP): # grab info line info = get_line(fp) assert info[:4] == 'POS:' nums = 
[x.strip() for x in info[4:].split(',')] # extract fields name = line[len(UC_COMP):].strip(' "\n') pos = np.array([float(x) for x in nums[:3]]) # read flat 3x3 rotation matrix rot = np.array([float(x) for x in nums[3:3+9]]).reshape(3, 3) comps[name] = (pos, rot) # csv_comps.write('%s, %s, %s, %s\n' % ((name,) + tuple(pos))) # switch perspective elif line.startswith(MC_COMP): color += 1 name = line[len(MC_COMP) + 1:].strip() compName = name comp = comps[name] elif line.startswith(MC_COMP_SHORT): name = line[len(MC_COMP_SHORT) + 1:].strip('"') compName = name comp = comps[name] skip = True # process multiline elif line.startswith(MC_LINE): points = parse_multiline(line[len(MC_LINE):].strip('()')) world.drawLine((rotate(p, comp) for p in points), color=getColor(color)) # process circle elif line.startswith(MC_CIRCLE): xyz = 'xyz' items = line[len(MC_CIRCLE):].strip('()').split(',') # plane pla = [xyz.find(a) for a in items[0].strip("''")] # center and radius pos = [float(x) for x in items[1:4]] rad = float(items[4]) points = draw_circle(pla, pos, rad, comp) world.drawLine(points, color=getColor(color)) # activate neutron when it enters elif line.startswith(MC_ENTER): neutron = [] skip = True active = True inspect = False color += 1 # deactivate neutron when it leaves elif line.startswith(MC_LEAVE) or line.startswith(MC_ABSORB): active = False if inspectComp is None or inspect: world.drawLine(neutron, color="1 0 0") neutrons_drawed += 1 # register state and scatter elif line.startswith(MC_STATE) or line.startswith(MC_SCATTER): if not active: continue if skip: skip = False continue if inspectComp and inspectComp == compName: # We will draw this neutron! inspect = True # keep track of points the neutron passes through xyz = [float(x) for x in line[line.find(':')+1:].split(',')[:3]] xyz = rotate(xyz, comp) neutron.append(xyz) print('Neutrons drawed:', neutrons_drawed, (inspectComp and '(reaching %s)' % inspectComp or '(all)')) return world
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.requests.ListOffsetsRequest; import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; import java.util.Optional; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class AutoOffsetResetStrategyTest { @Test public void testFromString() { assertEquals(AutoOffsetResetStrategy.EARLIEST, AutoOffsetResetStrategy.fromString("earliest")); assertEquals(AutoOffsetResetStrategy.LATEST, AutoOffsetResetStrategy.fromString("latest")); assertEquals(AutoOffsetResetStrategy.NONE, AutoOffsetResetStrategy.fromString("none")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("invalid")); assertThrows(IllegalArgumentException.class, () -> 
AutoOffsetResetStrategy.fromString("by_duration:invalid")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration:-PT1H")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration:")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("by_duration")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("LATEST")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("EARLIEST")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("NONE")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString("")); assertThrows(IllegalArgumentException.class, () -> AutoOffsetResetStrategy.fromString(null)); AutoOffsetResetStrategy strategy = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); assertEquals("by_duration", strategy.name()); } @Test public void testValidator() { AutoOffsetResetStrategy.Validator validator = new AutoOffsetResetStrategy.Validator(); assertDoesNotThrow(() -> validator.ensureValid("test", "earliest")); assertDoesNotThrow(() -> validator.ensureValid("test", "latest")); assertDoesNotThrow(() -> validator.ensureValid("test", "none")); assertDoesNotThrow(() -> validator.ensureValid("test", "by_duration:PT1H")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "invalid")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:invalid")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:-PT1H")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration:")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "by_duration")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "LATEST")); assertThrows(ConfigException.class, () -> 
validator.ensureValid("test", "EARLIEST")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "NONE")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", "")); assertThrows(ConfigException.class, () -> validator.ensureValid("test", null)); } @Test public void testEqualsAndHashCode() { AutoOffsetResetStrategy earliest1 = AutoOffsetResetStrategy.fromString("earliest"); AutoOffsetResetStrategy earliest2 = AutoOffsetResetStrategy.fromString("earliest"); AutoOffsetResetStrategy latest1 = AutoOffsetResetStrategy.fromString("latest"); AutoOffsetResetStrategy duration1 = AutoOffsetResetStrategy.fromString("by_duration:P2D"); AutoOffsetResetStrategy duration2 = AutoOffsetResetStrategy.fromString("by_duration:P2D"); assertEquals(earliest1, earliest2); assertNotEquals(earliest1, latest1); assertEquals(earliest1.hashCode(), earliest2.hashCode()); assertNotEquals(earliest1.hashCode(), latest1.hashCode()); assertNotEquals(latest1, duration2); assertEquals(duration1, duration2); } @Test public void testTimestamp() { AutoOffsetResetStrategy earliest1 = AutoOffsetResetStrategy.fromString("earliest"); AutoOffsetResetStrategy earliest2 = AutoOffsetResetStrategy.fromString("earliest"); assertEquals(Optional.of(ListOffsetsRequest.EARLIEST_TIMESTAMP), earliest1.timestamp()); assertEquals(earliest1, earliest2); AutoOffsetResetStrategy latest1 = AutoOffsetResetStrategy.fromString("latest"); AutoOffsetResetStrategy latest2 = AutoOffsetResetStrategy.fromString("latest"); assertEquals(Optional.of(ListOffsetsRequest.LATEST_TIMESTAMP), latest1.timestamp()); assertEquals(latest1, latest2); AutoOffsetResetStrategy none1 = AutoOffsetResetStrategy.fromString("none"); AutoOffsetResetStrategy none2 = AutoOffsetResetStrategy.fromString("none"); assertFalse(none1.timestamp().isPresent()); assertEquals(none1, none2); AutoOffsetResetStrategy byDuration1 = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); Optional<Long> timestamp = 
byDuration1.timestamp(); assertTrue(timestamp.isPresent()); assertTrue(timestamp.get() <= Instant.now().toEpochMilli() - Duration.ofHours(1).toMillis()); AutoOffsetResetStrategy byDuration2 = AutoOffsetResetStrategy.fromString("by_duration:PT1H"); AutoOffsetResetStrategy byDuration3 = AutoOffsetResetStrategy.fromString("by_duration:PT2H"); assertEquals(byDuration1, byDuration2); assertNotEquals(byDuration1, byDuration3); } }
java
github
https://github.com/apache/kafka
clients/src/test/java/org/apache/kafka/clients/consumer/internals/AutoOffsetResetStrategyTest.java
/* * jidctint.c * * This file was part of the Independent JPEG Group's software: * Copyright (C) 1991-1998, Thomas G. Lane. * Modification developed 2002-2018 by Guido Vollbeding. * libjpeg-turbo Modifications: * Copyright (C) 2015, 2020, 2022, D. R. Commander. * For conditions of distribution and use, see the accompanying README.ijg * file. * * This file contains a slower but more accurate integer implementation of the * inverse DCT (Discrete Cosine Transform). In the IJG code, this routine * must also perform dequantization of the input coefficients. * * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT * on each row (or vice versa, but it's more convenient to emit a row at * a time). Direct algorithms are also available, but they are much more * complex and seem not to be any faster when reduced to code. * * This implementation is based on an algorithm described in * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. * The primary algorithm described there uses 11 multiplies and 29 adds. * We use their alternate method with 12 multiplies and 32 adds. * The advantage of this method is that no data path contains more than one * multiplication; this allows a very simple and accurate implementation in * scaled fixed-point arithmetic, with a minimal number of shifts. * * We also provide IDCT routines with various output sample block sizes for * direct resolution reduction or enlargement without additional resampling: * NxN (N=1...16) pixels for one 8x8 input DCT block. * * For N<8 we simply take the corresponding low-frequency coefficients of * the 8x8 input DCT block and apply an NxN point IDCT on the sub-block * to yield the downscaled outputs. 
* This can be seen as direct low-pass downsampling from the DCT domain * point of view rather than the usual spatial domain point of view, * yielding significant computational savings and results at least * as good as common bilinear (averaging) spatial downsampling. * * For N>8 we apply a partial NxN IDCT on the 8 input coefficients as * lower frequencies and higher frequencies assumed to be zero. * It turns out that the computational effort is similar to the 8x8 IDCT * regarding the output size. * Furthermore, the scaling and descaling is the same for all IDCT sizes. * * CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases * since there would be too many additional constants to pre-calculate. */ #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" #include "jdct.h" /* Private declarations for DCT subsystem */ #ifdef DCT_ISLOW_SUPPORTED /* * This module is specialized to the case DCTSIZE = 8. */ #if DCTSIZE != 8 Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */ #endif /* * The poop on this scaling stuff is as follows: * * Each 1-D IDCT step produces outputs which are a factor of sqrt(N) * larger than the true IDCT outputs. The final outputs are therefore * a factor of N larger than desired; since N=8 this can be cured by * a simple right shift at the end of the algorithm. The advantage of * this arrangement is that we save two multiplications per 1-D IDCT, * because the y0 and y4 inputs need not be divided by sqrt(N). * * We have to do addition and subtraction of the integer inputs, which * is no problem, and multiplication by fractional constants, which is * a problem to do in integer arithmetic. We multiply all the constants * by CONST_SCALE and convert them to integer constants (thus retaining * CONST_BITS bits of precision in the constants). After doing a * multiplication we have to divide the product by CONST_SCALE, with proper * rounding, to produce the correct output. 
This division can be done
 * cheaply as a right shift of CONST_BITS bits.  We postpone shifting
 * as long as possible so that partial sums can be added together with
 * full fractional precision.
 *
 * The outputs of the first pass are scaled up by PASS1_BITS bits so that
 * they are represented to better-than-integral precision.  These outputs
 * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
 * with the recommended scaling.  (To scale up 12-bit sample data further, an
 * intermediate JLONG array would be needed.)
 *
 * To avoid overflow of the 32-bit intermediate results in pass 2, we must
 * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26.  Error analysis
 * shows that the values given below are the most effective.
 */

#if BITS_IN_JSAMPLE == 8
#define CONST_BITS  13
#define PASS1_BITS  2
#else
#define CONST_BITS  13
#define PASS1_BITS  1           /* lose a little precision to avoid overflow */
#endif

/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
 * causing a lot of useless floating-point operations at run time.
 * To get around this we use the following pre-calculated constants.
 * If you change CONST_BITS you may want to add appropriate values.
 * (With a reasonable C compiler, you can just rely on the FIX() macro...)
 */

#if CONST_BITS == 13
#define FIX_0_298631336  ((JLONG)2446)          /* FIX(0.298631336) */
#define FIX_0_390180644  ((JLONG)3196)          /* FIX(0.390180644) */
#define FIX_0_541196100  ((JLONG)4433)          /* FIX(0.541196100) */
#define FIX_0_765366865  ((JLONG)6270)          /* FIX(0.765366865) */
#define FIX_0_899976223  ((JLONG)7373)          /* FIX(0.899976223) */
#define FIX_1_175875602  ((JLONG)9633)          /* FIX(1.175875602) */
#define FIX_1_501321110  ((JLONG)12299)         /* FIX(1.501321110) */
#define FIX_1_847759065  ((JLONG)15137)         /* FIX(1.847759065) */
#define FIX_1_961570560  ((JLONG)16069)         /* FIX(1.961570560) */
#define FIX_2_053119869  ((JLONG)16819)         /* FIX(2.053119869) */
#define FIX_2_562915447  ((JLONG)20995)         /* FIX(2.562915447) */
#define FIX_3_072711026  ((JLONG)25172)         /* FIX(3.072711026) */
#else
#define FIX_0_298631336  FIX(0.298631336)
#define FIX_0_390180644  FIX(0.390180644)
#define FIX_0_541196100  FIX(0.541196100)
#define FIX_0_765366865  FIX(0.765366865)
#define FIX_0_899976223  FIX(0.899976223)
#define FIX_1_175875602  FIX(1.175875602)
#define FIX_1_501321110  FIX(1.501321110)
#define FIX_1_847759065  FIX(1.847759065)
#define FIX_1_961570560  FIX(1.961570560)
#define FIX_2_053119869  FIX(2.053119869)
#define FIX_2_562915447  FIX(2.562915447)
#define FIX_3_072711026  FIX(3.072711026)
#endif

/* Multiply a JLONG variable by a JLONG constant to yield a JLONG result.
 * For 8-bit samples with the recommended scaling, all the variable
 * and constant values involved are no more than 16 bits wide, so a
 * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
 * For 12-bit samples, a full 32-bit multiplication will be needed.
 */

#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var, const)  MULTIPLY16C16(var, const)
#else
#define MULTIPLY(var, const)  ((var) * (const))
#endif

/* Dequantize a coefficient by multiplying it by the multiplier-table
 * entry; produce an int result.  In this module, both inputs and result
 * are 16 bits or less, so either int or short multiply will work.
 */

#define DEQUANTIZE(coef, quantval)  (((ISLOW_MULT_TYPE)(coef)) * (quantval))


/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * coef_block holds the 8x8 quantized coefficients; compptr->dct_table
 * holds the per-coefficient dequantization multipliers.  One 8-sample row
 * is written to output_buf[row] + output_col for each of the 8 output rows.
 * Intermediate results are buffered in a local int workspace between the
 * column pass and the row pass.
 */

GLOBAL(void)
_jpeg_idct_islow(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp0, tmp1, tmp2, tmp3;
  JLONG tmp10, tmp11, tmp12, tmp13;
  JLONG z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[DCTSIZE2];      /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
  /* furthermore, we scale the results by 2**PASS1_BITS. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = DCTSIZE; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms.  We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero.  In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */

    if (inptr[DCTSIZE * 1] == 0 && inptr[DCTSIZE * 2] == 0 &&
        inptr[DCTSIZE * 3] == 0 && inptr[DCTSIZE * 4] == 0 &&
        inptr[DCTSIZE * 5] == 0 && inptr[DCTSIZE * 6] == 0 &&
        inptr[DCTSIZE * 7] == 0) {
      /* AC terms all zero */
      int dcval = LEFT_SHIFT(DEQUANTIZE(inptr[DCTSIZE * 0],
                                        quantptr[DCTSIZE * 0]), PASS1_BITS);

      wsptr[DCTSIZE * 0] = dcval;
      wsptr[DCTSIZE * 1] = dcval;
      wsptr[DCTSIZE * 2] = dcval;
      wsptr[DCTSIZE * 3] = dcval;
      wsptr[DCTSIZE * 4] = dcval;
      wsptr[DCTSIZE * 5] = dcval;
      wsptr[DCTSIZE * 6] = dcval;
      wsptr[DCTSIZE * 7] = dcval;

      inptr++;                  /* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065);
    tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);

    z2 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);

    tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS);
    tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS);

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);

    z1 = tmp0 + tmp3;
    z2 = tmp1 + tmp2;
    z3 = tmp0 + tmp2;
    z4 = tmp1 + tmp3;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602);    /* sqrt(2) * c3 */

    tmp0 = MULTIPLY(tmp0, FIX_0_298631336);     /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869);     /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026);     /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110);     /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, -FIX_0_899976223);        /* sqrt(2) * ( c7-c3) */
    z2 = MULTIPLY(z2, -FIX_2_562915447);        /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, -FIX_1_961570560);        /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, -FIX_0_390180644);        /* sqrt(2) * ( c5-c3) */

    z3 += z5;
    z4 += z5;

    tmp0 += z1 + z3;
    tmp1 += z2 + z4;
    tmp2 += z2 + z3;
    tmp3 += z1 + z4;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */

    wsptr[DCTSIZE * 0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 1] = (int)DESCALE(tmp11 + tmp2, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 6] = (int)DESCALE(tmp11 - tmp2, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 2] = (int)DESCALE(tmp12 + tmp1, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 5] = (int)DESCALE(tmp12 - tmp1, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 3] = (int)DESCALE(tmp13 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[DCTSIZE * 4] = (int)DESCALE(tmp13 - tmp0, CONST_BITS - PASS1_BITS);

    inptr++;                    /* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process rows from work array, store into output array. */
  /* Note that we must descale the results by a factor of 8 == 2**3, */
  /* and also undo the PASS1_BITS scaling. */

  wsptr = workspace;
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* Rows of zeroes can be exploited in the same way as we did with columns.
     * However, the column calculation has created many nonzero AC terms, so
     * the simplification applies less often (typically 5% to 10% of the time).
     * On machines with very fast multiplication, it's possible that the
     * test takes more time than it's worth.  In that case this section
     * may be commented out.
     */

#ifndef NO_ZERO_ROW_TEST
    if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 &&
        wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
      /* AC terms all zero */
      _JSAMPLE dcval =
        range_limit[(int)DESCALE((JLONG)wsptr[0], PASS1_BITS + 3) &
                    RANGE_MASK];

      outptr[0] = dcval;
      outptr[1] = dcval;
      outptr[2] = dcval;
      outptr[3] = dcval;
      outptr[4] = dcval;
      outptr[5] = dcval;
      outptr[6] = dcval;
      outptr[7] = dcval;

      wsptr += DCTSIZE;         /* advance pointer to next row */
      continue;
    }
#endif

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = (JLONG)wsptr[2];
    z3 = (JLONG)wsptr[6];

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z3, -FIX_1_847759065);
    tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);

    tmp0 = LEFT_SHIFT((JLONG)wsptr[0] + (JLONG)wsptr[4], CONST_BITS);
    tmp1 = LEFT_SHIFT((JLONG)wsptr[0] - (JLONG)wsptr[4], CONST_BITS);

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */

    tmp0 = (JLONG)wsptr[7];
    tmp1 = (JLONG)wsptr[5];
    tmp2 = (JLONG)wsptr[3];
    tmp3 = (JLONG)wsptr[1];

    z1 = tmp0 + tmp3;
    z2 = tmp1 + tmp2;
    z3 = tmp0 + tmp2;
    z4 = tmp1 + tmp3;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602);    /* sqrt(2) * c3 */

    tmp0 = MULTIPLY(tmp0, FIX_0_298631336);     /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869);     /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026);     /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110);     /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, -FIX_0_899976223);        /* sqrt(2) * ( c7-c3) */
    z2 = MULTIPLY(z2, -FIX_2_562915447);        /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, -FIX_1_961570560);        /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, -FIX_0_390180644);        /* sqrt(2) * ( c5-c3) */

    z3 += z5;
    z4 += z5;

    tmp0 += z1 + z3;
    tmp1 += z2 + z4;
    tmp2 += z2 + z3;
    tmp3 += z1 + z4;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */

    outptr[0] = range_limit[(int)DESCALE(tmp10 + tmp3,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[7] = range_limit[(int)DESCALE(tmp10 - tmp3,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)DESCALE(tmp11 + tmp2,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[6] = range_limit[(int)DESCALE(tmp11 - tmp2,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)DESCALE(tmp12 + tmp1,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[5] = range_limit[(int)DESCALE(tmp12 - tmp1,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[3] = range_limit[(int)DESCALE(tmp13 + tmp0,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[4] = range_limit[(int)DESCALE(tmp13 - tmp0,
                                         CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += DCTSIZE;           /* advance pointer to next row */
  }
}

#ifdef IDCT_SCALING_SUPPORTED


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 7x7 output block.
 *
 * Optimized algorithm with 12 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/14).
 */

GLOBAL(void)
_jpeg_idct_7x7(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, _JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  JLONG tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13;
  JLONG z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[7 * 7];         /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Only the 7 lowest-frequency coefficients in each direction are read
   * (rows 0..6 of columns 0..6 of the 8x8 input block).
   * The "fudge factor" added to the DC term pre-biases each sum so that
   * the plain RIGHT_SHIFT descales below round to nearest instead of
   * truncating.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp13 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp13 = LEFT_SHIFT(tmp13, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp13 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);

    tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734));        /* c4 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123));        /* c6 */
    tmp11 = tmp10 + tmp12 + tmp13 -
            MULTIPLY(z2, FIX(1.841218003));             /* c2+c4-c6 */
    tmp0 = z1 + z3;
    z2 -= tmp0;
    tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13;    /* c2 */
    tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536));     /* c2-c4-c6 */
    tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249));     /* c2+c4+c6 */
    tmp13 += MULTIPLY(z2, FIX(1.414213562));            /* c0 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);

    tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347));         /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339));         /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(z2 + z3, -FIX(1.378756276));        /* -c1 */
    tmp1 += tmp2;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));           /* c5 */
    tmp0 += z2;
    tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693));        /* c3+c1-c5 */

    /* Final output stage */

    wsptr[7 * 0] = (int)RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[7 * 6] = (int)RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS - PASS1_BITS);
    wsptr[7 * 1] = (int)RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS - PASS1_BITS);
    wsptr[7 * 5] = (int)RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS - PASS1_BITS);
    wsptr[7 * 2] = (int)RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS - PASS1_BITS);
    wsptr[7 * 4] = (int)RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS - PASS1_BITS);
    wsptr[7 * 3] = (int)RIGHT_SHIFT(tmp13, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 7 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 7; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp13 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp13 = LEFT_SHIFT(tmp13, CONST_BITS);

    z1 = (JLONG)wsptr[2];
    z2 = (JLONG)wsptr[4];
    z3 = (JLONG)wsptr[6];

    tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734));        /* c4 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123));        /* c6 */
    tmp11 = tmp10 + tmp12 + tmp13 -
            MULTIPLY(z2, FIX(1.841218003));             /* c2+c4-c6 */
    tmp0 = z1 + z3;
    z2 -= tmp0;
    tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13;    /* c2 */
    tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536));     /* c2-c4-c6 */
    tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249));     /* c2+c4+c6 */
    tmp13 += MULTIPLY(z2, FIX(1.414213562));            /* c0 */

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];

    tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347));         /* (c3+c1-c5)/2 */
    tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339));         /* (c3+c5-c1)/2 */
    tmp0 = tmp1 - tmp2;
    tmp1 += tmp2;
    tmp2 = MULTIPLY(z2 + z3, -FIX(1.378756276));        /* -c1 */
    tmp1 += tmp2;
    z2 = MULTIPLY(z1 + z3, FIX(0.613604268));           /* c5 */
    tmp0 += z2;
    tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693));        /* c3+c1-c5 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp10 + tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp10 - tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp11 + tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp11 - tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp12 + tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp12 - tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp13,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += 7;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 6x6 output block.
 *
 * Optimized algorithm with 3 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/12).
 */

GLOBAL(void)
_jpeg_idct_6x6(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, _JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  JLONG tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  JLONG z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[6 * 6];         /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Only coefficients 0..5 in each direction of the 8x8 input are used.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS - PASS1_BITS - 1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));   /* c4 */
    tmp1 = tmp0 + tmp10;
    /* tmp11 is already descaled here; note that the matching pass-1 terms
     * below are shifted up by only PASS1_BITS before being combined with it.
     */
    tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS - PASS1_BITS);
    tmp10 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));   /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + LEFT_SHIFT(z1 + z2, CONST_BITS);
    tmp2 = tmp1 + LEFT_SHIFT(z3 - z2, CONST_BITS);
    tmp1 = LEFT_SHIFT(z1 - z2 - z3, PASS1_BITS);

    /* Final output stage */

    wsptr[6 * 0] = (int)RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[6 * 5] = (int)RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS - PASS1_BITS);
    wsptr[6 * 1] = (int)(tmp11 + tmp1);
    wsptr[6 * 4] = (int)(tmp11 - tmp1);
    wsptr[6 * 2] = (int)RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS - PASS1_BITS);
    wsptr[6 * 3] = (int)RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 6 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    tmp2 = (JLONG)wsptr[4];
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781));   /* c4 */
    tmp1 = tmp0 + tmp10;
    tmp11 = tmp0 - tmp10 - tmp10;
    tmp10 = (JLONG)wsptr[2];
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871));   /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + LEFT_SHIFT(z1 + z2, CONST_BITS);
    tmp2 = tmp1 + LEFT_SHIFT(z3 - z2, CONST_BITS);
    tmp1 = LEFT_SHIFT(z1 - z2 - z3, CONST_BITS);

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp10 + tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp10 - tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp11 + tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp11 - tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp12 + tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp12 - tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += 6;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 5x5 output block.
 *
 * Optimized algorithm with 5 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/10).
 */

GLOBAL(void)
_jpeg_idct_5x5(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, _JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  JLONG tmp0, tmp1, tmp10, tmp11, tmp12;
  JLONG z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[5 * 5];         /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
 */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp12 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp12 = LEFT_SHIFT(tmp12, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp12 += ONE << (CONST_BITS - PASS1_BITS - 1);
    tmp0 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= LEFT_SHIFT(z2, 2);

    /* Odd part */

    z2 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));   /* c3 */
    tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
    tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */

    /* Final output stage */

    wsptr[5 * 0] = (int)RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[5 * 4] = (int)RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS - PASS1_BITS);
    wsptr[5 * 1] = (int)RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS - PASS1_BITS);
    wsptr[5 * 3] = (int)RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS - PASS1_BITS);
    wsptr[5 * 2] = (int)RIGHT_SHIFT(tmp12, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 5 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp12 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp12 = LEFT_SHIFT(tmp12, CONST_BITS);
    tmp0 = (JLONG)wsptr[2];
    tmp1 = (JLONG)wsptr[4];
    z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= LEFT_SHIFT(z2, 2);

    /* Odd part */

    z2 = (JLONG)wsptr[1];
    z3 = (JLONG)wsptr[3];

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));   /* c3 */
    tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
    tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp10 + tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp10 - tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp11 + tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp11 - tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp12,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += 5;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 3x3 output block.
 *
 * Optimized algorithm with 2 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/6).
 */

GLOBAL(void)
_jpeg_idct_3x3(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, _JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  JLONG tmp0, tmp2, tmp10, tmp12;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[3 * 3];         /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Only coefficients 0..2 in each direction of the 8x8 input are used.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS - PASS1_BITS - 1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781));   /* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;

    /* Odd part */

    tmp12 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871));   /* c1 */

    /* Final output stage */

    wsptr[3 * 0] = (int)RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[3 * 2] = (int)RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS - PASS1_BITS);
    wsptr[3 * 1] = (int)RIGHT_SHIFT(tmp2, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 3 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    tmp2 = (JLONG)wsptr[2];
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781));   /* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;

    /* Odd part */

    tmp12 = (JLONG)wsptr[1];
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871));   /* c1 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp10 + tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp10 - tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += 3;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 9x9 output block.
 *
 * Optimized algorithm with 10 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/18).
 */

GLOBAL(void)
_jpeg_idct_9x9(j_decompress_ptr cinfo, jpeg_component_info *compptr,
               JCOEFPTR coef_block, _JSAMPARRAY output_buf,
               JDIMENSION output_col)
{
  JLONG tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 9];         /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * All 8 input columns are read (N>8 case: the 8 coefficients are treated
   * as the lower frequencies of a 9-point transform), producing 9 rows of
   * intermediate results per column.
   */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);

    tmp3 = MULTIPLY(z3, FIX(0.707106781));      /* c6 */
    tmp1 = tmp0 + tmp3;
    tmp2 = tmp0 - tmp3 - tmp3;

    tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
    tmp11 = tmp2 + tmp0;
    tmp14 = tmp2 - tmp0 - tmp0;

    tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
    tmp2 = MULTIPLY(z1, FIX(1.083350441));      /* c4 */
    tmp3 = MULTIPLY(z2, FIX(0.245575608));      /* c8 */

    tmp10 = tmp1 + tmp0 - tmp3;
    tmp12 = tmp1 - tmp0 + tmp2;
    tmp13 = tmp1 - tmp2 + tmp3;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);

    z2 = MULTIPLY(z2, -FIX(1.224744871));       /* -c3 */

    tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
    tmp0 = tmp2 + tmp3 - z2;
    tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
    tmp2 += z2 - tmp1;
    tmp3 += z2 + tmp1;
    tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */

    /* Final output stage */

    wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7] = (int)RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2] = (int)RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6] = (int)RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3] = (int)RIGHT_SHIFT(tmp13 + tmp3, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5] = (int)RIGHT_SHIFT(tmp13 - tmp3, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4] = (int)RIGHT_SHIFT(tmp14, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 9 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 9; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);

    z1 = (JLONG)wsptr[2];
    z2 = (JLONG)wsptr[4];
    z3 = (JLONG)wsptr[6];

    tmp3 = MULTIPLY(z3, FIX(0.707106781));      /* c6 */
    tmp1 = tmp0 + tmp3;
    tmp2 = tmp0 - tmp3 - tmp3;

    tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
    tmp11 = tmp2 + tmp0;
    tmp14 = tmp2 - tmp0 - tmp0;

    tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
    tmp2 = MULTIPLY(z1, FIX(1.083350441));      /* c4 */
    tmp3 = MULTIPLY(z2, FIX(0.245575608));      /* c8 */

    tmp10 = tmp1 + tmp0 - tmp3;
    tmp12 = tmp1 - tmp0 + tmp2;
    tmp13 = tmp1 - tmp2 + tmp3;

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z4 = (JLONG)wsptr[7];

    z2 = MULTIPLY(z2, -FIX(1.224744871));       /* -c3 */

    tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
    tmp0 = tmp2 + tmp3 - z2;
    tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
    tmp2 += z2 - tmp1;
    tmp3 += z2 + tmp1;
    tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp10 + tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp10 - tmp0,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp11 + tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[7] = range_limit[(int)RIGHT_SHIFT(tmp11 - tmp1,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp12 + tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp12 - tmp2,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp13 + tmp3,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp13 - tmp3,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp14,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 10x10 output block.
 *
 * Optimized algorithm with 12 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/20).
 */

GLOBAL(void)
_jpeg_idct_10x10(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp10, tmp11, tmp12, tmp13, tmp14;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24;
  JLONG z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 10];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z3 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    z3 = LEFT_SHIFT(z3, CONST_BITS);
    /* Add fudge factor here for final descale.
 */
    z3 += ONE << (CONST_BITS - PASS1_BITS - 1);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z1 = MULTIPLY(z4, FIX(1.144122806));        /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));        /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;

    tmp22 = RIGHT_SHIFT(z3 - LEFT_SHIFT(z1 - z2, 1),
                        CONST_BITS - PASS1_BITS); /* c0 = (c4-c8)*2 */

    z2 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));    /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */

    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);

    tmp11 = z2 + z4;
    tmp13 = z2 - z4;

    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));  /* (c3-c7)/2 */
    z5 = LEFT_SHIFT(z3, CONST_BITS);

    z2 = MULTIPLY(tmp11, FIX(0.951056516));     /* (c3+c7)/2 */
    z4 = z5 + tmp12;

    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */

    z2 = MULTIPLY(tmp11, FIX(0.587785252));     /* (c1-c9)/2 */
    z4 = z5 - tmp12 - LEFT_SHIFT(tmp13, CONST_BITS - 1);

    tmp12 = LEFT_SHIFT(z1 - tmp13 - z3, PASS1_BITS);

    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */

    /* Final output stage */

    wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2] = (int)(tmp22 + tmp12);
    wsptr[8 * 7] = (int)(tmp22 - tmp12);
    wsptr[8 * 3] = (int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6] = (int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4] = (int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5] = (int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 10 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 10; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z3 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    z3 = LEFT_SHIFT(z3, CONST_BITS);
    z4 = (JLONG)wsptr[4];
    z1 = MULTIPLY(z4, FIX(1.144122806));        /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024));        /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;

    tmp22 = z3 - LEFT_SHIFT(z1 - z2, 1);        /* c0 = (c4-c8)*2 */

    z2 = (JLONG)wsptr[2];
    z3 = (JLONG)wsptr[6];

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));    /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */

    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z3 = LEFT_SHIFT(z3, CONST_BITS);
    z4 = (JLONG)wsptr[7];

    tmp11 = z2 + z4;
    tmp13 = z2 - z4;

    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));  /* (c3-c7)/2 */

    z2 = MULTIPLY(tmp11, FIX(0.951056516));     /* (c3+c7)/2 */
    z4 = z3 + tmp12;

    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */

    z2 = MULTIPLY(tmp11, FIX(0.587785252));     /* (c1-c9)/2 */
    z4 = z3 - tmp12 - LEFT_SHIFT(tmp13, CONST_BITS - 1);

    tmp12 = LEFT_SHIFT(z1 - tmp13, CONST_BITS) - z3;

    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[9] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp12,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[7] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp12,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp13,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp13,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp14,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp14,
                                             CONST_BITS + PASS1_BITS + 3) &
                            RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing an 11x11 output block.
 *
 * Optimized algorithm with 24 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/22).
 */

GLOBAL(void)
_jpeg_idct_11x11(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp10, tmp11, tmp12, tmp13, tmp14;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 11];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp10 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp10 = LEFT_SHIFT(tmp10, CONST_BITS);
    /* Add fudge factor here for final descale.
*/
    tmp10 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);
    tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132));        /* c2+c4 */
    tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045));        /* c2-c6 */
    z4 = z1 + z3;
    tmp24 = MULTIPLY(z4, -FIX(1.155664402));            /* -(c2-c10) */
    z4 -= z2;
    tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976));     /* c2 */
    tmp21 = tmp20 + tmp23 + tmp25 -
            MULTIPLY(z2, FIX(1.821790775));             /* c2+c4+c10-c6 */
    tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087));    /* c4+c6 */
    tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477));    /* c6+c8 */
    tmp24 += tmp25;
    tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120));     /* c8+c10 */
    tmp24 += MULTIPLY(z2, FIX(1.944413522)) -           /* c2+c8 */
             MULTIPLY(z1, FIX(1.390975730));            /* c4+c10 */
    tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562));     /* c0 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);
    tmp11 = z1 + z2;
    tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.887983902));          /* c3-c9 */
    tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295));        /* c5-c9 */
    tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(z1, FIX(0.923107866));             /* c7+c5+c3-c1-2*c9 */
    z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579));   /* c7+c9 */
    tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588));       /* c1+c7+3*c9-c3 */
    tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623));       /* c3+c5-c7-c9 */
    z1 = MULTIPLY(z2 + z4, -FIX(1.798248910));          /* -(c1+c9) */
    tmp11 += z1;
    tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632));       /* c1+c5+c9-c7 */
    tmp14 += MULTIPLY(z2, -FIX(1.467221301)) +          /* -(c5+c9) */
             MULTIPLY(z3, FIX(1.001388905)) -           /* c1-c9 */
             MULTIPLY(z4, FIX(1.684843907));            /* c3+c9 */

    /* Final output stage */

    wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 10] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2] = (int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3] = (int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7] = (int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4] = (int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6] = (int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS - PASS1_BITS);
    /* Center sample of the odd-length output has no mirror partner. */
    wsptr[8 * 5] = (int)RIGHT_SHIFT(tmp25, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 11 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 11; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp10 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp10 = LEFT_SHIFT(tmp10, CONST_BITS);

    z1 = (JLONG)wsptr[2];
    z2 = (JLONG)wsptr[4];
    z3 = (JLONG)wsptr[6];
    tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132));        /* c2+c4 */
    tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045));        /* c2-c6 */
    z4 = z1 + z3;
    tmp24 = MULTIPLY(z4, -FIX(1.155664402));            /* -(c2-c10) */
    z4 -= z2;
    tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976));     /* c2 */
    tmp21 = tmp20 + tmp23 + tmp25 -
            MULTIPLY(z2, FIX(1.821790775));             /* c2+c4+c10-c6 */
    tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087));    /* c4+c6 */
    tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477));    /* c6+c8 */
    tmp24 += tmp25;
    tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120));     /* c8+c10 */
    tmp24 += MULTIPLY(z2, FIX(1.944413522)) -           /* c2+c8 */
             MULTIPLY(z1, FIX(1.390975730));            /* c4+c10 */
    tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562));     /* c0 */

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z4 = (JLONG)wsptr[7];
    tmp11 = z1 + z2;
    tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.887983902));          /* c3-c9 */
    tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295));        /* c5-c9 */
    tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(z1, FIX(0.923107866));             /* c7+c5+c3-c1-2*c9 */
    z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579));   /* c7+c9 */
    tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588));       /* c1+c7+3*c9-c3 */
    tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623));       /* c3+c5-c7-c9 */
    z1 = MULTIPLY(z2 + z4, -FIX(1.798248910));          /* -(c1+c9) */
    tmp11 += z1;
    tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632));       /* c1+c5+c9-c7 */
    tmp14 += MULTIPLY(z2, -FIX(1.467221301)) +          /* -(c5+c9) */
             MULTIPLY(z3, FIX(1.001388905)) -           /* c1-c9 */
             MULTIPLY(z4, FIX(1.684843907));            /* c3+c9 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[10] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[9] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[7] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp25, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 12x12 output block.
 *
 * Optimized algorithm with 15 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/24).
 */

GLOBAL(void)
_jpeg_idct_12x12(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 12];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
*/

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z3 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    z3 = LEFT_SHIFT(z3, CONST_BITS);
    /* Add fudge factor here for final descale. */
    z3 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z4 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z4 = MULTIPLY(z4, FIX(1.224744871));        /* c4 */
    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    z1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z4 = MULTIPLY(z1, FIX(1.366025404));        /* c2 */
    z1 = LEFT_SHIFT(z1, CONST_BITS);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);
    z2 = LEFT_SHIFT(z2, CONST_BITS);
    tmp12 = z1 - z2;
    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;
    tmp12 = z4 + z2;
    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;
    tmp12 = z4 - z1 - z2;
    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);
    tmp11 = MULTIPLY(z2, FIX(1.306562965));     /* c3 */
    tmp14 = MULTIPLY(z2, -FIX_0_541196100);     /* -c9 */
    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, -FIX(1.045510580)); /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 -
             MULTIPLY(z3, FIX(1.478575242));    /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 +
             MULTIPLY(z4, FIX(1.586706681));    /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
             MULTIPLY(z4, FIX(1.982889723));    /* c5+c7 */

    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);    /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */

    /* Final output stage */

    wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 11] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 10] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2] = (int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9] = (int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3] = (int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4] = (int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7] = (int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5] = (int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6] = (int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 12 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 12; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z3 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    z3 = LEFT_SHIFT(z3, CONST_BITS);

    z4 = (JLONG)wsptr[4];
    z4 = MULTIPLY(z4, FIX(1.224744871));        /* c4 */
    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    z1 = (JLONG)wsptr[2];
    z4 = MULTIPLY(z1, FIX(1.366025404));        /* c2 */
    z1 = LEFT_SHIFT(z1, CONST_BITS);
    z2 = (JLONG)wsptr[6];
    z2 = LEFT_SHIFT(z2, CONST_BITS);
    tmp12 = z1 - z2;
    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;
    tmp12 = z4 + z2;
    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;
    tmp12 = z4 - z1 - z2;
    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z4 = (JLONG)wsptr[7];
    tmp11 = MULTIPLY(z2, FIX(1.306562965));     /* c3 */
    tmp14 = MULTIPLY(z2, -FIX_0_541196100);     /* -c9 */
    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, -FIX(1.045510580)); /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 -
             MULTIPLY(z3, FIX(1.478575242));    /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 +
             MULTIPLY(z4, FIX(1.586706681));    /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
             MULTIPLY(z4, FIX(1.982889723));    /* c5+c7 */

    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);    /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[11] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[10] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[9] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[7] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 13x13 output block.
 *
 * Optimized algorithm with 29 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/26).
 */

GLOBAL(void)
_jpeg_idct_13x13(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 13];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    z1 = LEFT_SHIFT(z1, CONST_BITS);
    /* Add fudge factor here for final descale.
*/
    z1 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z2 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);

    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    tmp12 = MULTIPLY(tmp10, FIX(1.155388986));          /* (c4+c6)/2 */
    tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1;     /* (c4-c6)/2 */

    tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
    tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */

    tmp12 = MULTIPLY(tmp10, FIX(0.316450131));          /* (c8-c12)/2 */
    tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1;     /* (c8+c12)/2 */

    tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
    tmp25 = MULTIPLY(z2, -FIX(1.252223920)) + tmp12 + tmp13; /* c4 */

    tmp12 = MULTIPLY(tmp10, FIX(0.435816023));          /* (c2-c10)/2 */
    tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1;     /* (c2+c10)/2 */

    tmp23 = MULTIPLY(z2, -FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
    tmp24 = MULTIPLY(z2, -FIX(0.803364869)) + tmp12 - tmp13; /* c8 */

    tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);

    tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651));        /* c3 */
    tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945));        /* c5 */
    tmp15 = z1 + z4;
    tmp13 = MULTIPLY(tmp15, FIX(0.937797057));          /* c7 */
    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(z1, FIX(2.020082300));             /* c7+c5+c3-c1 */
    tmp14 = MULTIPLY(z2 + z3, -FIX(0.338443458));       /* -c11 */
    tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564));    /* c5+c9+c11-c3 */
    tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027));    /* c1+c5-c9-c11 */
    tmp14 = MULTIPLY(z2 + z4, -FIX(1.163874945));       /* -c5 */
    tmp11 += tmp14;
    tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352));    /* c3+c5+c9-c7 */
    tmp14 = MULTIPLY(z3 + z4, -FIX(0.657217813));       /* -c9 */
    tmp12 += tmp14;
    tmp13 += tmp14;
    tmp15 = MULTIPLY(tmp15, FIX(0.338443458));          /* c11 */
    tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) -    /* c9-c11 */
            MULTIPLY(z2, FIX(0.466105296));             /* c1-c7 */
    z1 = MULTIPLY(z3 - z2, FIX(0.937797057));           /* c7 */
    tmp14 += z1;
    tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) -      /* c3-c7 */
             MULTIPLY(z4, FIX(1.742345811));            /* c1+c11 */

    /* Final output stage */

    wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 12] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 11] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2] = (int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 10] = (int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3] = (int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9] = (int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4] = (int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5] = (int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7] = (int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS - PASS1_BITS);
    /* Center sample of the odd-length output has no mirror partner. */
    wsptr[8 * 6] = (int)RIGHT_SHIFT(tmp26, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 13 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 13; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z1 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    z1 = LEFT_SHIFT(z1, CONST_BITS);

    z2 = (JLONG)wsptr[2];
    z3 = (JLONG)wsptr[4];
    z4 = (JLONG)wsptr[6];

    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    tmp12 = MULTIPLY(tmp10, FIX(1.155388986));          /* (c4+c6)/2 */
    tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1;     /* (c4-c6)/2 */

    tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
    tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */

    tmp12 = MULTIPLY(tmp10, FIX(0.316450131));          /* (c8-c12)/2 */
    tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1;     /* (c8+c12)/2 */

    tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
    tmp25 = MULTIPLY(z2, -FIX(1.252223920)) + tmp12 + tmp13; /* c4 */

    tmp12 = MULTIPLY(tmp10, FIX(0.435816023));          /* (c2-c10)/2 */
    tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1;     /* (c2+c10)/2 */

    tmp23 = MULTIPLY(z2, -FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
    tmp24 = MULTIPLY(z2, -FIX(0.803364869)) + tmp12 - tmp13; /* c8 */

    tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z4 = (JLONG)wsptr[7];

    tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651));        /* c3 */
    tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945));        /* c5 */
    tmp15 = z1 + z4;
    tmp13 = MULTIPLY(tmp15, FIX(0.937797057));          /* c7 */
    tmp10 = tmp11 + tmp12 + tmp13 -
            MULTIPLY(z1, FIX(2.020082300));             /* c7+c5+c3-c1 */
    tmp14 = MULTIPLY(z2 + z3, -FIX(0.338443458));       /* -c11 */
    tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564));    /* c5+c9+c11-c3 */
    tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027));    /* c1+c5-c9-c11 */
    tmp14 = MULTIPLY(z2 + z4, -FIX(1.163874945));       /* -c5 */
    tmp11 += tmp14;
    tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352));    /* c3+c5+c9-c7 */
    tmp14 = MULTIPLY(z3 + z4, -FIX(0.657217813));       /* -c9 */
    tmp12 += tmp14;
    tmp13 += tmp14;
    tmp15 = MULTIPLY(tmp15, FIX(0.338443458));          /* c11 */
    tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) -    /* c9-c11 */
            MULTIPLY(z2, FIX(0.466105296));             /* c1-c7 */
    z1 = MULTIPLY(z3 - z2, FIX(0.937797057));           /* c7 */
    tmp14 += z1;
    tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) -      /* c3-c7 */
             MULTIPLY(z4, FIX(1.742345811));            /* c1+c11 */

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[12] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[11] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[10] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[9] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[7] = range_limit[(int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp26, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 14x14 output block.
 *
 * Optimized algorithm with 20 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/28).
*/

GLOBAL(void)
_jpeg_idct_14x14(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 14];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    z1 = LEFT_SHIFT(z1, CONST_BITS);
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS - PASS1_BITS - 1);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z2 = MULTIPLY(z4, FIX(1.274162392));        /* c4 */
    z3 = MULTIPLY(z4, FIX(0.314692123));        /* c12 */
    z4 = MULTIPLY(z4, FIX(0.881747734));        /* c8 */
    tmp10 = z1 + z2;
    tmp11 = z1 + z3;
    tmp12 = z1 - z4;
    /* tmp23 needs no second-stage shift, so descale it here. */
    tmp23 = RIGHT_SHIFT(z1 - LEFT_SHIFT(z2 + z3 - z4, 1),
                        CONST_BITS - PASS1_BITS); /* c0 = (c4+c12-c8)*2 */
    z1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);
    z3 = MULTIPLY(z1 + z2, FIX(1.105676686));   /* c6 */
    tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
    tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
    tmp15 = MULTIPLY(z1, FIX(0.613604268)) -    /* c10 */
            MULTIPLY(z2, FIX(1.378756276));     /* c2 */
    tmp20 = tmp10 + tmp13;
    tmp26 = tmp10 - tmp13;
    tmp21 = tmp11 + tmp14;
    tmp25 = tmp11 - tmp14;
    tmp22 = tmp12 + tmp15;
    tmp24 = tmp12 - tmp15;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);
    tmp13 = LEFT_SHIFT(z4, CONST_BITS);
    tmp14 = z1 + z3;
    tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
    tmp12 = MULTIPLY(tmp14, FIX(1.197448846));  /* c5 */
    tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
    tmp14 = MULTIPLY(tmp14, FIX(0.752406978));  /* c9 */
    tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
    z1 -= z2;
    tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */
    tmp16 += tmp15;
    z1 += z4;
    z4 = MULTIPLY(z2 + z3, -FIX(0.158341681)) - tmp13; /* -c13 */
    tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
    tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
    z4 = MULTIPLY(z3 - z2, FIX(1.405321284));   /* c1 */
    /* NOTE(review): FIX(1.6906431334) has ten decimal digits, unlike the
       nine-digit FIX constants used everywhere else in this file; the same
       literal recurs in pass 2, so the two passes agree — confirm the value
       against the c1+c9-c11 derivation in a reference before changing it. */
    tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
    tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
    /* tmp13 pairs with tmp23 above: already at PASS1_BITS scale. */
    tmp13 = LEFT_SHIFT(z1 - z3, PASS1_BITS);

    /* Final output stage */

    wsptr[8 * 0] = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 13] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1] = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 12] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2] = (int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 11] = (int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3] = (int)(tmp23 + tmp13);
    wsptr[8 * 10] = (int)(tmp23 - tmp13);
    wsptr[8 * 4] = (int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9] = (int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5] = (int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8] = (int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6] = (int)RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7] = (int)RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 14 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 14; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z1 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    z1 = LEFT_SHIFT(z1, CONST_BITS);
    z4 = (JLONG)wsptr[4];
    z2 = MULTIPLY(z4, FIX(1.274162392));        /* c4 */
    z3 = MULTIPLY(z4, FIX(0.314692123));        /* c12 */
    z4 = MULTIPLY(z4, FIX(0.881747734));        /* c8 */
    tmp10 = z1 + z2;
    tmp11 = z1 + z3;
    tmp12 = z1 - z4;
    tmp23 = z1 - LEFT_SHIFT(z2 + z3 - z4, 1);   /* c0 = (c4+c12-c8)*2 */
    z1 = (JLONG)wsptr[2];
    z2 = (JLONG)wsptr[6];
    z3 = MULTIPLY(z1 + z2, FIX(1.105676686));   /* c6 */
    tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
    tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
    tmp15 = MULTIPLY(z1, FIX(0.613604268)) -    /* c10 */
            MULTIPLY(z2, FIX(1.378756276));     /* c2 */
    tmp20 = tmp10 + tmp13;
    tmp26 = tmp10 - tmp13;
    tmp21 = tmp11 + tmp14;
    tmp25 = tmp11 - tmp14;
    tmp22 = tmp12 + tmp15;
    tmp24 = tmp12 - tmp15;

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z4 = (JLONG)wsptr[7];
    z4 = LEFT_SHIFT(z4, CONST_BITS);
    tmp14 = z1 + z3;
    tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
    tmp12 = MULTIPLY(tmp14, FIX(1.197448846));  /* c5 */
    tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
    tmp14 = MULTIPLY(tmp14, FIX(0.752406978));  /* c9 */
    tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
    z1 -= z2;
    tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */
    tmp16 += tmp15;
    tmp13 = MULTIPLY(z2 + z3, -FIX(0.158341681)) - z4; /* -c13 */
    tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
    tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
    tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
    tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
    tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
    tmp13 = LEFT_SHIFT(z1 - z3, CONST_BITS) + z4;

    /* Final output stage */

    outptr[0] = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[13] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[1] = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[12] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[2] = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[11] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[3] = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[10] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[4] = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[9] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[5] = range_limit[(int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[8] = range_limit[(int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[6] = range_limit[(int)RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[7] = range_limit[(int)RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 15x15 output block.
 *
 * Optimized algorithm with 22 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/30).
 */

/*
 * Dequantize and inverse-DCT one 8x8 coefficient block, scaling it up to a
 * 15x15 spatial block (used for upscaled decompression ratios).
 *
 * Two-pass structure: Pass 1 runs the 1-D 15-point IDCT down each of the 8
 * input columns, leaving intermediate results in `workspace` scaled up by
 * PASS1_BITS for extra fractional precision; Pass 2 runs the same 1-D kernel
 * across the 15 workspace rows, descales, and range-limits into output_buf.
 * The cNN comments name the cosine constants cK = sqrt(2)*cos(K*pi/30) that
 * each FIX() literal encodes.
 */
GLOBAL(void)
_jpeg_idct_15x15(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 15];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * Each iteration consumes one column of the 8x8 coefficient block
   * (stride DCTSIZE) and produces one 15-entry workspace column. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part (coefficients 0, 2, 4, 6) */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    z1 = LEFT_SHIFT(z1, CONST_BITS);
    /* Add fudge factor here for final descale. */
    z1 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z2 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);

    tmp10 = MULTIPLY(z4, FIX(0.437016024));    /* c12 */
    tmp11 = MULTIPLY(z4, FIX(1.144122806));    /* c6 */

    tmp12 = z1 - tmp10;
    tmp13 = z1 + tmp11;
    z1 -= LEFT_SHIFT(tmp11 - tmp10, 1);        /* c0 = (c6-c12)*2 */

    z4 = z2 - z3;
    z3 += z2;
    tmp10 = MULTIPLY(z3, FIX(1.337628990));    /* (c2+c4)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.045680613));    /* (c2-c4)/2 */
    z2 = MULTIPLY(z2, FIX(1.439773946));       /* c4+c14 */

    tmp20 = tmp13 + tmp10 + tmp11;
    tmp23 = tmp12 - tmp10 + tmp11 + z2;

    tmp10 = MULTIPLY(z3, FIX(0.547059574));    /* (c8+c14)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.399234004));    /* (c8-c14)/2 */

    tmp25 = tmp13 - tmp10 - tmp11;
    tmp26 = tmp12 + tmp10 - tmp11 - z2;

    tmp10 = MULTIPLY(z3, FIX(0.790569415));    /* (c6+c12)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.353553391));    /* (c6-c12)/2 */

    tmp21 = tmp12 + tmp10 + tmp11;
    tmp24 = tmp13 - tmp10 + tmp11;

    tmp11 += tmp11;
    tmp22 = z1 + tmp11;                        /* c10 = c6-c12 */
    tmp27 = z1 - tmp11 - tmp11;                /* c0 = (c6-c12)*2 */

    /* Odd part (coefficients 1, 3, 5, 7) */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z3 = MULTIPLY(z4, FIX(1.224744871));       /* c5 */
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);

    tmp13 = z2 - z4;
    tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876));    /* c9 */
    tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148));    /* c3-c9 */
    tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */

    tmp13 = MULTIPLY(z2, -FIX(0.831253876));   /* -c9 */
    tmp15 = MULTIPLY(z2, -FIX(1.344997024));   /* -c3 */
    z2 = z1 - z4;
    tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */

    tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
    tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
    tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
    z2 = MULTIPLY(z1 + z4, FIX(0.575212477));  /* c11 */
    tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
    tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */

    /* Final output stage: butterfly even/odd sums into the 15 workspace
     * rows, descaling by CONST_BITS - PASS1_BITS. */

    wsptr[8 * 0]  = (int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 14] = (int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1]  = (int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 13] = (int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2]  = (int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 12] = (int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3]  = (int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 11] = (int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4]  = (int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 10] = (int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5]  = (int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9]  = (int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6]  = (int)RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8]  = (int)RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7]  = (int)RIGHT_SHIFT(tmp27, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 15 rows from work array, store into output array.
   * Same 1-D kernel as Pass 1, but reading ints from the workspace and
   * writing range-limited samples. */

  wsptr = workspace;
  for (ctr = 0; ctr < 15; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    z1 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    z1 = LEFT_SHIFT(z1, CONST_BITS);

    z2 = (JLONG)wsptr[2];
    z3 = (JLONG)wsptr[4];
    z4 = (JLONG)wsptr[6];

    tmp10 = MULTIPLY(z4, FIX(0.437016024));    /* c12 */
    tmp11 = MULTIPLY(z4, FIX(1.144122806));    /* c6 */

    tmp12 = z1 - tmp10;
    tmp13 = z1 + tmp11;
    z1 -= LEFT_SHIFT(tmp11 - tmp10, 1);        /* c0 = (c6-c12)*2 */

    z4 = z2 - z3;
    z3 += z2;
    tmp10 = MULTIPLY(z3, FIX(1.337628990));    /* (c2+c4)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.045680613));    /* (c2-c4)/2 */
    z2 = MULTIPLY(z2, FIX(1.439773946));       /* c4+c14 */

    tmp20 = tmp13 + tmp10 + tmp11;
    tmp23 = tmp12 - tmp10 + tmp11 + z2;

    tmp10 = MULTIPLY(z3, FIX(0.547059574));    /* (c8+c14)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.399234004));    /* (c8-c14)/2 */

    tmp25 = tmp13 - tmp10 - tmp11;
    tmp26 = tmp12 + tmp10 - tmp11 - z2;

    tmp10 = MULTIPLY(z3, FIX(0.790569415));    /* (c6+c12)/2 */
    tmp11 = MULTIPLY(z4, FIX(0.353553391));    /* (c6-c12)/2 */

    tmp21 = tmp12 + tmp10 + tmp11;
    tmp24 = tmp13 - tmp10 + tmp11;

    tmp11 += tmp11;
    tmp22 = z1 + tmp11;                        /* c10 = c6-c12 */
    tmp27 = z1 - tmp11 - tmp11;                /* c0 = (c6-c12)*2 */

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z4 = (JLONG)wsptr[5];
    z3 = MULTIPLY(z4, FIX(1.224744871));       /* c5 */
    z4 = (JLONG)wsptr[7];

    tmp13 = z2 - z4;
    tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876));    /* c9 */
    tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148));    /* c3-c9 */
    tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */

    tmp13 = MULTIPLY(z2, -FIX(0.831253876));   /* -c9 */
    tmp15 = MULTIPLY(z2, -FIX(1.344997024));   /* -c3 */
    z2 = z1 - z4;
    tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */

    tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
    tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
    tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
    z2 = MULTIPLY(z1 + z4, FIX(0.575212477));  /* c11 */
    tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
    tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */

    /* Final output stage: descale by CONST_BITS + PASS1_BITS + 3 and clamp
     * through the range_limit table. */

    outptr[0]  = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[14] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[1]  = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[13] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[2]  = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[12] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[3]  = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[11] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[4]  = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[10] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[5]  = range_limit[(int)RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[9]  = range_limit[(int)RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[6]  = range_limit[(int)RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[8]  = range_limit[(int)RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[7]  = range_limit[(int)RIGHT_SHIFT(tmp27, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}


/*
 * Perform dequantization and inverse DCT on one block of
coefficients,
 * producing a 16x16 output block.
 *
 * Optimized algorithm with 28 multiplications in the 1-D kernel.
 * cK represents sqrt(2) * cos(K*pi/32).
 */

/*
 * Two-pass structure: Pass 1 runs the 1-D 16-point IDCT down each of the 8
 * input columns, leaving intermediate results in `workspace` scaled up by
 * PASS1_BITS; Pass 2 runs the same 1-D kernel across the 16 workspace rows,
 * descales, and range-limits into output_buf.  Note that z1..z4 and
 * tmp0..tmp13 are aggressively reused with different meanings as the
 * computation proceeds; the cNN comments name which cosine term each
 * FIX() literal contributes.
 */
GLOBAL(void)
_jpeg_idct_16x16(j_decompress_ptr cinfo, jpeg_component_info *compptr,
                 JCOEFPTR coef_block, _JSAMPARRAY output_buf,
                 JDIMENSION output_col)
{
  JLONG tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  JLONG tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  JLONG z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE *quantptr;
  int *wsptr;
  _JSAMPROW outptr;
  _JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[8 * 16];        /* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *)compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part (coefficients 0, 2, 4, 6) */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE * 0], quantptr[DCTSIZE * 0]);
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS - PASS1_BITS - 1);

    z1 = DEQUANTIZE(inptr[DCTSIZE * 4], quantptr[DCTSIZE * 4]);
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = DEQUANTIZE(inptr[DCTSIZE * 2], quantptr[DCTSIZE * 2]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 6], quantptr[DCTSIZE * 6]);
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */

    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */

    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part (coefficients 1, 3, 5, 7) */

    z1 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE * 7], quantptr[DCTSIZE * 7]);

    tmp11 = z1 + z3;

    tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2 = MULTIPLY(tmp11, FIX(1.247225013));     /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));  /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.666655658));    /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));  /* c13 */
    tmp0 = tmp1 + tmp2 + tmp3 - MULTIPLY(z1, FIX(2.286341144));     /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 - MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */
    z1 = MULTIPLY(z2 + z3, FIX(0.138617169));     /* c15 */
    tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074));  /* c9+c11-c3-c15 */
    tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048));  /* c5+c7+c15-c3 */
    z1 = MULTIPLY(z3 - z2, FIX(1.407403738));     /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
    z2 += z4;
    z1 = MULTIPLY(z2, -FIX(0.666655658));         /* -c11 */
    tmp1 += z1;
    tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2 = MULTIPLY(z2, -FIX(1.247225013));         /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2 = MULTIPLY(z3 + z4, -FIX(1.353318001));    /* -c3 */
    tmp2 += z2;
    tmp3 += z2;
    z2 = MULTIPLY(z4 - z3, FIX(0.410524528));     /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage: butterfly even/odd sums into the 16 workspace
     * rows, descaling by CONST_BITS - PASS1_BITS. */

    wsptr[8 * 0]  = (int)RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS - PASS1_BITS);
    wsptr[8 * 15] = (int)RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS - PASS1_BITS);
    wsptr[8 * 1]  = (int)RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS - PASS1_BITS);
    wsptr[8 * 14] = (int)RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS - PASS1_BITS);
    wsptr[8 * 2]  = (int)RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS - PASS1_BITS);
    wsptr[8 * 13] = (int)RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS - PASS1_BITS);
    wsptr[8 * 3]  = (int)RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS - PASS1_BITS);
    wsptr[8 * 12] = (int)RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS - PASS1_BITS);
    wsptr[8 * 4]  = (int)RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 11] = (int)RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS - PASS1_BITS);
    wsptr[8 * 5]  = (int)RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 10] = (int)RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS - PASS1_BITS);
    wsptr[8 * 6]  = (int)RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 9]  = (int)RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS - PASS1_BITS);
    wsptr[8 * 7]  = (int)RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS - PASS1_BITS);
    wsptr[8 * 8]  = (int)RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS - PASS1_BITS);
  }

  /* Pass 2: process 16 rows from work array, store into output array.
   * Same 1-D kernel as Pass 1, but reading ints from the workspace and
   * writing range-limited samples. */

  wsptr = workspace;
  for (ctr = 0; ctr < 16; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (JLONG)wsptr[0] + (ONE << (PASS1_BITS + 2));
    tmp0 = LEFT_SHIFT(tmp0, CONST_BITS);

    z1 = (JLONG)wsptr[4];
    tmp1 = MULTIPLY(z1, FIX(1.306562965));      /* c4[16] = c2[8] */
    tmp2 = MULTIPLY(z1, FIX_0_541196100);       /* c12[16] = c6[8] */

    tmp10 = tmp0 + tmp1;
    tmp11 = tmp0 - tmp1;
    tmp12 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;

    z1 = (JLONG)wsptr[2];
    z2 = (JLONG)wsptr[6];
    z3 = z1 - z2;
    z4 = MULTIPLY(z3, FIX(0.275899379));        /* c14[16] = c7[8] */
    z3 = MULTIPLY(z3, FIX(1.387039845));        /* c2[16] = c1[8] */

    tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447);  /* (c6+c2)[16] = (c3+c1)[8] */
    tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223);  /* (c6-c14)[16] = (c3-c7)[8] */
    tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
    tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */

    tmp20 = tmp10 + tmp0;
    tmp27 = tmp10 - tmp0;
    tmp21 = tmp12 + tmp1;
    tmp26 = tmp12 - tmp1;
    tmp22 = tmp13 + tmp2;
    tmp25 = tmp13 - tmp2;
    tmp23 = tmp11 + tmp3;
    tmp24 = tmp11 - tmp3;

    /* Odd part */

    z1 = (JLONG)wsptr[1];
    z2 = (JLONG)wsptr[3];
    z3 = (JLONG)wsptr[5];
    z4 = (JLONG)wsptr[7];

    tmp11 = z1 + z3;

    tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001));   /* c3 */
    tmp2 = MULTIPLY(tmp11, FIX(1.247225013));     /* c5 */
    tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867));   /* c7 */
    tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586));  /* c9 */
    tmp11 = MULTIPLY(tmp11, FIX(0.666655658));    /* c11 */
    tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528));  /* c13 */
    tmp0 = tmp1 + tmp2 + tmp3 - MULTIPLY(z1, FIX(2.286341144));     /* c7+c5+c3-c1 */
    tmp13 = tmp10 + tmp11 + tmp12 - MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */
    z1 = MULTIPLY(z2 + z3, FIX(0.138617169));     /* c15 */
    tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074));  /* c9+c11-c3-c15 */
    tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048));  /* c5+c7+c15-c3 */
    z1 = MULTIPLY(z3 - z2, FIX(1.407403738));     /* c1 */
    tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
    tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
    z2 += z4;
    z1 = MULTIPLY(z2, -FIX(0.666655658));         /* -c11 */
    tmp1 += z1;
    tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962));  /* c3+c11+c15-c7 */
    z2 = MULTIPLY(z2, -FIX(1.247225013));         /* -c5 */
    tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
    tmp12 += z2;
    z2 = MULTIPLY(z3 + z4, -FIX(1.353318001));    /* -c3 */
    tmp2 += z2;
    tmp3 += z2;
    z2 = MULTIPLY(z4 - z3, FIX(0.410524528));     /* c13 */
    tmp10 += z2;
    tmp11 += z2;

    /* Final output stage: descale by CONST_BITS + PASS1_BITS + 3 and clamp
     * through the range_limit table. */

    outptr[0]  = range_limit[(int)RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[15] = range_limit[(int)RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[1]  = range_limit[(int)RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[14] = range_limit[(int)RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[2]  = range_limit[(int)RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[13] = range_limit[(int)RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[3]  = range_limit[(int)RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[12] = range_limit[(int)RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[4]  = range_limit[(int)RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[11] = range_limit[(int)RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[5]  = range_limit[(int)RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[10] = range_limit[(int)RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[6]  = range_limit[(int)RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[9]  = range_limit[(int)RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[7]  = range_limit[(int)RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];
    outptr[8]  = range_limit[(int)RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS + PASS1_BITS + 3) & RANGE_MASK];

    wsptr += 8;                 /* advance pointer to next row */
  }
}

#endif /* IDCT_SCALING_SUPPORTED */
#endif /* DCT_ISLOW_SUPPORTED */
c
github
https://github.com/opencv/opencv
3rdparty/libjpeg-turbo/src/jidctint.c
// Package oauthserver is a very simplistic OAuth server used only for
// the testing of the "terraform login" and "terraform logout" commands.
package oauthserver

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"html"
	"log"
	"net/http"
	"net/url"
	"strings"
)

// Handler is an implementation of net/http.Handler that provides a stub
// OAuth server implementation with the following endpoints:
//
//	/authz  - authorization endpoint
//	/token  - token endpoint
//	/revoke - token revocation (logout) endpoint
//
// The authorization endpoint returns HTML per normal OAuth conventions, but
// it also includes an HTTP header X-Redirect-To giving the same URL that the
// link in the HTML indicates, allowing a non-browser user-agent to traverse
// this robotically in automated tests.
var Handler http.Handler

// handler is the stateless concrete type behind Handler.
type handler struct{}

// ServeHTTP routes requests to the per-endpoint methods by exact path
// match; anything else gets a 404.
func (h handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	switch req.URL.Path {
	case "/authz":
		h.serveAuthz(resp, req)
	case "/token":
		h.serveToken(resp, req)
	case "/revoke":
		h.serveRevoke(resp, req)
	default:
		resp.WriteHeader(404)
	}
}

// serveAuthz implements the authorization endpoint. It accepts only
// response_type=code, then immediately "grants" by constructing the
// redirect URL with a code derived from the PKCE challenge (no login UI,
// no consent). The code is simply "<method>:<challenge>" so that
// serveToken can later re-check the verifier against it.
func (h handler) serveAuthz(resp http.ResponseWriter, req *http.Request) {
	args := req.URL.Query()
	if rt := args.Get("response_type"); rt != "code" {
		resp.WriteHeader(400)
		resp.Write([]byte("wrong response_type"))
		log.Printf("/authz: incorrect response type %q", rt)
		return
	}
	redirectURL, err := url.Parse(args.Get("redirect_uri"))
	if err != nil {
		resp.WriteHeader(400)
		resp.Write([]byte(fmt.Sprintf("invalid redirect_uri %s: %s", args.Get("redirect_uri"), err)))
		return
	}

	state := args.Get("state")
	challenge := args.Get("code_challenge")
	challengeMethod := args.Get("code_challenge_method")
	if challengeMethod == "" {
		// Per OAuth PKCE convention, an absent method defaults to "plain".
		challengeMethod = "plain"
	}

	// NOTE: This is not a suitable implementation for a real OAuth server
	// because the code challenge is providing no security whatsoever. This
	// is just a simple implementation for this stub server.
	code := fmt.Sprintf("%s:%s", challengeMethod, challenge)

	redirectQuery := redirectURL.Query()
	redirectQuery.Set("code", code)
	if state != "" {
		// Echo the client's state back, as OAuth requires.
		redirectQuery.Set("state", state)
	}
	redirectURL.RawQuery = redirectQuery.Encode()

	respBody := fmt.Sprintf(`<a href="%s">Log In and Consent</a>`, html.EscapeString(redirectURL.String()))
	resp.Header().Set("Content-Type", "text/html")
	resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(respBody)))
	resp.Header().Set("X-Redirect-To", redirectURL.String()) // For robotic clients, using webbrowser.MockLauncher
	resp.WriteHeader(200)
	resp.Write([]byte(respBody))
}

// serveToken implements the token endpoint for two grant types:
//
//   - authorization_code: splits the code produced by serveAuthz back into
//     its challenge method and challenge, then verifies the client's
//     code_verifier against it ("plain" compares directly; "S256" compares
//     the unpadded base64url SHA-256 of the verifier).
//   - password: accepts anything except the literal username or password
//     "wrong", which forces the invalid_grant error path for tests.
//
// On success it always issues the fixed token "good-token".
func (h handler) serveToken(resp http.ResponseWriter, req *http.Request) {
	if req.Method != "POST" {
		resp.WriteHeader(405)
		log.Printf("/token: unsupported request method %q", req.Method)
		return
	}

	if err := req.ParseForm(); err != nil {
		resp.WriteHeader(500)
		log.Printf("/token: error parsing body: %s", err)
		return
	}

	grantType := req.Form.Get("grant_type")
	log.Printf("/token: grant_type is %q", grantType)
	switch grantType {

	case "authorization_code":
		code := req.Form.Get("code")
		codeParts := strings.SplitN(code, ":", 2)
		if len(codeParts) != 2 {
			log.Printf("/token: invalid code %q", code)
			resp.Header().Set("Content-Type", "application/json")
			resp.WriteHeader(400)
			resp.Write([]byte(`{"error":"invalid_grant"}`))
			return
		}

		codeVerifier := req.Form.Get("code_verifier")
		switch codeParts[0] {
		case "plain":
			if codeParts[1] != codeVerifier {
				log.Printf("/token: incorrect code verifier %q; want %q", codeParts[1], codeVerifier)
				resp.Header().Set("Content-Type", "application/json")
				resp.WriteHeader(400)
				resp.Write([]byte(`{"error":"invalid_grant"}`))
				return
			}
		case "S256":
			h := sha256.New()
			h.Write([]byte(codeVerifier))
			encVerifier := base64.RawURLEncoding.EncodeToString(h.Sum(nil))
			if codeParts[1] != encVerifier {
				log.Printf("/token: incorrect code verifier %q; want %q", codeParts[1], encVerifier)
				resp.Header().Set("Content-Type", "application/json")
				resp.WriteHeader(400)
				resp.Write([]byte(`{"error":"invalid_grant"}`))
				return
			}
		default:
			log.Printf("/token: unsupported challenge method %q", codeParts[0])
			resp.Header().Set("Content-Type", "application/json")
			resp.WriteHeader(400)
			resp.Write([]byte(`{"error":"invalid_grant"}`))
			return
		}

		resp.Header().Set("Content-Type", "application/json")
		resp.WriteHeader(200)
		resp.Write([]byte(`{"access_token":"good-token","token_type":"bearer"}`))
		log.Println("/token: successful request")

	case "password":
		username := req.Form.Get("username")
		password := req.Form.Get("password")

		if username == "wrong" || password == "wrong" {
			// These special "credentials" allow testing for the error case.
			resp.Header().Set("Content-Type", "application/json")
			resp.WriteHeader(400)
			resp.Write([]byte(`{"error":"invalid_grant"}`))
			log.Println("/token: 'wrong' credentials")
			return
		}

		resp.Header().Set("Content-Type", "application/json")
		resp.WriteHeader(200)
		resp.Write([]byte(`{"access_token":"good-token","token_type":"bearer"}`))
		log.Println("/token: successful request")

	default:
		resp.WriteHeader(400)
		log.Printf("/token: unsupported grant type %q", grantType)
	}
}

// serveRevoke handles the /revoke endpoint.
// NOTE(review): the Handler doc advertises /revoke as the revocation
// endpoint, yet this always answers 404 — it appears to be a deliberate
// stub (nothing to revoke for the fixed "good-token"); confirm against
// the logout tests before changing it.
func (h handler) serveRevoke(resp http.ResponseWriter, req *http.Request) {
	resp.WriteHeader(404)
}

// init wires the package-level Handler to the stateless handler value.
func init() {
	Handler = handler{}
}
go
github
https://github.com/hashicorp/terraform
internal/command/testdata/login-oauth-server/oauthserver.go
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.network', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## packetbb.h (module 'network'): ns3::PbbAddressLength [enumeration] module.add_enum('PbbAddressLength', ['IPV4', 'IPV6']) ## ethernet-header.h (module 'network'): ns3::ethernet_header_t [enumeration] module.add_enum('ethernet_header_t', ['LENGTH', 'VLAN', 'QINQ']) ## address.h (module 'network'): ns3::Address [class] module.add_class('Address') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address']) ## application-container.h (module 'network'): ns3::ApplicationContainer [class] module.add_class('ApplicationContainer') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class] module.add_class('AsciiTraceHelper') ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class] module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', outer_class=root_module['ns3::Buffer']) ## 
packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## channel-list.h (module 'network'): ns3::ChannelList [class] module.add_class('ChannelList') ## data-rate.h (module 'network'): ns3::DataRate [class] module.add_class('DataRate') ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] module.add_class('Inet6SocketAddress') ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class] root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] module.add_class('InetSocketAddress') ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class] root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask') ## ipv6-address.h (module 'network'): 
ns3::Ipv6Address [class] module.add_class('Ipv6Address') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## mac64-address.h (module 'network'): ns3::Mac64Address [class] module.add_class('Mac64Address') ## mac64-address.h (module 'network'): ns3::Mac64Address [class] root_module['ns3::Mac64Address'].implicitly_converts_to(root_module['ns3::Address']) ## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class] module.add_class('NetDeviceContainer') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer') ## node-list.h (module 'network'): ns3::NodeList [class] module.add_class('NodeList') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## object-factory.h (module 'core'): ns3::ObjectFactory [class] module.add_class('ObjectFactory', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item']) ## packet-metadata.h (module 'network'): 
ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', outer_class=root_module['ns3::PacketMetadata']) ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress [class] module.add_class('PacketSocketAddress') ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress [class] root_module['ns3::PacketSocketAddress'].implicitly_converts_to(root_module['ns3::Address']) ## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper [class] module.add_class('PacketSocketHelper') ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', outer_class=root_module['ns3::PacketTagList']) ## packetbb.h (module 'network'): ns3::PbbAddressTlvBlock [class] module.add_class('PbbAddressTlvBlock') ## packetbb.h (module 'network'): ns3::PbbTlvBlock [class] module.add_class('PbbTlvBlock') ## pcap-file.h (module 'network'): ns3::PcapFile [class] module.add_class('PcapFile') ## trace-helper.h (module 'network'): ns3::PcapHelper [class] module.add_class('PcapHelper') ## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration] module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper']) ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class] module.add_class('PcapHelperForDevice', allow_subclassing=True) ## random-variable.h (module 'core'): ns3::RandomVariable [class] module.add_class('RandomVariable', import_from_module='ns.core') ## random-variable.h (module 'core'): ns3::SeedManager [class] 
module.add_class('SeedManager', import_from_module='ns.core') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int> [class] module.add_class('SequenceNumber32') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short> [class] module.add_class('SequenceNumber16') ## random-variable.h (module 'core'): ns3::SequentialVariable [class] module.add_class('SequentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simulator.h (module 'core'): ns3::Simulator [class] module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core') ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class] module.add_class('SystemWallClockMs', import_from_module='ns.core') ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer') ## random-variable.h (module 'core'): ns3::TriangularVariable [class] module.add_class('TriangularVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): 
ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## random-variable.h (module 'core'): ns3::UniformVariable [class] module.add_class('UniformVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::WeibullVariable [class] module.add_class('WeibullVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ZetaVariable [class] module.add_class('ZetaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ZipfVariable [class] module.add_class('ZipfVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', parent=root_module['ns3::ObjectBase']) ## random-variable.h (module 'core'): ns3::ConstantVariable [class] module.add_class('ConstantVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::DeterministicVariable [class] module.add_class('DeterministicVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::EmpiricalVariable [class] module.add_class('EmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ErlangVariable [class] 
module.add_class('ErlangVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::ExponentialVariable [class] module.add_class('ExponentialVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## flow-id-tag.h (module 'network'): ns3::FlowIdTag [class] module.add_class('FlowIdTag', parent=root_module['ns3::Tag']) ## random-variable.h (module 'core'): ns3::GammaVariable [class] module.add_class('GammaVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', parent=root_module['ns3::Chunk']) ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable [class] module.add_class('IntEmpiricalVariable', import_from_module='ns.core', parent=root_module['ns3::EmpiricalVariable']) ## llc-snap-header.h (module 'network'): ns3::LlcSnapHeader [class] module.add_class('LlcSnapHeader', parent=root_module['ns3::Header']) ## random-variable.h (module 'core'): ns3::LogNormalVariable [class] module.add_class('LogNormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## random-variable.h (module 'core'): ns3::NormalVariable [class] module.add_class('NormalVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## packet-burst.h (module 'network'): ns3::PacketBurst [class] module.add_class('PacketBurst', parent=root_module['ns3::Object']) ## random-variable.h (module 'core'): ns3::ParetoVariable [class] module.add_class('ParetoVariable', 
import_from_module='ns.core', parent=root_module['ns3::RandomVariable']) ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class] module.add_class('PcapFileWrapper', parent=root_module['ns3::Object']) ## queue.h (module 'network'): ns3::Queue [class] module.add_class('Queue', parent=root_module['ns3::Object']) ## queue.h (module 'network'): ns3::Queue::QueueMode [enumeration] module.add_enum('QueueMode', ['QUEUE_MODE_PACKETS', 'QUEUE_MODE_BYTES'], outer_class=root_module['ns3::Queue']) ## radiotap-header.h (module 'network'): ns3::RadiotapHeader [class] module.add_class('RadiotapHeader', parent=root_module['ns3::Header']) ## radiotap-header.h (module 'network'): ns3::RadiotapHeader [enumeration] module.add_enum('', ['FRAME_FLAG_NONE', 'FRAME_FLAG_CFP', 'FRAME_FLAG_SHORT_PREAMBLE', 'FRAME_FLAG_WEP', 'FRAME_FLAG_FRAGMENTED', 'FRAME_FLAG_FCS_INCLUDED', 'FRAME_FLAG_DATA_PADDING', 'FRAME_FLAG_BAD_FCS', 'FRAME_FLAG_SHORT_GUARD'], outer_class=root_module['ns3::RadiotapHeader']) ## radiotap-header.h (module 'network'): ns3::RadiotapHeader [enumeration] module.add_enum('', ['CHANNEL_FLAG_NONE', 'CHANNEL_FLAG_TURBO', 'CHANNEL_FLAG_CCK', 'CHANNEL_FLAG_OFDM', 'CHANNEL_FLAG_SPECTRUM_2GHZ', 'CHANNEL_FLAG_SPECTRUM_5GHZ', 'CHANNEL_FLAG_PASSIVE', 'CHANNEL_FLAG_DYNAMIC', 'CHANNEL_FLAG_GFSK'], outer_class=root_module['ns3::RadiotapHeader']) ## red-queue.h (module 'network'): ns3::RedQueue [class] module.add_class('RedQueue', parent=root_module['ns3::Queue']) ## red-queue.h (module 'network'): ns3::RedQueue [enumeration] module.add_enum('', ['DTYPE_NONE', 'DTYPE_FORCED', 'DTYPE_UNFORCED'], outer_class=root_module['ns3::RedQueue']) ## red-queue.h (module 'network'): ns3::RedQueue::Stats [struct] module.add_class('Stats', outer_class=root_module['ns3::RedQueue']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', 
automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, 
ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> > [class] module.add_class('SimpleRefCount', 
automatic_type_narrowing=True, template_parameters=['ns3::PbbAddressBlock', 'ns3::empty', 'ns3::DefaultDeleter<ns3::PbbAddressBlock>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbMessage', 'ns3::empty', 'ns3::DefaultDeleter<ns3::PbbMessage>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbPacket', 'ns3::Header', 'ns3::DefaultDeleter<ns3::PbbPacket>'], parent=root_module['ns3::Header'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::PbbTlv', 'ns3::empty', 'ns3::DefaultDeleter<ns3::PbbTlv>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 
'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## socket.h (module 'network'): ns3::Socket [class] module.add_class('Socket', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration] module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket']) ## socket.h (module 'network'): ns3::Socket::SocketType [enumeration] module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket']) ## socket.h (module 'network'): ns3::SocketAddressTag [class] module.add_class('SocketAddressTag', parent=root_module['ns3::Tag']) ## socket-factory.h (module 'network'): ns3::SocketFactory [class] module.add_class('SocketFactory', parent=root_module['ns3::Object']) ## socket.h (module 'network'): ns3::SocketIpTtlTag [class] module.add_class('SocketIpTtlTag', parent=root_module['ns3::Tag']) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class] module.add_class('SocketSetDontFragmentTag', parent=root_module['ns3::Tag']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor 
[class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', parent=root_module['ns3::Chunk']) ## application.h (module 'network'): ns3::Application [class] module.add_class('Application', parent=root_module['ns3::Object']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## boolean.h (module 'core'): ns3::BooleanChecker [class] module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## boolean.h (module 'core'): ns3::BooleanValue [class] module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< 
ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## channel.h (module 'network'): ns3::Channel [class] module.add_class('Channel', parent=root_module['ns3::Object']) ## data-rate.h (module 'network'): ns3::DataRateChecker [class] module.add_class('DataRateChecker', parent=root_module['ns3::AttributeChecker']) ## data-rate.h (module 'network'): ns3::DataRateValue [class] module.add_class('DataRateValue', parent=root_module['ns3::AttributeValue']) ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue [class] module.add_class('DropTailQueue', parent=root_module['ns3::Queue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::ErrorModel [class] module.add_class('ErrorModel', parent=root_module['ns3::Object']) ## ethernet-header.h (module 'network'): ns3::EthernetHeader [class] module.add_class('EthernetHeader', parent=root_module['ns3::Header']) ## ethernet-trailer.h (module 'network'): ns3::EthernetTrailer [class] module.add_class('EthernetTrailer', parent=root_module['ns3::Trailer']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] 
module.add_class('Ipv4MaskChecker', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::ListErrorModel [class] module.add_class('ListErrorModel', parent=root_module['ns3::ErrorModel']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice']) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', 
parent=root_module['ns3::Object']) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class] module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class] module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class] module.add_class('OutputStreamWrapper', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## packet-socket.h (module 'network'): ns3::PacketSocket [class] module.add_class('PacketSocket', parent=root_module['ns3::Socket']) ## packet-socket-factory.h (module 'network'): ns3::PacketSocketFactory [class] module.add_class('PacketSocketFactory', parent=root_module['ns3::SocketFactory']) ## packetbb.h (module 'network'): ns3::PbbAddressBlock [class] module.add_class('PbbAddressBlock', parent=root_module['ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >']) ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv4 [class] module.add_class('PbbAddressBlockIpv4', parent=root_module['ns3::PbbAddressBlock']) ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv6 [class] module.add_class('PbbAddressBlockIpv6', parent=root_module['ns3::PbbAddressBlock']) ## packetbb.h (module 'network'): ns3::PbbMessage [class] module.add_class('PbbMessage', parent=root_module['ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >']) ## packetbb.h (module 'network'): ns3::PbbMessageIpv4 [class] module.add_class('PbbMessageIpv4', parent=root_module['ns3::PbbMessage']) ## 
packetbb.h (module 'network'): ns3::PbbMessageIpv6 [class] module.add_class('PbbMessageIpv6', parent=root_module['ns3::PbbMessage']) ## packetbb.h (module 'network'): ns3::PbbPacket [class] module.add_class('PbbPacket', parent=root_module['ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >']) ## packetbb.h (module 'network'): ns3::PbbTlv [class] module.add_class('PbbTlv', parent=root_module['ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >']) ## random-variable.h (module 'core'): ns3::RandomVariableChecker [class] module.add_class('RandomVariableChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## random-variable.h (module 'core'): ns3::RandomVariableValue [class] module.add_class('RandomVariableValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## error-model.h (module 'network'): ns3::RateErrorModel [class] module.add_class('RateErrorModel', parent=root_module['ns3::ErrorModel']) ## error-model.h (module 'network'): ns3::RateErrorModel::ErrorUnit [enumeration] module.add_enum('ErrorUnit', ['ERROR_UNIT_BIT', 'ERROR_UNIT_BYTE', 'ERROR_UNIT_PACKET'], outer_class=root_module['ns3::RateErrorModel']) ## error-model.h (module 'network'): ns3::ReceiveListErrorModel [class] module.add_class('ReceiveListErrorModel', parent=root_module['ns3::ErrorModel']) ## simple-channel.h (module 'network'): ns3::SimpleChannel [class] module.add_class('SimpleChannel', parent=root_module['ns3::Channel']) ## simple-net-device.h (module 'network'): ns3::SimpleNetDevice [class] module.add_class('SimpleNetDevice', parent=root_module['ns3::NetDevice']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) 
## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', parent=root_module['ns3::AttributeValue']) ## packetbb.h (module 'network'): ns3::PbbAddressTlv [class] module.add_class('PbbAddressTlv', parent=root_module['ns3::PbbTlv']) module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list') module.add_container('std::list< unsigned int >', 'unsigned int', container_type='list') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyRxEndOkCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyRxEndOkCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyRxEndOkCallback&') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned short, short >', 'ns3::SequenceNumber16') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned short, short >*', 'ns3::SequenceNumber16*') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned short, short >&', 'ns3::SequenceNumber16&') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >', 'ns3::SequenceNumber32') 
typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >*', 'ns3::SequenceNumber32*') typehandlers.add_type_alias('ns3::SequenceNumber< unsigned int, int >&', 'ns3::SequenceNumber32&') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyRxStartCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyRxStartCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyRxStartCallback&') typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyTxStartCallback') typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyTxStartCallback*') typehandlers.add_type_alias('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyTxStartCallback&') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyRxEndErrorCallback') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyRxEndErrorCallback*') typehandlers.add_type_alias('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 
'ns3::GenericPhyRxEndErrorCallback&')
    # Pointer and reference spellings of the GenericPhyTxEndCallback typedef,
    # registered so pybindgen can translate them wherever they appear in
    # wrapped C++ signatures.
    typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ns3::GenericPhyTxEndCallback')
    typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', 'ns3::GenericPhyTxEndCallback*')
    typehandlers.add_type_alias('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', 'ns3::GenericPhyTxEndCallback&')
    ## Register a nested module for the namespace FatalImpl
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    ## Register a nested module for the namespace addressUtils
    nested_module = module.add_cpp_namespace('addressUtils')
    register_types_ns3_addressUtils(nested_module)

def register_types_ns3_FatalImpl(module):
    """Register wrapped types for the nested ns3::FatalImpl namespace.

    No types are currently registered under this namespace; only the
    root-module lookup is emitted.
    """
    # NOTE(review): result is unused; kept exactly as emitted by the
    # binding generator.
    root_module = module.get_root()

def register_types_ns3_addressUtils(module):
    """Register wrapped types for the nested ns3::addressUtils namespace.

    No types are currently registered under this namespace; only the
    root-module lookup is emitted.
    """
    # NOTE(review): result is unused; kept exactly as emitted by the
    # binding generator.
    root_module = module.get_root()

def register_methods(root_module):
    """Register the wrapped methods of the classes added earlier in this module.

    Invokes one register_Ns3*_methods() helper per class; each helper
    receives the root module plus the class wrapper it populates.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3ApplicationContainer_methods(root_module, root_module['ns3::ApplicationContainer'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, 
root_module['ns3::ByteTagIterator']) register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item']) register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList']) register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator']) register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3ChannelList_methods(root_module, root_module['ns3::ChannelList']) register_Ns3DataRate_methods(root_module, root_module['ns3::DataRate']) register_Ns3EventId_methods(root_module, root_module['ns3::EventId']) register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress']) register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address']) register_Ns3Mac64Address_methods(root_module, root_module['ns3::Mac64Address']) register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer']) register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer']) register_Ns3NodeList_methods(root_module, root_module['ns3::NodeList']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory']) register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata']) register_Ns3PacketMetadataItem_methods(root_module, 
root_module['ns3::PacketMetadata::Item']) register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator']) register_Ns3PacketSocketAddress_methods(root_module, root_module['ns3::PacketSocketAddress']) register_Ns3PacketSocketHelper_methods(root_module, root_module['ns3::PacketSocketHelper']) register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator']) register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item']) register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList']) register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData']) register_Ns3PbbAddressTlvBlock_methods(root_module, root_module['ns3::PbbAddressTlvBlock']) register_Ns3PbbTlvBlock_methods(root_module, root_module['ns3::PbbTlvBlock']) register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile']) register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper']) register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice']) register_Ns3RandomVariable_methods(root_module, root_module['ns3::RandomVariable']) register_Ns3SeedManager_methods(root_module, root_module['ns3::SeedManager']) register_Ns3SequenceNumber32_methods(root_module, root_module['ns3::SequenceNumber32']) register_Ns3SequenceNumber16_methods(root_module, root_module['ns3::SequenceNumber16']) register_Ns3SequentialVariable_methods(root_module, root_module['ns3::SequentialVariable']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator']) register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs']) register_Ns3Tag_methods(root_module, root_module['ns3::Tag']) register_Ns3TagBuffer_methods(root_module, 
root_module['ns3::TagBuffer']) register_Ns3TriangularVariable_methods(root_module, root_module['ns3::TriangularVariable']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3UniformVariable_methods(root_module, root_module['ns3::UniformVariable']) register_Ns3WeibullVariable_methods(root_module, root_module['ns3::WeibullVariable']) register_Ns3ZetaVariable_methods(root_module, root_module['ns3::ZetaVariable']) register_Ns3ZipfVariable_methods(root_module, root_module['ns3::ZipfVariable']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk']) register_Ns3ConstantVariable_methods(root_module, root_module['ns3::ConstantVariable']) register_Ns3DeterministicVariable_methods(root_module, root_module['ns3::DeterministicVariable']) register_Ns3EmpiricalVariable_methods(root_module, root_module['ns3::EmpiricalVariable']) register_Ns3ErlangVariable_methods(root_module, root_module['ns3::ErlangVariable']) register_Ns3ExponentialVariable_methods(root_module, root_module['ns3::ExponentialVariable']) register_Ns3FlowIdTag_methods(root_module, root_module['ns3::FlowIdTag']) register_Ns3GammaVariable_methods(root_module, root_module['ns3::GammaVariable']) register_Ns3Header_methods(root_module, root_module['ns3::Header']) register_Ns3IntEmpiricalVariable_methods(root_module, root_module['ns3::IntEmpiricalVariable']) register_Ns3LlcSnapHeader_methods(root_module, root_module['ns3::LlcSnapHeader']) register_Ns3LogNormalVariable_methods(root_module, root_module['ns3::LogNormalVariable']) register_Ns3NormalVariable_methods(root_module, root_module['ns3::NormalVariable']) 
register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3PacketBurst_methods(root_module, root_module['ns3::PacketBurst']) register_Ns3ParetoVariable_methods(root_module, root_module['ns3::ParetoVariable']) register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper']) register_Ns3Queue_methods(root_module, root_module['ns3::Queue']) register_Ns3RadiotapHeader_methods(root_module, root_module['ns3::RadiotapHeader']) register_Ns3RedQueue_methods(root_module, root_module['ns3::RedQueue']) register_Ns3RedQueueStats_methods(root_module, root_module['ns3::RedQueue::Stats']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) 
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >']) register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >']) register_Ns3SimpleRefCount__Ns3PbbMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbMessage__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >']) register_Ns3SimpleRefCount__Ns3PbbPacket_Ns3Header_Ns3DefaultDeleter__lt__ns3PbbPacket__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >']) register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3Socket_methods(root_module, root_module['ns3::Socket']) register_Ns3SocketAddressTag_methods(root_module, root_module['ns3::SocketAddressTag']) 
register_Ns3SocketFactory_methods(root_module, root_module['ns3::SocketFactory']) register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag']) register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer']) register_Ns3Application_methods(root_module, root_module['ns3::Application']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker']) register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3Channel_methods(root_module, root_module['ns3::Channel']) register_Ns3DataRateChecker_methods(root_module, root_module['ns3::DataRateChecker']) register_Ns3DataRateValue_methods(root_module, root_module['ns3::DataRateValue']) register_Ns3DropTailQueue_methods(root_module, root_module['ns3::DropTailQueue']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3ErrorModel_methods(root_module, root_module['ns3::ErrorModel']) register_Ns3EthernetHeader_methods(root_module, root_module['ns3::EthernetHeader']) register_Ns3EthernetTrailer_methods(root_module, root_module['ns3::EthernetTrailer']) register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl']) 
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3ListErrorModel_methods(root_module, root_module['ns3::ListErrorModel']) register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker']) register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker']) register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue']) register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper']) register_Ns3Packet_methods(root_module, root_module['ns3::Packet']) register_Ns3PacketSocket_methods(root_module, root_module['ns3::PacketSocket']) register_Ns3PacketSocketFactory_methods(root_module, root_module['ns3::PacketSocketFactory']) register_Ns3PbbAddressBlock_methods(root_module, root_module['ns3::PbbAddressBlock']) register_Ns3PbbAddressBlockIpv4_methods(root_module, root_module['ns3::PbbAddressBlockIpv4']) register_Ns3PbbAddressBlockIpv6_methods(root_module, root_module['ns3::PbbAddressBlockIpv6']) 
    register_Ns3PbbMessage_methods(root_module, root_module['ns3::PbbMessage'])
    register_Ns3PbbMessageIpv4_methods(root_module, root_module['ns3::PbbMessageIpv4'])
    register_Ns3PbbMessageIpv6_methods(root_module, root_module['ns3::PbbMessageIpv6'])
    register_Ns3PbbPacket_methods(root_module, root_module['ns3::PbbPacket'])
    register_Ns3PbbTlv_methods(root_module, root_module['ns3::PbbTlv'])
    register_Ns3RandomVariableChecker_methods(root_module, root_module['ns3::RandomVariableChecker'])
    register_Ns3RandomVariableValue_methods(root_module, root_module['ns3::RandomVariableValue'])
    register_Ns3RateErrorModel_methods(root_module, root_module['ns3::RateErrorModel'])
    register_Ns3ReceiveListErrorModel_methods(root_module, root_module['ns3::ReceiveListErrorModel'])
    register_Ns3SimpleChannel_methods(root_module, root_module['ns3::SimpleChannel'])
    register_Ns3SimpleNetDevice_methods(root_module, root_module['ns3::SimpleNetDevice'])
    register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3PbbAddressTlv_methods(root_module, root_module['ns3::PbbAddressTlv'])
    return

# NOTE(review): everything below is PyBindGen-generated binding registration for the
# ns-3 'network' module; the ## comments record the scanned C++ declaration each call
# mirrors. Do not hand-edit signatures — regenerate from the API scan instead.

def register_Ns3Address_methods(root_module, cls):
    # Registers Python bindings for ns3::Address: comparison/stream operators,
    # constructors, and the raw-buffer copy/(de)serialization API.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength', 'uint8_t', [], is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid', 'bool', [], is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register', 'uint8_t', [], is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True)
    return

def register_Ns3ApplicationContainer_methods(root_module, cls):
    # Registers bindings for ns3::ApplicationContainer: constructors, Add overloads,
    # iteration (Begin/End), indexed access, and Start/Stop scheduling helpers.
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer(ns3::ApplicationContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ApplicationContainer const &', 'arg0')])
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer() [constructor]
    cls.add_constructor([])
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer(ns3::Ptr<ns3::Application> application) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Application >', 'application')])
    ## application-container.h (module 'network'): ns3::ApplicationContainer::ApplicationContainer(std::string name) [constructor]
    cls.add_constructor([param('std::string', 'name')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Add(ns3::ApplicationContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::ApplicationContainer', 'other')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Add(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Application >', 'application')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Add(std::string name) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name')])
    ## application-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Application>*,std::vector<ns3::Ptr<ns3::Application>, std::allocator<ns3::Ptr<ns3::Application> > > > ns3::ApplicationContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >', [], is_const=True)
    ## application-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Application>*,std::vector<ns3::Ptr<ns3::Application>, std::allocator<ns3::Ptr<ns3::Application> > > > ns3::ApplicationContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Application > const, std::vector< ns3::Ptr< ns3::Application > > >', [], is_const=True)
    ## application-container.h (module 'network'): ns3::Ptr<ns3::Application> ns3::ApplicationContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'i')], is_const=True)
    ## application-container.h (module 'network'): uint32_t ns3::ApplicationContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Start(ns3::Time start) [member function]
    cls.add_method('Start', 'void', [param('ns3::Time', 'start')])
    ## application-container.h (module 'network'): void ns3::ApplicationContainer::Stop(ns3::Time stop) [member function]
    cls.add_method('Stop', 'void', [param('ns3::Time', 'stop')])
    return

def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    # Registers bindings for ns3::AsciiTraceHelper: file-stream creation, the static
    # default enqueue/dequeue/drop/receive trace sinks, and filename helpers.
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
    cls.add_method('CreateFileStream', 'ns3::Ptr< ns3::OutputStreamWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return

def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    # Registers bindings for the ns3::AsciiTraceHelperForDevice mixin: the full set
    # of EnableAscii overloads (by device, name, container, or node/device id),
    # EnableAsciiAll, and the pure-virtual EnableAsciiInternal hook.
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
    cls.add_method('EnableAsciiAll', 'void', [param('std::string', 'prefix')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
    cls.add_method('EnableAsciiAll', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3AttributeConstructionList_methods(root_module, cls):
    # Registers bindings for ns3::AttributeConstructionList: Add plus
    # Begin/End/Find lookup over the stored (name, checker, value) items.
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    # Registers bindings for the nested ns3::AttributeConstructionList::Item struct:
    # constructors and its three public data members exposed as instance attributes.
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

def register_Ns3Buffer_methods(root_module, cls):
    # Registers bindings for ns3::Buffer: constructors, grow/shrink operations
    # (AddAtStart/AddAtEnd/RemoveAt*), iteration, data copy, fragments, and
    # (de)serialization.
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd', 'bool', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart', 'bool', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
    cls.add_method('CreateFullCopy', 'ns3::Buffer', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
    cls.add_method('GetCurrentEndOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
    cls.add_method('GetCurrentStartOffset', 'int32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

def register_Ns3BufferIterator_methods(root_module, cls):
    # Registers bindings for ns3::Buffer::Iterator (definition continues below).
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t 
ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 
'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 
'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void 
ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): 
ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] 
cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3ChannelList_methods(root_module, cls): ## channel-list.h (module 'network'): ns3::ChannelList::ChannelList() [constructor] cls.add_constructor([]) ## channel-list.h (module 'network'): ns3::ChannelList::ChannelList(ns3::ChannelList const & arg0) [copy constructor] cls.add_constructor([param('ns3::ChannelList const &', 'arg0')]) ## channel-list.h (module 'network'): static uint32_t ns3::ChannelList::Add(ns3::Ptr<ns3::Channel> channel) [member function] cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Channel >', 'channel')], is_static=True) ## channel-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Channel>*,std::vector<ns3::Ptr<ns3::Channel>, std::allocator<ns3::Ptr<ns3::Channel> > > > ns3::ChannelList::Begin() [member function] cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Channel > const, std::vector< ns3::Ptr< ns3::Channel > > >', [], is_static=True) ## channel-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Channel>*,std::vector<ns3::Ptr<ns3::Channel>, std::allocator<ns3::Ptr<ns3::Channel> > > > ns3::ChannelList::End() [member function] cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Channel > const, std::vector< ns3::Ptr< ns3::Channel > > >', [], is_static=True) ## channel-list.h (module 'network'): static ns3::Ptr<ns3::Channel> ns3::ChannelList::GetChannel(uint32_t n) [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [param('uint32_t', 'n')], is_static=True) ## channel-list.h (module 'network'): static uint32_t ns3::ChannelList::GetNChannels() [member function] cls.add_method('GetNChannels', 'uint32_t', [], is_static=True) return def register_Ns3DataRate_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('!=') 
cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## data-rate.h (module 'network'): ns3::DataRate::DataRate(ns3::DataRate const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRate const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate(uint64_t bps) [constructor] cls.add_constructor([param('uint64_t', 'bps')]) ## data-rate.h (module 'network'): ns3::DataRate::DataRate(std::string rate) [constructor] cls.add_constructor([param('std::string', 'rate')]) ## data-rate.h (module 'network'): double ns3::DataRate::CalculateTxTime(uint32_t bytes) const [member function] cls.add_method('CalculateTxTime', 'double', [param('uint32_t', 'bytes')], is_const=True) ## data-rate.h (module 'network'): uint64_t ns3::DataRate::GetBitRate() const [member function] cls.add_method('GetBitRate', 'uint64_t', [], is_const=True) return def register_Ns3EventId_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventId const &', 'arg0')]) ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor] cls.add_constructor([]) ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')]) ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const 
[member function] cls.add_method('GetContext', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function] cls.add_method('GetTs', 'uint64_t', [], is_const=True) ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function] cls.add_method('GetUid', 'uint32_t', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function] cls.add_method('IsExpired', 'bool', [], is_const=True) ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function] cls.add_method('IsRunning', 'bool', [], is_const=True) ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function] cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True) return def register_Ns3Inet6SocketAddress_methods(root_module, cls): ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor] cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')]) ## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char 
const * ipv6) [constructor] cls.add_constructor([param('char const *', 'ipv6')]) ## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function] cls.add_method('ConvertFrom', 'ns3::Inet6SocketAddress', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function] cls.add_method('GetIpv6', 'ns3::Ipv6Address', [], is_const=True) ## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'addr')], is_static=True) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function] cls.add_method('SetIpv6', 'void', [param('ns3::Ipv6Address', 'ipv6')]) ## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3InetSocketAddress_methods(root_module, cls): ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) [copy constructor] cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor] cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor] 
cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor] cls.add_constructor([param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor] cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')]) ## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor] cls.add_constructor([param('char const *', 'ipv4')]) ## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::InetSocketAddress', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function] cls.add_method('GetIpv4', 'ns3::Ipv4Address', [], is_const=True) ## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function] cls.add_method('GetPort', 'uint16_t', [], is_const=True) ## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function] cls.add_method('SetIpv4', 'void', [param('ns3::Ipv4Address', 'address')]) ## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function] cls.add_method('SetPort', 'void', [param('uint16_t', 'port')]) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') 
cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static 
ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void 
ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 
'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function] cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'mask')]) return def register_Ns3Ipv6Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor] cls.add_constructor([param('uint8_t *', 'address')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')]) ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor] cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')]) ## ipv6-address.h 
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
    # NOTE: registered non-const, matching the (non-const) C++ signature above.
    cls.add_method('IsIpv4MappedAddress', 'bool', [])
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return

# Register Python bindings for ns3::Ipv6Prefix (ipv6-address.h): comparison
# operators, constructors, and member-function wrappers.
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

# Register Python bindings for ns3::Mac48Address (mac48-address.h): ordering and
# equality operators, constructors, and static/instance member wrappers.
def register_Ns3Mac48Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

# Register Python bindings for ns3::Mac64Address (mac64-address.h).
def register_Ns3Mac64Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac64-address.h (module 'network'): ns3::Mac64Address::Mac64Address(ns3::Mac64Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac64Address const &', 'arg0')])
    ## mac64-address.h (module 'network'): ns3::Mac64Address::Mac64Address() [constructor]
    cls.add_constructor([])
    ## mac64-address.h (module 'network'): ns3::Mac64Address::Mac64Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac64-address.h (module 'network'): static ns3::Mac64Address ns3::Mac64Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac64Address', [], is_static=True)
    ## mac64-address.h (module 'network'): static ns3::Mac64Address ns3::Mac64Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac64Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac64-address.h (module 'network'): void ns3::Mac64Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac64-address.h (module 'network'): void ns3::Mac64Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac64-address.h (module 'network'): static bool ns3::Mac64Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

# Register Python bindings for ns3::NetDeviceContainer (net-device-container.h):
# constructors, Add overloads, iteration (Begin/End), and indexed access.
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >', [], is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

# Register Python bindings for ns3::NodeContainer (node-container.h):
# constructors (including the 2- to 5-container merge forms), Add overloads,
# Create, iteration, and indexed access.
def register_Ns3NodeContainer_methods(root_module, cls):
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

# Register Python bindings for ns3::NodeList (node-list.h) — all-static registry API.
def register_Ns3NodeList_methods(root_module, cls):
    ## node-list.h (module 'network'): ns3::NodeList::NodeList() [constructor]
    cls.add_constructor([])
    ## node-list.h (module 'network'): ns3::NodeList::NodeList(ns3::NodeList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeList const &', 'arg0')])
    ## node-list.h (module 'network'): static uint32_t ns3::NodeList::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'uint32_t', [param('ns3::Ptr< ns3::Node >', 'node')], is_static=True)
    ## node-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeList::Begin() [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_static=True)
    ## node-list.h (module 'network'): static __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeList::End() [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_static=True)
    ## node-list.h (module 'network'): static uint32_t ns3::NodeList::GetNNodes() [member function]
    cls.add_method('GetNNodes', 'uint32_t', [], is_static=True)
    ## node-list.h (module 'network'): static ns3::Ptr<ns3::Node> ns3::NodeList::GetNode(uint32_t n) [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'n')], is_static=True)
    return

# Register Python bindings for ns3::ObjectBase (object-base.h): attribute
# get/set and trace connect/disconnect API, plus protected construction hooks.
def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

# Register Python bindings for ns3::ObjectDeleter (object.h).
def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

# Register Python bindings for ns3::ObjectFactory (object-factory.h):
# construction, attribute Set, and the three SetTypeId overloads.
def register_Ns3ObjectFactory_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId', 'void', [param('std::string', 'tid')])
    return

# Register Python bindings for ns3::PacketMetadata (packet-metadata.h):
# header/trailer bookkeeping, (de)serialization, and the static Enable hooks.
def register_Ns3PacketMetadata_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return

# Register Python bindings for the nested struct ns3::PacketMetadata::Item:
# constructors plus its public data members as instance attributes.
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

# Register Python bindings for ns3::PacketMetadata::ItemIterator.
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

# Register Python bindings for ns3::PacketSocketAddress (packet-socket-address.h).
def register_Ns3PacketSocketAddress_methods(root_module, cls):
    ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress::PacketSocketAddress(ns3::PacketSocketAddress const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocketAddress const &', 'arg0')])
    ## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress::PacketSocketAddress() [constructor]
    cls.add_constructor([])
    ## packet-socket-address.h (module 'network'): static ns3::PacketSocketAddress ns3::PacketSocketAddress::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::PacketSocketAddress', [param('ns3::Address const &', 'address')], is_static=True)
    ## packet-socket-address.h (module 'network'): ns3::Address ns3::PacketSocketAddress::GetPhysicalAddress() const [member function]
    cls.add_method('GetPhysicalAddress', 'ns3::Address', [], is_const=True)
    ## packet-socket-address.h (module 'network'): uint16_t ns3::PacketSocketAddress::GetProtocol() const [member function]
    cls.add_method('GetProtocol', 'uint16_t', [], is_const=True)
    ## packet-socket-address.h (module 'network'): uint32_t ns3::PacketSocketAddress::GetSingleDevice() const [member function]
    cls.add_method('GetSingleDevice', 'uint32_t', [], is_const=True)
    ## packet-socket-address.h (module 'network'): static bool ns3::PacketSocketAddress::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## packet-socket-address.h (module 'network'): bool ns3::PacketSocketAddress::IsSingleDevice() const [member function]
    cls.add_method('IsSingleDevice', 'bool', [], is_const=True)
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetAllDevices() [member function]
    cls.add_method('SetAllDevices', 'void', [])
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetPhysicalAddress(ns3::Address const address) [member function]
    cls.add_method('SetPhysicalAddress', 'void', [param('ns3::Address const', 'address')])
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetProtocol(uint16_t protocol) [member function]
    cls.add_method('SetProtocol', 'void', [param('uint16_t', 'protocol')])
    ## packet-socket-address.h (module 'network'): void ns3::PacketSocketAddress::SetSingleDevice(uint32_t device) [member function]
    cls.add_method('SetSingleDevice', 'void', [param('uint32_t', 'device')])
    return

# Register Python bindings for ns3::PacketSocketHelper (packet-socket-helper.h):
# the three Install overloads (node pointer, node name, node container).
def register_Ns3PacketSocketHelper_methods(root_module, cls):
    ## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper::PacketSocketHelper() [constructor]
    cls.add_constructor([])
    ## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper::PacketSocketHelper(ns3::PacketSocketHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocketHelper const &', 'arg0')])
    ## packet-socket-helper.h (module 'network'): void ns3::PacketSocketHelper::Install(ns3::Ptr<ns3::Node> node) const [member function]
    cls.add_method('Install', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_const=True)
    ## packet-socket-helper.h (module 'network'): void ns3::PacketSocketHelper::Install(std::string nodeName) const [member function]
    cls.add_method('Install', 'void', [param('std::string', 'nodeName')], is_const=True)
    ## packet-socket-helper.h (module 'network'): void ns3::PacketSocketHelper::Install(ns3::NodeContainer c) const [member function]
    cls.add_method('Install', 'void', [param('ns3::NodeContainer', 'c')], is_const=True)
    return

# Register Python bindings for ns3::PacketTagIterator (packet.h).
def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

# Register Python bindings for ns3::PacketTagIterator::Item (packet.h).
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

# Register Python bindings for ns3::PacketTagList (packet-tag-list.h).
def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    return

# Register Python bindings for ns3::PacketTagList::TagData (packet-tag-list.h):
# constructors plus its public data members as instance attributes.
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

# Register Python bindings for ns3::PbbAddressTlvBlock (packetbb.h).
# NOTE(review): this function continues beyond the visible chunk boundary.
def register_Ns3PbbAddressTlvBlock_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h
(module 'network'): ns3::PbbAddressTlvBlock::PbbAddressTlvBlock(ns3::PbbAddressTlvBlock const & arg0) [copy constructor] cls.add_constructor([param('ns3::PbbAddressTlvBlock const &', 'arg0')]) ## packetbb.h (module 'network'): ns3::PbbAddressTlvBlock::PbbAddressTlvBlock() [constructor] cls.add_constructor([]) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressTlvBlock::Back() const [member function] cls.add_method('Back', 'ns3::Ptr< ns3::PbbAddressTlv >', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Begin() [member function] cls.add_method('Begin', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Clear() [member function] cls.add_method('Clear', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Deserialize(ns3::Buffer::Iterator & start) [member function] cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')]) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlvBlock::Empty() const [member function] cls.add_method('Empty', 'bool', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::End() [member function] cls.add_method('End', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True) ## packetbb.h (module 'network'): 
std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position')]) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > last) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'last')]) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressTlvBlock::Front() const [member function] cls.add_method('Front', 'ns3::Ptr< ns3::PbbAddressTlv >', [], is_const=True) ## packetbb.h (module 'network'): uint32_t ns3::PbbAddressTlvBlock::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressTlvBlock::Insert(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position, ns3::Ptr<ns3::PbbAddressTlv> const tlv) [member function] cls.add_method('Insert', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position'), param('ns3::Ptr< ns3::PbbAddressTlv > const', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PopBack() [member function] cls.add_method('PopBack', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PopFront() [member function] cls.add_method('PopFront', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Print(std::ostream & os) const [member function] 
cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Print(std::ostream & os, int level) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PushBack(ns3::Ptr<ns3::PbbAddressTlv> tlv) [member function] cls.add_method('PushBack', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::PushFront(ns3::Ptr<ns3::PbbAddressTlv> tlv) [member function] cls.add_method('PushFront', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlvBlock::Serialize(ns3::Buffer::Iterator & start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True) ## packetbb.h (module 'network'): int ns3::PbbAddressTlvBlock::Size() const [member function] cls.add_method('Size', 'int', [], is_const=True) return def register_Ns3PbbTlvBlock_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('!=') ## packetbb.h (module 'network'): ns3::PbbTlvBlock::PbbTlvBlock(ns3::PbbTlvBlock const & arg0) [copy constructor] cls.add_constructor([param('ns3::PbbTlvBlock const &', 'arg0')]) ## packetbb.h (module 'network'): ns3::PbbTlvBlock::PbbTlvBlock() [constructor] cls.add_constructor([]) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbTlvBlock::Back() const [member function] cls.add_method('Back', 'ns3::Ptr< ns3::PbbTlv >', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Begin() [member function] cls.add_method('Begin', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > 
ns3::PbbTlvBlock::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Clear() [member function] cls.add_method('Clear', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Deserialize(ns3::Buffer::Iterator & start) [member function] cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')]) ## packetbb.h (module 'network'): bool ns3::PbbTlvBlock::Empty() const [member function] cls.add_method('Empty', 'bool', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::End() [member function] cls.add_method('End', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position')]) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > last) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'last')]) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbTlvBlock::Front() const [member function] cls.add_method('Front', 'ns3::Ptr< ns3::PbbTlv >', [], is_const=True) ## packetbb.h (module 'network'): uint32_t 
ns3::PbbTlvBlock::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbTlvBlock::Insert(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position, ns3::Ptr<ns3::PbbTlv> const tlv) [member function] cls.add_method('Insert', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position'), param('ns3::Ptr< ns3::PbbTlv > const', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PopBack() [member function] cls.add_method('PopBack', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PopFront() [member function] cls.add_method('PopFront', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Print(std::ostream & os, int level) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PushBack(ns3::Ptr<ns3::PbbTlv> tlv) [member function] cls.add_method('PushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::PushFront(ns3::Ptr<ns3::PbbTlv> tlv) [member function] cls.add_method('PushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbTlvBlock::Serialize(ns3::Buffer::Iterator & start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True) ## packetbb.h (module 'network'): int ns3::PbbTlvBlock::Size() const [member function] cls.add_method('Size', 'int', [], is_const=True) return def register_Ns3PcapFile_methods(root_module, cls): ## pcap-file.h (module 
'network'): ns3::PcapFile::PcapFile() [constructor] cls.add_constructor([]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function] cls.add_method('Clear', 'void', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function] cls.add_method('Close', 'void', []) ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function] cls.add_method('Diff', 'bool', [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')], is_static=True) ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function] cls.add_method('Eof', 'bool', [], is_const=True) ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function] cls.add_method('Fail', 'bool', [], is_const=True) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function] cls.add_method('GetDataLinkType', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function] cls.add_method('GetMagic', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function] cls.add_method('GetSigFigs', 'uint32_t', []) ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function] cls.add_method('GetSnapLen', 'uint32_t', []) ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function] cls.add_method('GetSwapMode', 'bool', []) ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function] cls.add_method('GetTimeZoneOffset', 'int32_t', []) ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function] 
cls.add_method('GetVersionMajor', 'uint16_t', []) ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function] cls.add_method('GetVersionMinor', 'uint16_t', []) ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function] cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function] cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function] cls.add_method('Read', 'void', [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< 
ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('Write', 'void', [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')]) ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable] cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True) ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable] cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True) return def register_Ns3PcapHelper_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=65535, int32_t tzCorrection=0) [member function] cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='65535'), param('int32_t', 'tzCorrection', default_value='0')]) ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')]) ## trace-helper.h (module 'network'): std::string 
ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function] cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')]) return def register_Ns3PcapHelperForDevice_methods(root_module, cls): ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')]) ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor] cls.add_constructor([]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): 
void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function] cls.add_method('EnablePcap', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function] cls.add_method('EnablePcapAll', 'void', [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')]) ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function] cls.add_method('EnablePcapInternal', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True) return def register_Ns3RandomVariable_methods(root_module, cls): cls.add_output_stream_operator() ## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable(ns3::RandomVariable const & o) [copy constructor] cls.add_constructor([param('ns3::RandomVariable const &', 'o')]) ## random-variable.h (module 'core'): uint32_t ns3::RandomVariable::GetInteger() const [member function] cls.add_method('GetInteger', 'uint32_t', [], is_const=True) ## random-variable.h (module 'core'): double ns3::RandomVariable::GetValue() const [member 
function] cls.add_method('GetValue', 'double', [], is_const=True) return def register_Ns3SeedManager_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::SeedManager::SeedManager() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::SeedManager::SeedManager(ns3::SeedManager const & arg0) [copy constructor] cls.add_constructor([param('ns3::SeedManager const &', 'arg0')]) ## random-variable.h (module 'core'): static bool ns3::SeedManager::CheckSeed(uint32_t seed) [member function] cls.add_method('CheckSeed', 'bool', [param('uint32_t', 'seed')], is_static=True) ## random-variable.h (module 'core'): static uint32_t ns3::SeedManager::GetRun() [member function] cls.add_method('GetRun', 'uint32_t', [], is_static=True) ## random-variable.h (module 'core'): static uint32_t ns3::SeedManager::GetSeed() [member function] cls.add_method('GetSeed', 'uint32_t', [], is_static=True) ## random-variable.h (module 'core'): static void ns3::SeedManager::SetRun(uint32_t run) [member function] cls.add_method('SetRun', 'void', [param('uint32_t', 'run')], is_static=True) ## random-variable.h (module 'core'): static void ns3::SeedManager::SetSeed(uint32_t seed) [member function] cls.add_method('SetSeed', 'void', [param('uint32_t', 'seed')], is_static=True) return def register_Ns3SequenceNumber32_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('ns3::SequenceNumber< unsigned int, int > const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('int', 'right')) cls.add_inplace_numeric_operator('+=', param('int', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::SequenceNumber32'], root_module['ns3::SequenceNumber32'], param('int', 'right')) cls.add_inplace_numeric_operator('-=', param('int', 'right')) 
cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int>::SequenceNumber() [constructor] cls.add_constructor([]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int>::SequenceNumber(unsigned int value) [constructor] cls.add_constructor([param('unsigned int', 'value')]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned int, int>::SequenceNumber(ns3::SequenceNumber<unsigned int, int> const & value) [copy constructor] cls.add_constructor([param('ns3::SequenceNumber< unsigned int, int > const &', 'value')]) ## sequence-number.h (module 'network'): unsigned int ns3::SequenceNumber<unsigned int, int>::GetValue() const [member function] cls.add_method('GetValue', 'unsigned int', [], is_const=True) return def register_Ns3SequenceNumber16_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber16'], root_module['ns3::SequenceNumber16'], param('ns3::SequenceNumber< unsigned short, short > const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber16'], root_module['ns3::SequenceNumber16'], param('short int', 'right')) cls.add_inplace_numeric_operator('+=', param('short int', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::SequenceNumber16'], root_module['ns3::SequenceNumber16'], param('short int', 'right')) cls.add_inplace_numeric_operator('-=', param('short int', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('>=') ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short>::SequenceNumber() 
[constructor] cls.add_constructor([]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short>::SequenceNumber(short unsigned int value) [constructor] cls.add_constructor([param('short unsigned int', 'value')]) ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned short, short>::SequenceNumber(ns3::SequenceNumber<unsigned short, short> const & value) [copy constructor] cls.add_constructor([param('ns3::SequenceNumber< unsigned short, short > const &', 'value')]) ## sequence-number.h (module 'network'): short unsigned int ns3::SequenceNumber<unsigned short, short>::GetValue() const [member function] cls.add_method('GetValue', 'short unsigned int', [], is_const=True) return def register_Ns3SequentialVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(ns3::SequentialVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::SequentialVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, double i=1, uint32_t c=1) [constructor] cls.add_constructor([param('double', 'f'), param('double', 'l'), param('double', 'i', default_value='1'), param('uint32_t', 'c', default_value='1')]) ## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, ns3::RandomVariable const & i, uint32_t c=1) [constructor] cls.add_constructor([param('double', 'f'), param('double', 'l'), param('ns3::RandomVariable const &', 'i'), param('uint32_t', 'c', default_value='1')]) return def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, 
ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Simulator_methods(root_module, cls): ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Simulator const &', 'arg0')]) ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function] cls.add_method('Cancel', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function] cls.add_method('Destroy', 'void', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function] cls.add_method('GetContext', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function] cls.add_method('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function] cls.add_method('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function] cls.add_method('GetMaximumSimulationTime', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_static=True) ## simulator.h (module 'core'): static 
bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function] cls.add_method('IsExpired', 'bool', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function] cls.add_method('IsFinished', 'bool', [], is_static=True) ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function] cls.add_method('Now', 'ns3::Time', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function] cls.add_method('Remove', 'void', [param('ns3::EventId const &', 'id')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function] cls.add_method('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function] cls.add_method('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function] cls.add_method('Stop', 'void', [], is_static=True) ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function] cls.add_method('Stop', 'void', [param('ns3::Time const &', 'time')], is_static=True) return def register_Ns3SystemWallClockMs_methods(root_module, cls): ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor] cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')]) ## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor] cls.add_constructor([]) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function] cls.add_method('End', 'int64_t', []) 
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function] cls.add_method('GetElapsedReal', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function] cls.add_method('GetElapsedSystem', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function] cls.add_method('GetElapsedUser', 'int64_t', [], is_const=True) ## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function] cls.add_method('Start', 'void', []) return def register_Ns3Tag_methods(root_module, cls): ## tag.h (module 'network'): ns3::Tag::Tag() [constructor] cls.add_constructor([]) ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor] cls.add_constructor([param('ns3::Tag const &', 'arg0')]) ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True) ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3TagBuffer_methods(root_module, cls): ## 
tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor] cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')]) ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor] cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function] cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function] cls.add_method('ReadDouble', 'double', []) ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function] cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function] cls.add_method('WriteDouble', 'void', [param('double', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t 
data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')]) ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')]) return def register_Ns3TriangularVariable_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(ns3::TriangularVariable const & arg0) [copy constructor] cls.add_constructor([param('ns3::TriangularVariable const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(double s, double l, double mean) [constructor] cls.add_constructor([param('double', 's'), param('double', 'l'), param('double', 'mean')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker 
const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): 
ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] 
cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 
'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): 
    ## ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    return

# NOTE(review): auto-generated PyBindGen registrations for the core random
# variable classes — regenerate with the ns-3 API scanner rather than editing.
def register_Ns3UniformVariable_methods(root_module, cls):
    # Registers constructors plus GetInteger/GetValue overloads for ns3::UniformVariable.
    ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(ns3::UniformVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::UniformVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(double s, double l) [constructor]
    cls.add_constructor([param('double', 's'), param('double', 'l')])
    ## random-variable.h (module 'core'): uint32_t ns3::UniformVariable::GetInteger(uint32_t s, uint32_t l) [member function]
    cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 's'), param('uint32_t', 'l')])
    ## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue() const [member function]
    cls.add_method('GetValue', 'double', [], is_const=True)
    ## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue(double s, double l) [member function]
    cls.add_method('GetValue', 'double', [param('double', 's'), param('double', 'l')])
    return

def register_Ns3WeibullVariable_methods(root_module, cls):
    # Registers the four Weibull-distribution constructor overloads.
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(ns3::WeibullVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::WeibullVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m) [constructor]
    cls.add_constructor([param('double', 'm')])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's')])
    ## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
    return

def register_Ns3ZetaVariable_methods(root_module, cls):
    # Registers constructors for the zeta-distributed random variable.
    ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(ns3::ZetaVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ZetaVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(double alpha) [constructor]
    cls.add_constructor([param('double', 'alpha')])
    ## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable() [constructor]
    cls.add_constructor([])
    return

def register_Ns3ZipfVariable_methods(root_module, cls):
    # Registers constructors for the Zipf-distributed random variable.
    ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(ns3::ZipfVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ZipfVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(long int N, double alpha) [constructor]
    cls.add_constructor([param('long int', 'N'), param('double', 'alpha')])
    ## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable() [constructor]
    cls.add_constructor([])
    return

def register_Ns3Empty_methods(root_module,
cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) 
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) 
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) 
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): 
ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] 
    # NOTE(review): the two statements below are the tail of a registration
    # function whose 'def' line lies before this chunk — do not detach them.
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    return

# Registers bindings for ns3::Chunk — the abstract serialization interface
# (Deserialize/Print are registered as pure-virtual).
def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

# Registers bindings for ns3::ConstantVariable (constant-value random variable).
def register_Ns3ConstantVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(ns3::ConstantVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ConstantVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(double c) [constructor]
    cls.add_constructor([param('double', 'c')])
    ## random-variable.h (module 'core'): void ns3::ConstantVariable::SetConstant(double c) [member function]
    cls.add_method('SetConstant', 'void', [param('double', 'c')])
    return

# Registers bindings for ns3::DeterministicVariable.
def register_Ns3DeterministicVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(ns3::DeterministicVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::DeterministicVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(double * d, uint32_t c) [constructor]
    cls.add_constructor([param('double *', 'd'), param('uint32_t', 'c')])
    return

# Registers bindings for ns3::EmpiricalVariable (CDF-driven random variable).
def register_Ns3EmpiricalVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable(ns3::EmpiricalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmpiricalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): void ns3::EmpiricalVariable::CDF(double v, double c) [member function]
    cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')])
    return

# Registers bindings for ns3::ErlangVariable.
def register_Ns3ErlangVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(ns3::ErlangVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ErlangVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(unsigned int k, double lambda) [constructor]
    cls.add_constructor([param('unsigned int', 'k'), param('double', 'lambda')])
    ## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue() const [member function]
    cls.add_method('GetValue', 'double', [], is_const=True)
    ## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue(unsigned int k, double lambda) const [member function]
    cls.add_method('GetValue', 'double', [param('unsigned int', 'k'), param('double', 'lambda')], is_const=True)
    return

# Registers bindings for ns3::ExponentialVariable.
def register_Ns3ExponentialVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(ns3::ExponentialVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ExponentialVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m) [constructor]
    cls.add_constructor([param('double', 'm')])
    ## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 'b')])
    return

# Registers bindings for ns3::FlowIdTag (per-packet flow-id tag; implements
# the Tag Serialize/Deserialize interface).
def register_Ns3FlowIdTag_methods(root_module, cls):
    ## flow-id-tag.h (module 'network'): ns3::FlowIdTag::FlowIdTag(ns3::FlowIdTag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::FlowIdTag const &', 'arg0')])
    ## flow-id-tag.h (module 'network'): ns3::FlowIdTag::FlowIdTag() [constructor]
    cls.add_constructor([])
    ## flow-id-tag.h (module 'network'): ns3::FlowIdTag::FlowIdTag(uint32_t flowId) [constructor]
    cls.add_constructor([param('uint32_t', 'flowId')])
    ## flow-id-tag.h (module 'network'): static uint32_t ns3::FlowIdTag::AllocateFlowId() [member function]
    cls.add_method('AllocateFlowId', 'uint32_t', [], is_static=True)
    ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::Deserialize(ns3::TagBuffer buf) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buf')], is_virtual=True)
    ## flow-id-tag.h (module 'network'): uint32_t ns3::FlowIdTag::GetFlowId() const [member function]
    cls.add_method('GetFlowId', 'uint32_t', [], is_const=True)
    ## flow-id-tag.h (module 'network'): ns3::TypeId ns3::FlowIdTag::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## flow-id-tag.h (module 'network'): uint32_t ns3::FlowIdTag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## flow-id-tag.h (module 'network'): static ns3::TypeId ns3::FlowIdTag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::Serialize(ns3::TagBuffer buf) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buf')], is_const=True, is_virtual=True)
    ## flow-id-tag.h (module 'network'): void ns3::FlowIdTag::SetFlowId(uint32_t flowId) [member function]
    cls.add_method('SetFlowId', 'void', [param('uint32_t', 'flowId')])
    return

# Registers bindings for ns3::GammaVariable.
def register_Ns3GammaVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(ns3::GammaVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::GammaVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(double alpha, double beta) [constructor]
    cls.add_constructor([param('double', 'alpha'), param('double', 'beta')])
    ## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue() const [member function]
    cls.add_method('GetValue', 'double', [], is_const=True)
    ## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue(double alpha, double beta) const [member function]
    cls.add_method('GetValue', 'double', [param('double', 'alpha'), param('double', 'beta')], is_const=True)
    return

# Registers bindings for ns3::Header — abstract packet-header base
# (Serialize/Deserialize/Print/GetSerializedSize are pure-virtual); also
# exposes operator<< via add_output_stream_operator().
def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

# Registers bindings for ns3::IntEmpiricalVariable (constructors only).
def register_Ns3IntEmpiricalVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable(ns3::IntEmpiricalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IntEmpiricalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable() [constructor]
    cls.add_constructor([])
    return

# Registers bindings for ns3::LlcSnapHeader (LLC/SNAP encapsulation header).
def register_Ns3LlcSnapHeader_methods(root_module, cls):
    ## llc-snap-header.h (module 'network'): ns3::LlcSnapHeader::LlcSnapHeader(ns3::LlcSnapHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LlcSnapHeader const &', 'arg0')])
    ## llc-snap-header.h (module 'network'): ns3::LlcSnapHeader::LlcSnapHeader() [constructor]
    cls.add_constructor([])
    ## llc-snap-header.h (module 'network'): uint32_t ns3::LlcSnapHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## llc-snap-header.h (module 'network'): ns3::TypeId ns3::LlcSnapHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## llc-snap-header.h (module 'network'): uint32_t ns3::LlcSnapHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## llc-snap-header.h (module 'network'): uint16_t ns3::LlcSnapHeader::GetType() [member function]
    cls.add_method('GetType', 'uint16_t', [])
    ## llc-snap-header.h (module 'network'): static ns3::TypeId ns3::LlcSnapHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## llc-snap-header.h (module 'network'): void ns3::LlcSnapHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## llc-snap-header.h (module 'network'): void ns3::LlcSnapHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## llc-snap-header.h (module 'network'): void ns3::LlcSnapHeader::SetType(uint16_t type) [member function]
    cls.add_method('SetType', 'void', [param('uint16_t', 'type')])
    return

# Registers bindings for ns3::LogNormalVariable.
def register_Ns3LogNormalVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(ns3::LogNormalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LogNormalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(double mu, double sigma) [constructor]
    cls.add_constructor([param('double', 'mu'), param('double', 'sigma')])
    return

# Registers bindings for ns3::NormalVariable.
def register_Ns3NormalVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(ns3::NormalVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NormalVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 'v')])
    ## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 'v'), param('double', 'b')])
    return

# Registers bindings for ns3::Object — aggregation/lifecycle base class; the
# copy constructor and Do*/Notify* hooks are exposed with protected visibility.
def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Start() [member function]
    cls.add_method('Start', 'void', [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoStart() [member function]
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return

# Registers bindings for the nested ns3::Object::AggregateIterator.
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

# Registers bindings for ns3::PacketBurst (list of packets); DoDispose is
# exposed with private visibility.
def register_Ns3PacketBurst_methods(root_module, cls):
    ## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst(ns3::PacketBurst const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketBurst const &', 'arg0')])
    ## packet-burst.h (module 'network'): ns3::PacketBurst::PacketBurst() [constructor]
    cls.add_constructor([])
    ## packet-burst.h (module 'network'): void ns3::PacketBurst::AddPacket(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('AddPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet')])
    ## packet-burst.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::Begin() const [member function]
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >', [], is_const=True)
    ## packet-burst.h (module 'network'): ns3::Ptr<ns3::PacketBurst> ns3::PacketBurst::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::PacketBurst >', [], is_const=True)
    ## packet-burst.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::Packet> > ns3::PacketBurst::End() const [member function]
    cls.add_method('End', 'std::_List_const_iterator< ns3::Ptr< ns3::Packet > >', [], is_const=True)
    ## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetNPackets() const [member function]
    cls.add_method('GetNPackets', 'uint32_t', [], is_const=True)
    ## packet-burst.h (module 'network'): std::list<ns3::Ptr<ns3::Packet>, std::allocator<ns3::Ptr<ns3::Packet> > > ns3::PacketBurst::GetPackets() const [member function]
    cls.add_method('GetPackets', 'std::list< ns3::Ptr< ns3::Packet > >', [], is_const=True)
    ## packet-burst.h (module 'network'): uint32_t ns3::PacketBurst::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## packet-burst.h (module 'network'): static ns3::TypeId ns3::PacketBurst::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## packet-burst.h (module 'network'): void ns3::PacketBurst::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return

# Registers bindings for ns3::ParetoVariable (several constructor overloads,
# including std::pair-based parameter forms).
def register_Ns3ParetoVariable_methods(root_module, cls):
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(ns3::ParetoVariable const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ParetoVariable const &', 'arg0')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable() [constructor]
    cls.add_constructor([])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m) [constructor]
    cls.add_constructor([param('double', 'm')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s, double b) [constructor]
    cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params) [constructor]
    cls.add_constructor([param('std::pair< double, double >', 'params')])
    ## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params, double b) [constructor]
    cls.add_constructor([param('std::pair< double, double >', 'params'), param('double', 'b')])
    return

# Registers bindings for ns3::PcapFileWrapper — pcap file I/O; note the C++
# default values preserved on Init()'s snapLen/tzCorrection parameters.
def register_Ns3PcapFileWrapper_methods(root_module, cls):
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof', 'bool', [], is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open', 'void', [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close', 'void', [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init', 'void', [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('ns3::Header &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write', 'void', [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor', 'uint16_t', [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset', 'int32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen', 'uint32_t', [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType', 'uint32_t', [])
    return

# Registers bindings for ns3::Queue — abstract packet-queue base; Drop is
# protected and the Do* hooks are pure-virtual with private visibility.
def register_Ns3Queue_methods(root_module, cls):
    ## queue.h (module 'network'): ns3::Queue::Queue(ns3::Queue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Queue const &', 'arg0')])
    ## queue.h (module 'network'): ns3::Queue::Queue() [constructor]
    cls.add_constructor([])
    ## queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Queue::Dequeue() [member function]
    cls.add_method('Dequeue', 'ns3::Ptr< ns3::Packet >', [])
    ## queue.h (module 'network'): void ns3::Queue::DequeueAll() [member function]
    cls.add_method('DequeueAll', 'void', [])
    ## queue.h (module 'network'): bool ns3::Queue::Enqueue(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Enqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetNBytes() const [member function]
    cls.add_method('GetNBytes', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetNPackets() const [member function]
    cls.add_method('GetNPackets', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalDroppedBytes() const [member function]
    cls.add_method('GetTotalDroppedBytes', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalDroppedPackets() const [member function]
    cls.add_method('GetTotalDroppedPackets', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalReceivedBytes() const [member function]
    cls.add_method('GetTotalReceivedBytes', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): uint32_t ns3::Queue::GetTotalReceivedPackets() const [member function]
    cls.add_method('GetTotalReceivedPackets', 'uint32_t', [], is_const=True)
    ## queue.h (module 'network'): static ns3::TypeId ns3::Queue::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## queue.h (module 'network'): bool ns3::Queue::IsEmpty() const [member function]
    cls.add_method('IsEmpty', 'bool', [], is_const=True)
    ## queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::Queue::Peek() const [member function]
    cls.add_method('Peek', 'ns3::Ptr< ns3::Packet const >', [], is_const=True)
    ## queue.h (module 'network'): void ns3::Queue::ResetStatistics() [member function]
    cls.add_method('ResetStatistics', 'void', [])
    ## queue.h (module 'network'): void ns3::Queue::Drop(ns3::Ptr<ns3::Packet> packet) [member function]
    cls.add_method('Drop', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet')], visibility='protected')
    ## queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Queue::DoDequeue() [member function]
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::Packet >', [], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## queue.h (module 'network'): bool ns3::Queue::DoEnqueue(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], is_pure_virtual=True, visibility='private', is_virtual=True)
    ## queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::Queue::DoPeek() const [member function]
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::Packet const >', [], is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
    return

# Registers bindings for ns3::RadiotapHeader (802.11 radiotap capture header).
def register_Ns3RadiotapHeader_methods(root_module, cls):
    ## radiotap-header.h (module 'network'): ns3::RadiotapHeader::RadiotapHeader(ns3::RadiotapHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RadiotapHeader const &', 'arg0')])
    ## radiotap-header.h (module 'network'): ns3::RadiotapHeader::RadiotapHeader() [constructor]
    cls.add_constructor([])
    ## radiotap-header.h (module 'network'): uint32_t ns3::RadiotapHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetAntennaNoisePower() const [member function]
    cls.add_method('GetAntennaNoisePower', 'uint8_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetAntennaSignalPower() const [member function]
    cls.add_method('GetAntennaSignalPower', 'uint8_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): uint16_t ns3::RadiotapHeader::GetChannelFlags() const [member function]
    cls.add_method('GetChannelFlags', 'uint16_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): uint16_t ns3::RadiotapHeader::GetChannelFrequency() const [member function]
    cls.add_method('GetChannelFrequency', 'uint16_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetFrameFlags() const [member function]
    cls.add_method('GetFrameFlags', 'uint8_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): ns3::TypeId ns3::RadiotapHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## radiotap-header.h (module 'network'): uint8_t ns3::RadiotapHeader::GetRate() const [member function]
    cls.add_method('GetRate', 'uint8_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): uint32_t ns3::RadiotapHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    ## radiotap-header.h (module 'network'): uint64_t ns3::RadiotapHeader::GetTsft() const [member function]
    cls.add_method('GetTsft', 'uint64_t', [], is_const=True)
    ## radiotap-header.h (module 'network'): static ns3::TypeId ns3::RadiotapHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetAntennaNoisePower(double noise) [member function]
    cls.add_method('SetAntennaNoisePower', 'void', [param('double', 'noise')])
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetAntennaSignalPower(double signal) [member function]
    cls.add_method('SetAntennaSignalPower', 'void', [param('double', 'signal')])
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetChannelFrequencyAndFlags(uint16_t frequency, uint16_t flags) [member function]
    cls.add_method('SetChannelFrequencyAndFlags', 'void', [param('uint16_t', 'frequency'), param('uint16_t', 'flags')])
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetFrameFlags(uint8_t flags) [member function]
    cls.add_method('SetFrameFlags', 'void', [param('uint8_t', 'flags')])
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetRate(uint8_t rate) [member function]
    cls.add_method('SetRate', 'void', [param('uint8_t', 'rate')])
    ## radiotap-header.h (module 'network'): void ns3::RadiotapHeader::SetTsft(uint64_t tsft) [member function]
    cls.add_method('SetTsft', 'void', [param('uint64_t', 'tsft')])
    return

# Registers bindings for ns3::RedQueue (RED queueing discipline); the Do*
# overrides are exposed with private visibility.
def register_Ns3RedQueue_methods(root_module, cls):
    ## red-queue.h (module 'network'): ns3::RedQueue::RedQueue(ns3::RedQueue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RedQueue const &', 'arg0')])
    ## red-queue.h (module 'network'): ns3::RedQueue::RedQueue() [constructor]
    cls.add_constructor([])
    ## red-queue.h (module 'network'): ns3::Queue::QueueMode ns3::RedQueue::GetMode() [member function]
    cls.add_method('GetMode', 'ns3::Queue::QueueMode', [])
    ## red-queue.h (module 'network'): uint32_t ns3::RedQueue::GetQueueSize() [member function]
    cls.add_method('GetQueueSize', 'uint32_t', [])
    ## red-queue.h (module 'network'): ns3::RedQueue::Stats ns3::RedQueue::GetStats() [member function]
    cls.add_method('GetStats', 'ns3::RedQueue::Stats', [])
    ## red-queue.h (module 'network'): static ns3::TypeId ns3::RedQueue::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## red-queue.h (module 'network'): void ns3::RedQueue::SetMode(ns3::Queue::QueueMode mode) [member function]
    cls.add_method('SetMode', 'void', [param('ns3::Queue::QueueMode', 'mode')])
    ## red-queue.h (module 'network'): void ns3::RedQueue::SetQueueLimit(uint32_t lim) [member function]
    cls.add_method('SetQueueLimit', 'void', [param('uint32_t', 'lim')])
    ## red-queue.h (module 'network'): void ns3::RedQueue::SetTh(double minTh, double maxTh) [member function]
    cls.add_method('SetTh', 'void', [param('double', 'minTh'), param('double', 'maxTh')])
    ## red-queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::RedQueue::DoDequeue() [member function]
    cls.add_method('DoDequeue', 'ns3::Ptr< ns3::Packet >', [], visibility='private', is_virtual=True)
    ## red-queue.h (module 'network'): bool ns3::RedQueue::DoEnqueue(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True)
    ## red-queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::RedQueue::DoPeek() const [member function]
    cls.add_method('DoPeek', 'ns3::Ptr< ns3::Packet const >', [], is_const=True, visibility='private', is_virtual=True)
    return

# Registers bindings for the nested ns3::RedQueue::Stats POD (three mutable
# uint32_t drop counters exposed as instance attributes).
def register_Ns3RedQueueStats_methods(root_module, cls):
    ## red-queue.h (module 'network'): ns3::RedQueue::Stats::Stats() [constructor]
    cls.add_constructor([])
    ## red-queue.h (module 'network'): ns3::RedQueue::Stats::Stats(ns3::RedQueue::Stats const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::RedQueue::Stats const &', 'arg0')])
    ## red-queue.h (module 'network'): ns3::RedQueue::Stats::forcedDrop [variable]
    cls.add_instance_attribute('forcedDrop', 'uint32_t', is_const=False)
    ## red-queue.h (module 'network'): ns3::RedQueue::Stats::qLimDrop [variable]
    cls.add_instance_attribute('qLimDrop', 'uint32_t', is_const=False)
    ## red-queue.h (module 'network'): ns3::RedQueue::Stats::unforcedDrop [variable]
    cls.add_instance_attribute('unforcedDrop', 'uint32_t', is_const=False)
    return

# The following functions register the ns3::SimpleRefCount<T, PARENT, DELETER>
# template instantiations; each exposes the default/copy constructors and the
# static Cleanup() method.
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter< ns3::PbbAddressBlock > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3PbbMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbMessage__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter< ns3::PbbMessage > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

# NOTE(review): unlike its siblings, this instantiation has ns3::Header
# (not ns3::empty) as the PARENT template argument.
def register_Ns3SimpleRefCount__Ns3PbbPacket_Ns3Header_Ns3DefaultDeleter__lt__ns3PbbPacket__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter< ns3::PbbPacket > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter< ns3::PbbTlv > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

# NOTE(review): the next registration function's name continues beyond this
# chunk boundary; the dangling 'def ' below is intentional and must be kept.
def 
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Socket_methods(root_module, cls): ## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [copy constructor] cls.add_constructor([param('ns3::Socket const &', 'arg0')]) ## socket.h (module 'network'): ns3::Socket::Socket() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function] cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind() [member function] cls.add_method('Bind', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Bind6() [member function] cls.add_method('Bind6', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function] 
cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')], is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Close() [member function] cls.add_method('Close', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function] cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function] cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function] cls.add_method('GetAllowBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function] cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', []) ## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function] cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function] cls.add_method('GetRxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function] cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_pure_virtual=True, is_const=True, 
is_virtual=True) ## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function] cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function] cls.add_method('GetTxAvailable', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function] cls.add_method('IsRecvPktInfo', 'bool', [], is_const=True) ## socket.h (module 'network'): int ns3::Socket::Listen() [member function] cls.add_method('Listen', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function] cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', []) ## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Recv', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & 
fromAddress) [member function] cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function] cls.add_method('RecvFrom', 'int', [param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')]) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p')]) ## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function] cls.add_method('Send', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')]) ## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function] cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function] cls.add_method('SendTo', 'int', [param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')]) ## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, 
ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function] cls.add_method('SetAcceptCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')]) ## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function] cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function] cls.add_method('SetCloseCallbacks', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')]) ## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member 
function] cls.add_method('SetConnectCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')]) ## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function] cls.add_method('SetDataSentCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function] cls.add_method('SetRecvCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')]) ## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function] cls.add_method('SetRecvPktInfo', 'void', [param('bool', 'flag')]) ## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function] cls.add_method('SetSendCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')]) ## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function] 
cls.add_method('ShutdownRecv', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function] cls.add_method('ShutdownSend', 'int', [], is_pure_virtual=True, is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function] cls.add_method('NotifyConnectionFailed', 'void', [], visibility='protected') ## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function] cls.add_method('NotifyConnectionRequest', 'bool', [param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function] cls.add_method('NotifyConnectionSucceeded', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function] cls.add_method('NotifyDataRecv', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function] cls.add_method('NotifyDataSent', 'void', [param('uint32_t', 'size')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function] cls.add_method('NotifyErrorClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function] cls.add_method('NotifyNewConnectionCreated', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')], visibility='protected') ## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function] cls.add_method('NotifyNormalClose', 'void', [], visibility='protected') ## socket.h (module 'network'): void 
ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function] cls.add_method('NotifySend', 'void', [param('uint32_t', 'spaceAvailable')], visibility='protected') return def register_Ns3SocketAddressTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag(ns3::SocketAddressTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketAddressTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketAddressTag::SocketAddressTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketAddressTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::Address ns3::SocketAddressTag::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketAddressTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketAddressTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): static ns3::TypeId ns3::SocketAddressTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketAddressTag::SetAddress(ns3::Address addr) [member function] 
cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')]) return def register_Ns3SocketFactory_methods(root_module, cls): ## socket-factory.h (module 'network'): ns3::SocketFactory::SocketFactory(ns3::SocketFactory const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketFactory const &', 'arg0')]) ## socket-factory.h (module 'network'): ns3::SocketFactory::SocketFactory() [constructor] cls.add_constructor([]) ## socket-factory.h (module 'network'): ns3::Ptr<ns3::Socket> ns3::SocketFactory::CreateSocket() [member function] cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [], is_pure_virtual=True, is_virtual=True) ## socket-factory.h (module 'network'): static ns3::TypeId ns3::SocketFactory::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3SocketIpTtlTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function] cls.add_method('GetTtl', 'uint8_t', [], is_const=True) ## socket.h (module 'network'): static ns3::TypeId 
ns3::SocketIpTtlTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function] cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')]) return def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls): ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [copy constructor] cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')]) ## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor] cls.add_constructor([]) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function] cls.add_method('Disable', 'void', []) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function] cls.add_method('Enable', 'void', []) ## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## 
socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int 
v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'value')]) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), 
param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## 
nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function] cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'timeUnit')], is_const=True) return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool 
ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Trailer_methods(root_module, cls): cls.add_output_stream_operator() ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor] cls.add_constructor([]) ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::Trailer const &', 'arg0')]) ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True) ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() 
[member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True) ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3Application_methods(root_module, cls): ## application.h (module 'network'): ns3::Application::Application(ns3::Application const & arg0) [copy constructor] cls.add_constructor([param('ns3::Application const &', 'arg0')]) ## application.h (module 'network'): ns3::Application::Application() [constructor] cls.add_constructor([]) ## application.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Application::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True) ## application.h (module 'network'): static ns3::TypeId ns3::Application::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## application.h (module 'network'): void ns3::Application::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')]) ## application.h (module 'network'): void ns3::Application::SetStartTime(ns3::Time start) [member function] cls.add_method('SetStartTime', 'void', [param('ns3::Time', 'start')]) ## application.h (module 'network'): void ns3::Application::SetStopTime(ns3::Time stop) [member function] cls.add_method('SetStopTime', 'void', [param('ns3::Time', 'stop')]) ## application.h (module 'network'): void ns3::Application::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## application.h (module 'network'): void 
ns3::Application::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) ## application.h (module 'network'): void ns3::Application::StartApplication() [member function] cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True) ## application.h (module 'network'): void ns3::Application::StopApplication() [member function] cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## 
attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const 
[member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3BooleanChecker_methods(root_module, cls): ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor] cls.add_constructor([]) ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')]) return def register_Ns3BooleanValue_methods(root_module, cls): cls.add_output_stream_operator() ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')]) ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor] cls.add_constructor([]) ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor] cls.add_constructor([param('bool', 'value')]) ## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function] cls.add_method('Get', 'bool', [], is_const=True) ## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function] cls.add_method('Set', 'void', [param('bool', 'value')]) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): 
ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def 
register_Ns3Channel_methods(root_module, cls): ## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor] cls.add_constructor([param('ns3::Channel const &', 'arg0')]) ## channel.h (module 'network'): ns3::Channel::Channel() [constructor] cls.add_constructor([]) ## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True) ## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) return def register_Ns3DataRateChecker_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateChecker::DataRateChecker(ns3::DataRateChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateChecker const &', 'arg0')]) return def register_Ns3DataRateValue_methods(root_module, cls): ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue() [constructor] cls.add_constructor([]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRateValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::DataRateValue const &', 'arg0')]) ## data-rate.h (module 'network'): ns3::DataRateValue::DataRateValue(ns3::DataRate const & value) [constructor] cls.add_constructor([param('ns3::DataRate const &', 'value')]) ## data-rate.h (module 
'network'): ns3::Ptr<ns3::AttributeValue> ns3::DataRateValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): bool ns3::DataRateValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## data-rate.h (module 'network'): ns3::DataRate ns3::DataRateValue::Get() const [member function] cls.add_method('Get', 'ns3::DataRate', [], is_const=True) ## data-rate.h (module 'network'): std::string ns3::DataRateValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## data-rate.h (module 'network'): void ns3::DataRateValue::Set(ns3::DataRate const & value) [member function] cls.add_method('Set', 'void', [param('ns3::DataRate const &', 'value')]) return def register_Ns3DropTailQueue_methods(root_module, cls): ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue::DropTailQueue(ns3::DropTailQueue const & arg0) [copy constructor] cls.add_constructor([param('ns3::DropTailQueue const &', 'arg0')]) ## drop-tail-queue.h (module 'network'): ns3::DropTailQueue::DropTailQueue() [constructor] cls.add_constructor([]) ## drop-tail-queue.h (module 'network'): ns3::Queue::QueueMode ns3::DropTailQueue::GetMode() [member function] cls.add_method('GetMode', 'ns3::Queue::QueueMode', []) ## drop-tail-queue.h (module 'network'): static ns3::TypeId ns3::DropTailQueue::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## drop-tail-queue.h (module 'network'): void ns3::DropTailQueue::SetMode(ns3::Queue::QueueMode mode) [member function] 
cls.add_method('SetMode', 'void', [param('ns3::Queue::QueueMode', 'mode')]) ## drop-tail-queue.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::DropTailQueue::DoDequeue() [member function] cls.add_method('DoDequeue', 'ns3::Ptr< ns3::Packet >', [], visibility='private', is_virtual=True) ## drop-tail-queue.h (module 'network'): bool ns3::DropTailQueue::DoEnqueue(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoEnqueue', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## drop-tail-queue.h (module 'network'): ns3::Ptr<const ns3::Packet> ns3::DropTailQueue::DoPeek() const [member function] cls.add_method('DoPeek', 'ns3::Ptr< ns3::Packet const >', [], is_const=True, visibility='private', is_virtual=True) return def register_Ns3EmptyAttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True) ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const 
>', 'checker')], is_const=True, visibility='private', is_virtual=True) return def register_Ns3ErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::ErrorModel::ErrorModel(ns3::ErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::ErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::ErrorModel::ErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): void ns3::ErrorModel::Disable() [member function] cls.add_method('Disable', 'void', []) ## error-model.h (module 'network'): void ns3::ErrorModel::Enable() [member function] cls.add_method('Enable', 'void', []) ## error-model.h (module 'network'): static ns3::TypeId ns3::ErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): bool ns3::ErrorModel::IsCorrupt(ns3::Ptr<ns3::Packet> pkt) [member function] cls.add_method('IsCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'pkt')]) ## error-model.h (module 'network'): bool ns3::ErrorModel::IsEnabled() const [member function] cls.add_method('IsEnabled', 'bool', [], is_const=True) ## error-model.h (module 'network'): void ns3::ErrorModel::Reset() [member function] cls.add_method('Reset', 'void', []) ## error-model.h (module 'network'): bool ns3::ErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> arg0) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'arg0')], is_pure_virtual=True, visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::ErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], is_pure_virtual=True, visibility='private', is_virtual=True) return def register_Ns3EthernetHeader_methods(root_module, cls): ## ethernet-header.h (module 'network'): ns3::EthernetHeader::EthernetHeader(ns3::EthernetHeader const & arg0) [copy constructor] cls.add_constructor([param('ns3::EthernetHeader const &', 'arg0')]) 
## ethernet-header.h (module 'network'): ns3::EthernetHeader::EthernetHeader(bool hasPreamble) [constructor] cls.add_constructor([param('bool', 'hasPreamble')]) ## ethernet-header.h (module 'network'): ns3::EthernetHeader::EthernetHeader() [constructor] cls.add_constructor([]) ## ethernet-header.h (module 'network'): uint32_t ns3::EthernetHeader::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## ethernet-header.h (module 'network'): ns3::Mac48Address ns3::EthernetHeader::GetDestination() const [member function] cls.add_method('GetDestination', 'ns3::Mac48Address', [], is_const=True) ## ethernet-header.h (module 'network'): uint32_t ns3::EthernetHeader::GetHeaderSize() const [member function] cls.add_method('GetHeaderSize', 'uint32_t', [], is_const=True) ## ethernet-header.h (module 'network'): ns3::TypeId ns3::EthernetHeader::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): uint16_t ns3::EthernetHeader::GetLengthType() const [member function] cls.add_method('GetLengthType', 'uint16_t', [], is_const=True) ## ethernet-header.h (module 'network'): ns3::ethernet_header_t ns3::EthernetHeader::GetPacketType() const [member function] cls.add_method('GetPacketType', 'ns3::ethernet_header_t', [], is_const=True) ## ethernet-header.h (module 'network'): uint64_t ns3::EthernetHeader::GetPreambleSfd() const [member function] cls.add_method('GetPreambleSfd', 'uint64_t', [], is_const=True) ## ethernet-header.h (module 'network'): uint32_t ns3::EthernetHeader::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): ns3::Mac48Address ns3::EthernetHeader::GetSource() const [member function] cls.add_method('GetSource', 'ns3::Mac48Address', 
[], is_const=True) ## ethernet-header.h (module 'network'): static ns3::TypeId ns3::EthernetHeader::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetDestination(ns3::Mac48Address destination) [member function] cls.add_method('SetDestination', 'void', [param('ns3::Mac48Address', 'destination')]) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetLengthType(uint16_t size) [member function] cls.add_method('SetLengthType', 'void', [param('uint16_t', 'size')]) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetPreambleSfd(uint64_t preambleSfd) [member function] cls.add_method('SetPreambleSfd', 'void', [param('uint64_t', 'preambleSfd')]) ## ethernet-header.h (module 'network'): void ns3::EthernetHeader::SetSource(ns3::Mac48Address source) [member function] cls.add_method('SetSource', 'void', [param('ns3::Mac48Address', 'source')]) return def register_Ns3EthernetTrailer_methods(root_module, cls): ## ethernet-trailer.h (module 'network'): ns3::EthernetTrailer::EthernetTrailer(ns3::EthernetTrailer const & arg0) [copy constructor] cls.add_constructor([param('ns3::EthernetTrailer const &', 'arg0')]) ## ethernet-trailer.h (module 'network'): ns3::EthernetTrailer::EthernetTrailer() [constructor] cls.add_constructor([]) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::CalcFcs(ns3::Ptr<const ns3::Packet> p) [member function] cls.add_method('CalcFcs', 'void', 
[param('ns3::Ptr< ns3::Packet const >', 'p')]) ## ethernet-trailer.h (module 'network'): bool ns3::EthernetTrailer::CheckFcs(ns3::Ptr<const ns3::Packet> p) const [member function] cls.add_method('CheckFcs', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p')], is_const=True) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::Deserialize(ns3::Buffer::Iterator end) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_virtual=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::EnableFcs(bool enable) [member function] cls.add_method('EnableFcs', 'void', [param('bool', 'enable')]) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::GetFcs() [member function] cls.add_method('GetFcs', 'uint32_t', []) ## ethernet-trailer.h (module 'network'): ns3::TypeId ns3::EthernetTrailer::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): uint32_t ns3::EthernetTrailer::GetTrailerSize() const [member function] cls.add_method('GetTrailerSize', 'uint32_t', [], is_const=True) ## ethernet-trailer.h (module 'network'): static ns3::TypeId ns3::EthernetTrailer::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::Serialize(ns3::Buffer::Iterator end) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 
'end')], is_const=True, is_virtual=True) ## ethernet-trailer.h (module 'network'): void ns3::EthernetTrailer::SetFcs(uint32_t fcs) [member function] cls.add_method('SetFcs', 'void', [param('uint32_t', 'fcs')]) return def register_Ns3EventImpl_methods(root_module, cls): ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor] cls.add_constructor([param('ns3::EventImpl const &', 'arg0')]) ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor] cls.add_constructor([]) ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function] cls.add_method('Cancel', 'void', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function] cls.add_method('Invoke', 'void', []) ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function] cls.add_method('IsCancelled', 'bool', []) ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function] cls.add_method('Notify', 'void', [], is_pure_virtual=True, visibility='protected', is_virtual=True) return def register_Ns3Ipv4AddressChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')]) return def register_Ns3Ipv4AddressValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor] 
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')]) return def register_Ns3Ipv4MaskChecker_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')]) return def register_Ns3Ipv4MaskValue_methods(root_module, cls): ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')]) ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True) ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')]) return def register_Ns3Ipv6AddressChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')]) return def register_Ns3Ipv6AddressValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() 
[constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Address const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')]) return def register_Ns3Ipv6PrefixChecker_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor] 
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')]) return def register_Ns3Ipv6PrefixValue_methods(root_module, cls): ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor] cls.add_constructor([]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')]) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor] cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')]) ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function] cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True) ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')]) return def register_Ns3ListErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): 
ns3::ListErrorModel::ListErrorModel(ns3::ListErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::ListErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::ListErrorModel::ListErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): std::list<unsigned int, std::allocator<unsigned int> > ns3::ListErrorModel::GetList() const [member function] cls.add_method('GetList', 'std::list< unsigned int >', [], is_const=True) ## error-model.h (module 'network'): static ns3::TypeId ns3::ListErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): void ns3::ListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function] cls.add_method('SetList', 'void', [param('std::list< unsigned int > const &', 'packetlist')]) ## error-model.h (module 'network'): bool ns3::ListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::ListErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3Mac48AddressChecker_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')]) return def register_Ns3Mac48AddressValue_methods(root_module, cls): ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor] cls.add_constructor([]) ## mac48-address.h (module 'network'): 
ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')]) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor] cls.add_constructor([param('ns3::Mac48Address const &', 'value')]) ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True) ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')]) return def register_Ns3NetDevice_methods(root_module, cls): ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor] cls.add_constructor([]) ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::NetDevice const &', 'arg0')]) ## net-device.h (module 'network'): void 
ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True) 
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, 
ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True) ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True) return def register_Ns3NixVector_methods(root_module, cls): cls.add_output_stream_operator() ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor] cls.add_constructor([]) ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor] cls.add_constructor([param('ns3::NixVector const &', 'o')]) ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function] cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function] cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True) ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function] cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 
'numberOfBits')]) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function] cls.add_method('GetRemainingBits', 'uint32_t', []) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3Node_methods(root_module, cls): ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor] cls.add_constructor([param('ns3::Node const &', 'arg0')]) ## node.h (module 'network'): ns3::Node::Node() [constructor] cls.add_constructor([]) ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor] cls.add_constructor([param('uint32_t', 'systemId')]) ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function] cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')]) ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function] cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')]) ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function] cls.add_method('ChecksumEnabled', 'bool', [], is_static=True) ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function] cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', 
[param('uint32_t', 'index')], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function] cls.add_method('GetId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function] cls.add_method('GetNApplications', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True) ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function] cls.add_method('GetSystemId', 'uint32_t', [], is_const=True) ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function] cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), 
param('bool', 'promiscuous', default_value='false')]) ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function] cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')]) ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function] cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')]) ## node.h (module 'network'): void ns3::Node::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## node.h (module 'network'): void ns3::Node::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectFactoryChecker_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor] cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')]) return def register_Ns3ObjectFactoryValue_methods(root_module, cls): ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor] 
cls.add_constructor([]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')]) ## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor] cls.add_constructor([param('ns3::ObjectFactory const &', 'value')]) ## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function] cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True) ## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function] cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')]) return def register_Ns3OutputStreamWrapper_methods(root_module, cls): ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [copy constructor] cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')]) ## output-stream-wrapper.h (module 
'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::_Ios_Openmode filemode) [constructor] cls.add_constructor([param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')]) ## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor] cls.add_constructor([param('std::ostream *', 'os')]) ## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function] cls.add_method('GetStream', 'std::ostream *', []) return def register_Ns3Packet_methods(root_module, cls): cls.add_output_stream_operator() ## packet.h (module 'network'): ns3::Packet::Packet() [constructor] cls.add_constructor([]) ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor] cls.add_constructor([param('ns3::Packet const &', 'o')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor] cls.add_constructor([param('uint32_t', 'size')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')]) ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor] cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')]) ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function] cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function] cls.add_method('AddHeader', 'void', 
[param('ns3::Header const &', 'header')]) ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function] cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function] cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function] cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')]) ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function] cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function] cls.add_method('EnableChecking', 'void', [], is_static=True) ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function] cls.add_method('EnablePrinting', 'void', [], 
is_static=True) ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function] cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function] cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True) ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function] cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True) ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function] cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function] cls.add_method('GetUid', 'uint64_t', [], is_const=True) ## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], deprecated=True, is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function] cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True) ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function] cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function] cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer 
&', 'trailer')]) ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function] cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function] cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True) ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function] cls.add_method('RemoveAllByteTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function] cls.add_method('RemoveAllPacketTags', 'void', []) ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function] cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')]) ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function] cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')]) ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function] cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')]) ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) ## packet.h 
## (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'arg0')])
    return

def register_Ns3PacketSocket_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PacketSocket (auto-generated)."""
    ## packet-socket.h (module 'network'): ns3::PacketSocket::PacketSocket(ns3::PacketSocket const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocket const &', 'arg0')])
    ## packet-socket.h (module 'network'): ns3::PacketSocket::PacketSocket() [constructor]
    cls.add_constructor([])
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Bind() [member function]
    cls.add_method('Bind', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Bind(ns3::Address const & address) [member function]
    cls.add_method('Bind', 'int', [param('ns3::Address const &', 'address')], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Bind6() [member function]
    cls.add_method('Bind6', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Close() [member function]
    cls.add_method('Close', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Connect(ns3::Address const & address) [member function]
    cls.add_method('Connect', 'int', [param('ns3::Address const &', 'address')], is_virtual=True)
    ## packet-socket.h (module 'network'): bool ns3::PacketSocket::GetAllowBroadcast() const [member function]
    cls.add_method('GetAllowBroadcast', 'bool', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Socket::SocketErrno ns3::PacketSocket::GetErrno() const [member function]
    cls.add_method('GetErrno', 'ns3::Socket::SocketErrno', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::PacketSocket::GetNode() const [member function]
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): uint32_t ns3::PacketSocket::GetRxAvailable() const [member function]
    cls.add_method('GetRxAvailable', 'uint32_t', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::GetSockName(ns3::Address & address) const [member function]
    cls.add_method('GetSockName', 'int', [param('ns3::Address &', 'address')], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Socket::SocketType ns3::PacketSocket::GetSocketType() const [member function]
    cls.add_method('GetSocketType', 'ns3::Socket::SocketType', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): uint32_t ns3::PacketSocket::GetTxAvailable() const [member function]
    cls.add_method('GetTxAvailable', 'uint32_t', [], is_const=True, is_virtual=True)
    ## packet-socket.h (module 'network'): static ns3::TypeId ns3::PacketSocket::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Listen() [member function]
    cls.add_method('Listen', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PacketSocket::Recv(uint32_t maxSize, uint32_t flags) [member function]
    cls.add_method('Recv', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags')], is_virtual=True)
    ## packet-socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PacketSocket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
    cls.add_method('RecvFrom', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
    cls.add_method('Send', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
    cls.add_method('SendTo', 'int', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')], is_virtual=True)
    ## packet-socket.h (module 'network'): bool ns3::PacketSocket::SetAllowBroadcast(bool allowBroadcast) [member function]
    cls.add_method('SetAllowBroadcast', 'bool', [param('bool', 'allowBroadcast')], is_virtual=True)
    ## packet-socket.h (module 'network'): void ns3::PacketSocket::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::ShutdownRecv() [member function]
    cls.add_method('ShutdownRecv', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): int ns3::PacketSocket::ShutdownSend() [member function]
    cls.add_method('ShutdownSend', 'int', [], is_virtual=True)
    ## packet-socket.h (module 'network'): void ns3::PacketSocket::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3PacketSocketFactory_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PacketSocketFactory (auto-generated)."""
    ## packet-socket-factory.h (module 'network'): ns3::PacketSocketFactory::PacketSocketFactory(ns3::PacketSocketFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketSocketFactory const &', 'arg0')])
    ## packet-socket-factory.h (module 'network'): ns3::PacketSocketFactory::PacketSocketFactory() [constructor]
    cls.add_constructor([])
    ## packet-socket-factory.h (module 'network'): ns3::Ptr<ns3::Socket> ns3::PacketSocketFactory::CreateSocket() [member function]
    cls.add_method('CreateSocket', 'ns3::Ptr< ns3::Socket >', [], is_virtual=True)
    ## packet-socket-factory.h (module 'network'): static ns3::TypeId ns3::PacketSocketFactory::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return

def register_Ns3PbbAddressBlock_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PbbAddressBlock (auto-generated)."""
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbAddressBlock::PbbAddressBlock(ns3::PbbAddressBlock const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressBlock const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressBlock::PbbAddressBlock() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlock::AddressBack() const [member function]
    cls.add_method('AddressBack', 'ns3::Address', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressBegin() [member function]
    cls.add_method('AddressBegin', 'std::_List_iterator< ns3::Address >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Address> ns3::PbbAddressBlock::AddressBegin() const [member function]
    cls.add_method('AddressBegin', 'std::_List_const_iterator< ns3::Address >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressClear() [member function]
    cls.add_method('AddressClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressBlock::AddressEmpty() const [member function]
    cls.add_method('AddressEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressEnd() [member function]
    cls.add_method('AddressEnd', 'std::_List_iterator< ns3::Address >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Address> ns3::PbbAddressBlock::AddressEnd() const [member function]
    cls.add_method('AddressEnd', 'std::_List_const_iterator< ns3::Address >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressErase(std::_List_iterator<ns3::Address> position) [member function]
    cls.add_method('AddressErase', 'std::_List_iterator< ns3::Address >', [param('std::_List_iterator< ns3::Address >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressErase(std::_List_iterator<ns3::Address> first, std::_List_iterator<ns3::Address> last) [member function]
    cls.add_method('AddressErase', 'std::_List_iterator< ns3::Address >', [param('std::_List_iterator< ns3::Address >', 'first'), param('std::_List_iterator< ns3::Address >', 'last')])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlock::AddressFront() const [member function]
    cls.add_method('AddressFront', 'ns3::Address', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Address> ns3::PbbAddressBlock::AddressInsert(std::_List_iterator<ns3::Address> position, ns3::Address const value) [member function]
    cls.add_method('AddressInsert', 'std::_List_iterator< ns3::Address >', [param('std::_List_iterator< ns3::Address >', 'position'), param('ns3::Address const', 'value')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPopBack() [member function]
    cls.add_method('AddressPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPopFront() [member function]
    cls.add_method('AddressPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPushBack(ns3::Address address) [member function]
    cls.add_method('AddressPushBack', 'void', [param('ns3::Address', 'address')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::AddressPushFront(ns3::Address address) [member function]
    cls.add_method('AddressPushFront', 'void', [param('ns3::Address', 'address')])
    ## packetbb.h (module 'network'): int ns3::PbbAddressBlock::AddressSize() const [member function]
    cls.add_method('AddressSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): uint32_t ns3::PbbAddressBlock::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlock::PrefixBack() const [member function]
    cls.add_method('PrefixBack', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixBegin() [member function]
    cls.add_method('PrefixBegin', 'std::_List_iterator< unsigned char >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<unsigned char> ns3::PbbAddressBlock::PrefixBegin() const [member function]
    cls.add_method('PrefixBegin', 'std::_List_const_iterator< unsigned char >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixClear() [member function]
    cls.add_method('PrefixClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressBlock::PrefixEmpty() const [member function]
    cls.add_method('PrefixEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixEnd() [member function]
    cls.add_method('PrefixEnd', 'std::_List_iterator< unsigned char >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<unsigned char> ns3::PbbAddressBlock::PrefixEnd() const [member function]
    cls.add_method('PrefixEnd', 'std::_List_const_iterator< unsigned char >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixErase(std::_List_iterator<unsigned char> position) [member function]
    cls.add_method('PrefixErase', 'std::_List_iterator< unsigned char >', [param('std::_List_iterator< unsigned char >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixErase(std::_List_iterator<unsigned char> first, std::_List_iterator<unsigned char> last) [member function]
    cls.add_method('PrefixErase', 'std::_List_iterator< unsigned char >', [param('std::_List_iterator< unsigned char >', 'first'), param('std::_List_iterator< unsigned char >', 'last')])
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlock::PrefixFront() const [member function]
    cls.add_method('PrefixFront', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<unsigned char> ns3::PbbAddressBlock::PrefixInsert(std::_List_iterator<unsigned char> position, uint8_t const value) [member function]
    cls.add_method('PrefixInsert', 'std::_List_iterator< unsigned char >', [param('std::_List_iterator< unsigned char >', 'position'), param('uint8_t const', 'value')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPopBack() [member function]
    cls.add_method('PrefixPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPopFront() [member function]
    cls.add_method('PrefixPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPushBack(uint8_t prefix) [member function]
    cls.add_method('PrefixPushBack', 'void', [param('uint8_t', 'prefix')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrefixPushFront(uint8_t prefix) [member function]
    cls.add_method('PrefixPushFront', 'void', [param('uint8_t', 'prefix')])
    ## packetbb.h (module 'network'): int ns3::PbbAddressBlock::PrefixSize() const [member function]
    cls.add_method('PrefixSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressBlock::TlvBack() [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbAddressTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> const ns3::PbbAddressBlock::TlvBack() const [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbAddressTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvBegin() [member function]
    cls.add_method('TlvBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvBegin() const [member function]
    cls.add_method('TlvBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvClear() [member function]
    cls.add_method('TlvClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbAddressBlock::TlvEmpty() const [member function]
    cls.add_method('TlvEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvEnd() [member function]
    cls.add_method('TlvEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvEnd() const [member function]
    cls.add_method('TlvEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > last) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> ns3::PbbAddressBlock::TlvFront() [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbAddressTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressTlv> const ns3::PbbAddressBlock::TlvFront() const [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbAddressTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > ns3::PbbAddressBlock::TlvInsert(std::_List_iterator<ns3::Ptr<ns3::PbbAddressTlv> > position, ns3::Ptr<ns3::PbbTlv> const value) [member function]
    cls.add_method('TlvInsert', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressTlv > >', 'position'), param('ns3::Ptr< ns3::PbbTlv > const', 'value')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPopBack() [member function]
    cls.add_method('TlvPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPopFront() [member function]
    cls.add_method('TlvPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPushBack(ns3::Ptr<ns3::PbbAddressTlv> address) [member function]
    cls.add_method('TlvPushBack', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'address')])
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::TlvPushFront(ns3::Ptr<ns3::PbbAddressTlv> address) [member function]
    cls.add_method('TlvPushFront', 'void', [param('ns3::Ptr< ns3::PbbAddressTlv >', 'address')])
    ## packetbb.h (module 'network'): int ns3::PbbAddressBlock::TlvSize() const [member function]
    cls.add_method('TlvSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlock::DeserializeAddress(uint8_t * buffer) const [member function]
    cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlock::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'uint8_t', [], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::PrintAddress(std::ostream & os, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlock::SerializeAddress(uint8_t * buffer, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbAddressBlockIpv4_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PbbAddressBlockIpv4 (auto-generated)."""
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv4::PbbAddressBlockIpv4(ns3::PbbAddressBlockIpv4 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressBlockIpv4 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv4::PbbAddressBlockIpv4() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlockIpv4::DeserializeAddress(uint8_t * buffer) const [member function]
    cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlockIpv4::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'uint8_t', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv4::PrintAddress(std::ostream & os, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv4::SerializeAddress(uint8_t * buffer, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbAddressBlockIpv6_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PbbAddressBlockIpv6 (auto-generated)."""
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv6::PbbAddressBlockIpv6(ns3::PbbAddressBlockIpv6 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbAddressBlockIpv6 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbAddressBlockIpv6::PbbAddressBlockIpv6() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbAddressBlockIpv6::DeserializeAddress(uint8_t * buffer) const [member function]
    cls.add_method('DeserializeAddress', 'ns3::Address', [param('uint8_t *', 'buffer')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressBlockIpv6::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'uint8_t', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv6::PrintAddress(std::ostream & os, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('PrintAddress', 'void', [param('std::ostream &', 'os'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbAddressBlockIpv6::SerializeAddress(uint8_t * buffer, std::_List_const_iterator<ns3::Address> iter) const [member function]
    cls.add_method('SerializeAddress', 'void', [param('uint8_t *', 'buffer'), param('std::_List_const_iterator< ns3::Address >', 'iter')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbMessage_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PbbMessage (auto-generated)."""
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    ## packetbb.h (module 'network'): ns3::PbbMessage::PbbMessage(ns3::PbbMessage const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbMessage const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbMessage::PbbMessage() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessage::AddressBlockBack() [member function]
    cls.add_method('AddressBlockBack', 'ns3::Ptr< ns3::PbbAddressBlock >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> const ns3::PbbMessage::AddressBlockBack() const [member function]
    cls.add_method('AddressBlockBack', 'ns3::Ptr< ns3::PbbAddressBlock > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockBegin() [member function]
    cls.add_method('AddressBlockBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockBegin() const [member function]
    cls.add_method('AddressBlockBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockClear() [member function]
    cls.add_method('AddressBlockClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::AddressBlockEmpty() const [member function]
    cls.add_method('AddressBlockEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockEnd() [member function]
    cls.add_method('AddressBlockEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockEnd() const [member function]
    cls.add_method('AddressBlockEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > position) [member function]
    cls.add_method('AddressBlockErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > ns3::PbbMessage::AddressBlockErase(std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > first, std::_List_iterator<ns3::Ptr<ns3::PbbAddressBlock> > last) [member function]
    cls.add_method('AddressBlockErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbAddressBlock > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessage::AddressBlockFront() [member function]
    cls.add_method('AddressBlockFront', 'ns3::Ptr< ns3::PbbAddressBlock >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> const ns3::PbbMessage::AddressBlockFront() const [member function]
    cls.add_method('AddressBlockFront', 'ns3::Ptr< ns3::PbbAddressBlock > const', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPopBack() [member function]
    cls.add_method('AddressBlockPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPopFront() [member function]
    cls.add_method('AddressBlockPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPushBack(ns3::Ptr<ns3::PbbAddressBlock> block) [member function]
    cls.add_method('AddressBlockPushBack', 'void', [param('ns3::Ptr< ns3::PbbAddressBlock >', 'block')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::AddressBlockPushFront(ns3::Ptr<ns3::PbbAddressBlock> block) [member function]
    cls.add_method('AddressBlockPushFront', 'void', [param('ns3::Ptr< ns3::PbbAddressBlock >', 'block')])
    ## packetbb.h (module 'network'): int ns3::PbbMessage::AddressBlockSize() const [member function]
    cls.add_method('AddressBlockSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Deserialize(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')])
    ## packetbb.h (module 'network'): static ns3::Ptr<ns3::PbbMessage> ns3::PbbMessage::DeserializeMessage(ns3::Buffer::Iterator & start) [member function]
    cls.add_method('DeserializeMessage', 'ns3::Ptr< ns3::PbbMessage >', [param('ns3::Buffer::Iterator &', 'start')], is_static=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbMessage::GetHopCount() const [member function]
    cls.add_method('GetHopCount', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbMessage::GetHopLimit() const [member function]
    cls.add_method('GetHopLimit', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessage::GetOriginatorAddress() const [member function]
    cls.add_method('GetOriginatorAddress', 'ns3::Address', [], is_const=True)
    ## packetbb.h (module 'network'): uint16_t ns3::PbbMessage::GetSequenceNumber() const [member function]
    cls.add_method('GetSequenceNumber', 'uint16_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint32_t ns3::PbbMessage::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packetbb.h (module 'network'): uint8_t ns3::PbbMessage::GetType() const [member function]
    cls.add_method('GetType', 'uint8_t', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasHopCount() const [member function]
    cls.add_method('HasHopCount', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasHopLimit() const [member function]
    cls.add_method('HasHopLimit', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasOriginatorAddress() const [member function]
    cls.add_method('HasOriginatorAddress', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::HasSequenceNumber() const [member function]
    cls.add_method('HasSequenceNumber', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Print(std::ostream & os, int level) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::Serialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetHopCount(uint8_t hopcount) [member function]
    cls.add_method('SetHopCount', 'void', [param('uint8_t', 'hopcount')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetHopLimit(uint8_t hoplimit) [member function]
    cls.add_method('SetHopLimit', 'void', [param('uint8_t', 'hoplimit')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetOriginatorAddress(ns3::Address address) [member function]
    cls.add_method('SetOriginatorAddress', 'void', [param('ns3::Address', 'address')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetSequenceNumber(uint16_t seqnum) [member function]
    cls.add_method('SetSequenceNumber', 'void', [param('uint16_t', 'seqnum')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SetType(uint8_t type) [member function]
    cls.add_method('SetType', 'void', [param('uint8_t', 'type')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbMessage::TlvBack() [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbMessage::TlvBack() const [member function]
    cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvBegin() [member function]
    cls.add_method('TlvBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvBegin() const [member function]
    cls.add_method('TlvBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvClear() [member function]
    cls.add_method('TlvClear', 'void', [])
    ## packetbb.h (module 'network'): bool ns3::PbbMessage::TlvEmpty() const [member function]
    cls.add_method('TlvEmpty', 'bool', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvEnd() [member function]
    cls.add_method('TlvEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [])
    ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvEnd() const [member function]
    cls.add_method('TlvEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True)
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position')])
    ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbMessage::TlvErase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > last) [member function]
    cls.add_method('TlvErase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'last')])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbMessage::TlvFront() [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv >', [])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbMessage::TlvFront() const [member function]
    cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPopBack() [member function]
    cls.add_method('TlvPopBack', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPopFront() [member function]
    cls.add_method('TlvPopFront', 'void', [])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPushBack(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('TlvPushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): void ns3::PbbMessage::TlvPushFront(ns3::Ptr<ns3::PbbTlv> tlv) [member function]
    cls.add_method('TlvPushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')])
    ## packetbb.h (module 'network'): int ns3::PbbMessage::TlvSize() const [member function]
    cls.add_method('TlvSize', 'int', [], is_const=True)
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessage::AddressBlockDeserialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('AddressBlockDeserialize', 'ns3::Ptr< ns3::PbbAddressBlock >', [param('ns3::Buffer::Iterator &', 'start')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessage::DeserializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('DeserializeOriginatorAddress', 'ns3::Address', [param('ns3::Buffer::Iterator &', 'start')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::PbbAddressLength ns3::PbbMessage::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'ns3::PbbAddressLength', [], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::PrintOriginatorAddress(std::ostream & os) const [member function]
    cls.add_method('PrintOriginatorAddress', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessage::SerializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('SerializeOriginatorAddress', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_pure_virtual=True, is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbMessageIpv4_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PbbMessageIpv4 (auto-generated)."""
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv4::PbbMessageIpv4(ns3::PbbMessageIpv4 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbMessageIpv4 const &', 'arg0')])
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv4::PbbMessageIpv4() [constructor]
    cls.add_constructor([])
    ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessageIpv4::AddressBlockDeserialize(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('AddressBlockDeserialize', 'ns3::Ptr< ns3::PbbAddressBlock >', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessageIpv4::DeserializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('DeserializeOriginatorAddress', 'ns3::Address', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): ns3::PbbAddressLength ns3::PbbMessageIpv4::GetAddressLength() const [member function]
    cls.add_method('GetAddressLength', 'ns3::PbbAddressLength', [], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessageIpv4::PrintOriginatorAddress(std::ostream & os) const [member function]
    cls.add_method('PrintOriginatorAddress', 'void', [param('std::ostream &', 'os')], is_const=True, visibility='protected', is_virtual=True)
    ## packetbb.h (module 'network'): void ns3::PbbMessageIpv4::SerializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function]
    cls.add_method('SerializeOriginatorAddress', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True)
    return

def register_Ns3PbbMessageIpv6_methods(root_module, cls):
    """Register Python bindings for the C++ class ns3::PbbMessageIpv6 (auto-generated)."""
    ## packetbb.h (module 'network'): ns3::PbbMessageIpv6::PbbMessageIpv6(ns3::PbbMessageIpv6 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PbbMessageIpv6 const &', 'arg0')])
    ## packetbb.h
(module 'network'): ns3::PbbMessageIpv6::PbbMessageIpv6() [constructor] cls.add_constructor([]) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbAddressBlock> ns3::PbbMessageIpv6::AddressBlockDeserialize(ns3::Buffer::Iterator & start) const [member function] cls.add_method('AddressBlockDeserialize', 'ns3::Ptr< ns3::PbbAddressBlock >', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True) ## packetbb.h (module 'network'): ns3::Address ns3::PbbMessageIpv6::DeserializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function] cls.add_method('DeserializeOriginatorAddress', 'ns3::Address', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True) ## packetbb.h (module 'network'): ns3::PbbAddressLength ns3::PbbMessageIpv6::GetAddressLength() const [member function] cls.add_method('GetAddressLength', 'ns3::PbbAddressLength', [], is_const=True, visibility='protected', is_virtual=True) ## packetbb.h (module 'network'): void ns3::PbbMessageIpv6::PrintOriginatorAddress(std::ostream & os) const [member function] cls.add_method('PrintOriginatorAddress', 'void', [param('std::ostream &', 'os')], is_const=True, visibility='protected', is_virtual=True) ## packetbb.h (module 'network'): void ns3::PbbMessageIpv6::SerializeOriginatorAddress(ns3::Buffer::Iterator & start) const [member function] cls.add_method('SerializeOriginatorAddress', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True, visibility='protected', is_virtual=True) return def register_Ns3PbbPacket_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('!=') ## packetbb.h (module 'network'): ns3::PbbPacket::PbbPacket(ns3::PbbPacket const & arg0) [copy constructor] cls.add_constructor([param('ns3::PbbPacket const &', 'arg0')]) ## packetbb.h (module 'network'): ns3::PbbPacket::PbbPacket() [constructor] cls.add_constructor([]) ## packetbb.h (module 
'network'): uint32_t ns3::PbbPacket::Deserialize(ns3::Buffer::Iterator start) [member function] cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > position) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'position')]) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > first, std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > last) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', 'last')]) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > position) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', 'position')]) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::Erase(std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > first, std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > last) [member function] cls.add_method('Erase', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', [param('std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', 'first'), param('std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', 'last')]) ## packetbb.h (module 'network'): ns3::TypeId ns3::PbbPacket::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## packetbb.h (module 'network'): uint16_t ns3::PbbPacket::GetSequenceNumber() 
const [member function] cls.add_method('GetSequenceNumber', 'uint16_t', [], is_const=True) ## packetbb.h (module 'network'): uint32_t ns3::PbbPacket::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True) ## packetbb.h (module 'network'): static ns3::TypeId ns3::PbbPacket::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## packetbb.h (module 'network'): uint8_t ns3::PbbPacket::GetVersion() const [member function] cls.add_method('GetVersion', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbPacket::HasSequenceNumber() const [member function] cls.add_method('HasSequenceNumber', 'bool', [], is_const=True) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> ns3::PbbPacket::MessageBack() [member function] cls.add_method('MessageBack', 'ns3::Ptr< ns3::PbbMessage >', []) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> const ns3::PbbPacket::MessageBack() const [member function] cls.add_method('MessageBack', 'ns3::Ptr< ns3::PbbMessage > const', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageBegin() [member function] cls.add_method('MessageBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageBegin() const [member function] cls.add_method('MessageBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbMessage > >', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::MessageClear() [member function] cls.add_method('MessageClear', 'void', []) ## packetbb.h (module 'network'): bool ns3::PbbPacket::MessageEmpty() const [member function] cls.add_method('MessageEmpty', 'bool', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbMessage> > 
ns3::PbbPacket::MessageEnd() [member function] cls.add_method('MessageEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbMessage > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbMessage> > ns3::PbbPacket::MessageEnd() const [member function] cls.add_method('MessageEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbMessage > >', [], is_const=True) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> ns3::PbbPacket::MessageFront() [member function] cls.add_method('MessageFront', 'ns3::Ptr< ns3::PbbMessage >', []) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbMessage> const ns3::PbbPacket::MessageFront() const [member function] cls.add_method('MessageFront', 'ns3::Ptr< ns3::PbbMessage > const', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePopBack() [member function] cls.add_method('MessagePopBack', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePopFront() [member function] cls.add_method('MessagePopFront', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePushBack(ns3::Ptr<ns3::PbbMessage> message) [member function] cls.add_method('MessagePushBack', 'void', [param('ns3::Ptr< ns3::PbbMessage >', 'message')]) ## packetbb.h (module 'network'): void ns3::PbbPacket::MessagePushFront(ns3::Ptr<ns3::PbbMessage> message) [member function] cls.add_method('MessagePushFront', 'void', [param('ns3::Ptr< ns3::PbbMessage >', 'message')]) ## packetbb.h (module 'network'): int ns3::PbbPacket::MessageSize() const [member function] cls.add_method('MessageSize', 'int', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::Serialize(ns3::Buffer::Iterator start) const [member function] cls.add_method('Serialize', 'void', 
[param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::SetSequenceNumber(uint16_t number) [member function] cls.add_method('SetSequenceNumber', 'void', [param('uint16_t', 'number')]) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbPacket::TlvBack() [member function] cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv >', []) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbPacket::TlvBack() const [member function] cls.add_method('TlvBack', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvBegin() [member function] cls.add_method('TlvBegin', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvBegin() const [member function] cls.add_method('TlvBegin', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvClear() [member function] cls.add_method('TlvClear', 'void', []) ## packetbb.h (module 'network'): bool ns3::PbbPacket::TlvEmpty() const [member function] cls.add_method('TlvEmpty', 'bool', [], is_const=True) ## packetbb.h (module 'network'): std::_List_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvEnd() [member function] cls.add_method('TlvEnd', 'std::_List_iterator< ns3::Ptr< ns3::PbbTlv > >', []) ## packetbb.h (module 'network'): std::_List_const_iterator<ns3::Ptr<ns3::PbbTlv> > ns3::PbbPacket::TlvEnd() const [member function] cls.add_method('TlvEnd', 'std::_List_const_iterator< ns3::Ptr< ns3::PbbTlv > >', [], is_const=True) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> ns3::PbbPacket::TlvFront() [member function] cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv >', []) ## packetbb.h (module 'network'): ns3::Ptr<ns3::PbbTlv> const ns3::PbbPacket::TlvFront() const 
[member function] cls.add_method('TlvFront', 'ns3::Ptr< ns3::PbbTlv > const', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPopBack() [member function] cls.add_method('TlvPopBack', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPopFront() [member function] cls.add_method('TlvPopFront', 'void', []) ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPushBack(ns3::Ptr<ns3::PbbTlv> tlv) [member function] cls.add_method('TlvPushBack', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')]) ## packetbb.h (module 'network'): void ns3::PbbPacket::TlvPushFront(ns3::Ptr<ns3::PbbTlv> tlv) [member function] cls.add_method('TlvPushFront', 'void', [param('ns3::Ptr< ns3::PbbTlv >', 'tlv')]) ## packetbb.h (module 'network'): int ns3::PbbPacket::TlvSize() const [member function] cls.add_method('TlvSize', 'int', [], is_const=True) return def register_Ns3PbbTlv_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('!=') ## packetbb.h (module 'network'): ns3::PbbTlv::PbbTlv(ns3::PbbTlv const & arg0) [copy constructor] cls.add_constructor([param('ns3::PbbTlv const &', 'arg0')]) ## packetbb.h (module 'network'): ns3::PbbTlv::PbbTlv() [constructor] cls.add_constructor([]) ## packetbb.h (module 'network'): void ns3::PbbTlv::Deserialize(ns3::Buffer::Iterator & start) [member function] cls.add_method('Deserialize', 'void', [param('ns3::Buffer::Iterator &', 'start')]) ## packetbb.h (module 'network'): uint32_t ns3::PbbTlv::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetType() const [member function] cls.add_method('GetType', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetTypeExt() const [member function] cls.add_method('GetTypeExt', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): ns3::Buffer 
ns3::PbbTlv::GetValue() const [member function] cls.add_method('GetValue', 'ns3::Buffer', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasTypeExt() const [member function] cls.add_method('HasTypeExt', 'bool', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasValue() const [member function] cls.add_method('HasValue', 'bool', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlv::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlv::Print(std::ostream & os, int level) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os'), param('int', 'level')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlv::Serialize(ns3::Buffer::Iterator & start) const [member function] cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator &', 'start')], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbTlv::SetType(uint8_t type) [member function] cls.add_method('SetType', 'void', [param('uint8_t', 'type')]) ## packetbb.h (module 'network'): void ns3::PbbTlv::SetTypeExt(uint8_t type) [member function] cls.add_method('SetTypeExt', 'void', [param('uint8_t', 'type')]) ## packetbb.h (module 'network'): void ns3::PbbTlv::SetValue(ns3::Buffer start) [member function] cls.add_method('SetValue', 'void', [param('ns3::Buffer', 'start')]) ## packetbb.h (module 'network'): void ns3::PbbTlv::SetValue(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('SetValue', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetIndexStart() const [member function] cls.add_method('GetIndexStart', 'uint8_t', [], is_const=True, visibility='protected') ## packetbb.h (module 'network'): uint8_t ns3::PbbTlv::GetIndexStop() const [member function] 
cls.add_method('GetIndexStop', 'uint8_t', [], is_const=True, visibility='protected') ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasIndexStart() const [member function] cls.add_method('HasIndexStart', 'bool', [], is_const=True, visibility='protected') ## packetbb.h (module 'network'): bool ns3::PbbTlv::HasIndexStop() const [member function] cls.add_method('HasIndexStop', 'bool', [], is_const=True, visibility='protected') ## packetbb.h (module 'network'): bool ns3::PbbTlv::IsMultivalue() const [member function] cls.add_method('IsMultivalue', 'bool', [], is_const=True, visibility='protected') ## packetbb.h (module 'network'): void ns3::PbbTlv::SetIndexStart(uint8_t index) [member function] cls.add_method('SetIndexStart', 'void', [param('uint8_t', 'index')], visibility='protected') ## packetbb.h (module 'network'): void ns3::PbbTlv::SetIndexStop(uint8_t index) [member function] cls.add_method('SetIndexStop', 'void', [param('uint8_t', 'index')], visibility='protected') ## packetbb.h (module 'network'): void ns3::PbbTlv::SetMultivalue(bool isMultivalue) [member function] cls.add_method('SetMultivalue', 'void', [param('bool', 'isMultivalue')], visibility='protected') return def register_Ns3RandomVariableChecker_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker(ns3::RandomVariableChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomVariableChecker const &', 'arg0')]) return def register_Ns3RandomVariableValue_methods(root_module, cls): ## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue() [constructor] cls.add_constructor([]) ## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariableValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::RandomVariableValue 
const &', 'arg0')]) ## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariable const & value) [constructor] cls.add_constructor([param('ns3::RandomVariable const &', 'value')]) ## random-variable.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::RandomVariableValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## random-variable.h (module 'core'): bool ns3::RandomVariableValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## random-variable.h (module 'core'): ns3::RandomVariable ns3::RandomVariableValue::Get() const [member function] cls.add_method('Get', 'ns3::RandomVariable', [], is_const=True) ## random-variable.h (module 'core'): std::string ns3::RandomVariableValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## random-variable.h (module 'core'): void ns3::RandomVariableValue::Set(ns3::RandomVariable const & value) [member function] cls.add_method('Set', 'void', [param('ns3::RandomVariable const &', 'value')]) return def register_Ns3RateErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::RateErrorModel::RateErrorModel(ns3::RateErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::RateErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::RateErrorModel::RateErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): double ns3::RateErrorModel::GetRate() const [member function] cls.add_method('GetRate', 'double', [], is_const=True) ## 
error-model.h (module 'network'): static ns3::TypeId ns3::RateErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): ns3::RateErrorModel::ErrorUnit ns3::RateErrorModel::GetUnit() const [member function] cls.add_method('GetUnit', 'ns3::RateErrorModel::ErrorUnit', [], is_const=True) ## error-model.h (module 'network'): void ns3::RateErrorModel::SetRandomVariable(ns3::RandomVariable const & ranvar) [member function] cls.add_method('SetRandomVariable', 'void', [param('ns3::RandomVariable const &', 'ranvar')]) ## error-model.h (module 'network'): void ns3::RateErrorModel::SetRate(double rate) [member function] cls.add_method('SetRate', 'void', [param('double', 'rate')]) ## error-model.h (module 'network'): void ns3::RateErrorModel::SetUnit(ns3::RateErrorModel::ErrorUnit error_unit) [member function] cls.add_method('SetUnit', 'void', [param('ns3::RateErrorModel::ErrorUnit', 'error_unit')]) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptBit(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorruptBit', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptByte(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorruptByte', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): bool ns3::RateErrorModel::DoCorruptPkt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorruptPkt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): void 
ns3::RateErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3ReceiveListErrorModel_methods(root_module, cls): ## error-model.h (module 'network'): ns3::ReceiveListErrorModel::ReceiveListErrorModel(ns3::ReceiveListErrorModel const & arg0) [copy constructor] cls.add_constructor([param('ns3::ReceiveListErrorModel const &', 'arg0')]) ## error-model.h (module 'network'): ns3::ReceiveListErrorModel::ReceiveListErrorModel() [constructor] cls.add_constructor([]) ## error-model.h (module 'network'): std::list<unsigned int, std::allocator<unsigned int> > ns3::ReceiveListErrorModel::GetList() const [member function] cls.add_method('GetList', 'std::list< unsigned int >', [], is_const=True) ## error-model.h (module 'network'): static ns3::TypeId ns3::ReceiveListErrorModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## error-model.h (module 'network'): void ns3::ReceiveListErrorModel::SetList(std::list<unsigned int, std::allocator<unsigned int> > const & packetlist) [member function] cls.add_method('SetList', 'void', [param('std::list< unsigned int > const &', 'packetlist')]) ## error-model.h (module 'network'): bool ns3::ReceiveListErrorModel::DoCorrupt(ns3::Ptr<ns3::Packet> p) [member function] cls.add_method('DoCorrupt', 'bool', [param('ns3::Ptr< ns3::Packet >', 'p')], visibility='private', is_virtual=True) ## error-model.h (module 'network'): void ns3::ReceiveListErrorModel::DoReset() [member function] cls.add_method('DoReset', 'void', [], visibility='private', is_virtual=True) return def register_Ns3SimpleChannel_methods(root_module, cls): ## simple-channel.h (module 'network'): ns3::SimpleChannel::SimpleChannel(ns3::SimpleChannel const & arg0) [copy constructor] cls.add_constructor([param('ns3::SimpleChannel const &', 'arg0')]) ## simple-channel.h (module 'network'): ns3::SimpleChannel::SimpleChannel() [constructor] 
cls.add_constructor([]) ## simple-channel.h (module 'network'): void ns3::SimpleChannel::Add(ns3::Ptr<ns3::SimpleNetDevice> device) [member function] cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::SimpleNetDevice >', 'device')]) ## simple-channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::SimpleChannel::GetDevice(uint32_t i) const [member function] cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True) ## simple-channel.h (module 'network'): uint32_t ns3::SimpleChannel::GetNDevices() const [member function] cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True) ## simple-channel.h (module 'network'): static ns3::TypeId ns3::SimpleChannel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## simple-channel.h (module 'network'): void ns3::SimpleChannel::Send(ns3::Ptr<ns3::Packet> p, uint16_t protocol, ns3::Mac48Address to, ns3::Mac48Address from, ns3::Ptr<ns3::SimpleNetDevice> sender) [member function] cls.add_method('Send', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'to'), param('ns3::Mac48Address', 'from'), param('ns3::Ptr< ns3::SimpleNetDevice >', 'sender')]) return def register_Ns3SimpleNetDevice_methods(root_module, cls): ## simple-net-device.h (module 'network'): ns3::SimpleNetDevice::SimpleNetDevice(ns3::SimpleNetDevice const & arg0) [copy constructor] cls.add_constructor([param('ns3::SimpleNetDevice const &', 'arg0')]) ## simple-net-device.h (module 'network'): ns3::SimpleNetDevice::SimpleNetDevice() [constructor] cls.add_constructor([]) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function] cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, 
ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True) ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetAddress() const [member function] cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetBroadcast() const [member function] cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::SimpleNetDevice::GetChannel() const [member function] cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): uint32_t ns3::SimpleNetDevice::GetIfIndex() const [member function] cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): uint16_t ns3::SimpleNetDevice::GetMtu() const [member function] cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): ns3::Address ns3::SimpleNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function] cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::SimpleNetDevice::GetNode() const [member function] cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): static ns3::TypeId ns3::SimpleNetDevice::GetTypeId() [member function] cls.add_method('GetTypeId', 
'ns3::TypeId', [], is_static=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsBridge() const [member function] cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsLinkUp() const [member function] cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::IsPointToPoint() const [member function] cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::NeedsArp() const [member function] cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::Receive(ns3::Ptr<ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address to, ns3::Mac48Address from) [member function] cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'to'), param('ns3::Mac48Address', 'from')]) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function] cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, 
uint16_t protocolNumber) [member function] cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetAddress(ns3::Address address) [member function] cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetChannel(ns3::Ptr<ns3::SimpleChannel> channel) [member function] cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SimpleChannel >', 'channel')]) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetIfIndex(uint32_t const index) [member function] cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SetMtu(uint16_t const mtu) [member function] cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function] cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## simple-net-device.h (module 'network'): void 
ns3::SimpleNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function] cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::SetReceiveErrorModel(ns3::Ptr<ns3::ErrorModel> em) [member function] cls.add_method('SetReceiveErrorModel', 'void', [param('ns3::Ptr< ns3::ErrorModel >', 'em')]) ## simple-net-device.h (module 'network'): bool ns3::SimpleNetDevice::SupportsSendFrom() const [member function] cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True) ## simple-net-device.h (module 'network'): void ns3::SimpleNetDevice::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3TimeChecker_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')]) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const 
[member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] 
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() 
const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_Ns3PbbAddressTlv_methods(root_module, cls): ## packetbb.h (module 'network'): ns3::PbbAddressTlv::PbbAddressTlv() [constructor] cls.add_constructor([]) ## packetbb.h (module 'network'): ns3::PbbAddressTlv::PbbAddressTlv(ns3::PbbAddressTlv const & arg0) [copy constructor] cls.add_constructor([param('ns3::PbbAddressTlv const &', 'arg0')]) ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressTlv::GetIndexStart() const [member function] cls.add_method('GetIndexStart', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): uint8_t ns3::PbbAddressTlv::GetIndexStop() const [member function] cls.add_method('GetIndexStop', 'uint8_t', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlv::HasIndexStart() const [member function] cls.add_method('HasIndexStart', 'bool', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlv::HasIndexStop() const 
[member function] cls.add_method('HasIndexStop', 'bool', [], is_const=True) ## packetbb.h (module 'network'): bool ns3::PbbAddressTlv::IsMultivalue() const [member function] cls.add_method('IsMultivalue', 'bool', [], is_const=True) ## packetbb.h (module 'network'): void ns3::PbbAddressTlv::SetIndexStart(uint8_t index) [member function] cls.add_method('SetIndexStart', 'void', [param('uint8_t', 'index')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlv::SetIndexStop(uint8_t index) [member function] cls.add_method('SetIndexStop', 'void', [param('uint8_t', 'index')]) ## packetbb.h (module 'network'): void ns3::PbbAddressTlv::SetMultivalue(bool isMultivalue) [member function] cls.add_method('SetMultivalue', 'void', [param('bool', 'isMultivalue')]) return def register_functions(root_module): module = root_module ## address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeAddressChecker() [free function] module.add_function('MakeAddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## data-rate.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeDataRateChecker() [free function] module.add_function('MakeDataRateChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv4-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv4AddressChecker() [free function] module.add_function('MakeIpv4AddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv4-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv4MaskChecker() [free function] module.add_function('MakeIpv4MaskChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv6-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeIpv6AddressChecker() [free function] module.add_function('MakeIpv6AddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## ipv6-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> 
ns3::MakeIpv6PrefixChecker() [free function] module.add_function('MakeIpv6PrefixChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## mac48-address.h (module 'network'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeMac48AddressChecker() [free function] module.add_function('MakeMac48AddressChecker', 'ns3::Ptr< ns3::AttributeChecker const >', []) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Address & ad, uint32_t len) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Address &', 'ad'), param('uint32_t', 'len')]) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Ipv4Address & ad) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv4Address &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Ipv6Address & ad) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv6Address &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::ReadFrom(ns3::Buffer::Iterator & i, ns3::Mac48Address & ad) [free function] module.add_function('ReadFrom', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Mac48Address &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Address const & ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Address const &', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Ipv4Address ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv4Address', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Ipv6Address ad) [free 
function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Ipv6Address', 'ad')]) ## address-utils.h (module 'network'): extern void ns3::WriteTo(ns3::Buffer::Iterator & i, ns3::Mac48Address ad) [free function] module.add_function('WriteTo', 'void', [param('ns3::Buffer::Iterator &', 'i'), param('ns3::Mac48Address', 'ad')]) register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_addressUtils(module, root_module): ## address-utils.h (module 'network'): extern bool ns3::addressUtils::IsMulticast(ns3::Address const & ad) [free function] module.add_function('IsMulticast', 'bool', [param('ns3::Address const &', 'ad')]) return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ha; import java.io.IOException; import java.util.Collections; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import static org.apache.hadoop.fs.CommonConfigurationKeys.*; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HealthCheckFailedException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * This class is a daemon which runs in a loop, periodically heartbeating * with an HA service. It is responsible for keeping track of that service's * health and exposing callbacks to the failover controller when the health * status changes. * * Classes which need callbacks should implement the {@link Callback} * interface. 
*/ @InterfaceAudience.Private public class HealthMonitor { private static final Logger LOG = LoggerFactory.getLogger( HealthMonitor.class); private Daemon daemon; private long connectRetryInterval; private long checkIntervalMillis; private long sleepAfterDisconnectMillis; private int rpcConnectRetries; private int rpcTimeout; private volatile boolean shouldRun = true; /** The connected proxy */ private HAServiceProtocol proxy; /** The HA service to monitor */ private final HAServiceTarget targetToMonitor; private final Configuration conf; private State state = State.INITIALIZING; /** * Listeners for state changes */ private List<Callback> callbacks = Collections.synchronizedList( new LinkedList<Callback>()); private List<ServiceStateCallback> serviceStateCallbacks = Collections .synchronizedList(new LinkedList<ServiceStateCallback>()); private HAServiceStatus lastServiceState = new HAServiceStatus( HAServiceState.INITIALIZING); @InterfaceAudience.Private public enum State { /** * The health monitor is still starting up. */ INITIALIZING, /** * The service is not responding to health check RPCs. */ SERVICE_NOT_RESPONDING, /** * The service is connected and healthy. */ SERVICE_HEALTHY, /** * The service is running but unhealthy. */ SERVICE_UNHEALTHY, /** * The health monitor itself failed unrecoverably and can * no longer provide accurate information. 
*/ HEALTH_MONITOR_FAILED; } HealthMonitor(Configuration conf, HAServiceTarget target) { this.targetToMonitor = target; this.conf = conf; this.sleepAfterDisconnectMillis = conf.getLong( HA_HM_SLEEP_AFTER_DISCONNECT_KEY, HA_HM_SLEEP_AFTER_DISCONNECT_DEFAULT); this.checkIntervalMillis = conf.getLong( HA_HM_CHECK_INTERVAL_KEY, HA_HM_CHECK_INTERVAL_DEFAULT); this.connectRetryInterval = conf.getLong( HA_HM_CONNECT_RETRY_INTERVAL_KEY, HA_HM_CONNECT_RETRY_INTERVAL_DEFAULT); this.rpcConnectRetries = conf.getInt(HA_HM_RPC_CONNECT_MAX_RETRIES_KEY, HA_HM_RPC_CONNECT_MAX_RETRIES_DEFAULT); this.rpcTimeout = conf.getInt( HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT); this.daemon = new MonitorDaemon(); } public void addCallback(Callback cb) { this.callbacks.add(cb); } public synchronized void addServiceStateCallback(ServiceStateCallback cb) { this.serviceStateCallbacks.add(cb); } public void shutdown() { LOG.info("Stopping HealthMonitor thread"); shouldRun = false; daemon.interrupt(); } /** * @return the current proxy object to the underlying service. * Note that this may return null in the case that the service * is not responding. Also note that, even if the last indicated * state is healthy, the service may have gone down in the meantime. */ public synchronized HAServiceProtocol getProxy() { return proxy; } private void loopUntilConnected() throws InterruptedException { tryConnect(); while (proxy == null) { Thread.sleep(connectRetryInterval); tryConnect(); } assert proxy != null; } private void tryConnect() { Preconditions.checkState(proxy == null); try { synchronized (this) { proxy = createProxy(); } } catch (IOException e) { LOG.warn("Could not connect to local service at " + targetToMonitor + ": " + e.getMessage()); proxy = null; enterState(State.SERVICE_NOT_RESPONDING); } } /** * Connect to the service to be monitored. Stubbed out for easier testing. * * @throws IOException raised on errors performing I/O. * @return HAServiceProtocol. 
*/ protected HAServiceProtocol createProxy() throws IOException { return targetToMonitor.getHealthMonitorProxy(conf, rpcTimeout, rpcConnectRetries); } private void doHealthChecks() throws InterruptedException { while (shouldRun) { HAServiceStatus status = null; boolean healthy = false; try { status = proxy.getServiceStatus(); proxy.monitorHealth(); healthy = true; } catch (Throwable t) { if (isHealthCheckFailedException(t)) { LOG.warn("Service health check failed for {}", targetToMonitor, t); enterState(State.SERVICE_UNHEALTHY); } else { LOG.warn("Transport-level exception trying to monitor health of {}", targetToMonitor, t); RPC.stopProxy(proxy); proxy = null; enterState(State.SERVICE_NOT_RESPONDING); Thread.sleep(sleepAfterDisconnectMillis); return; } } if (status != null) { setLastServiceStatus(status); } if (healthy) { enterState(State.SERVICE_HEALTHY); } Thread.sleep(checkIntervalMillis); } } private boolean isHealthCheckFailedException(Throwable t) { return ((t instanceof HealthCheckFailedException) || (t instanceof RemoteException && ((RemoteException)t).unwrapRemoteException( HealthCheckFailedException.class) instanceof HealthCheckFailedException)); } private synchronized void setLastServiceStatus(HAServiceStatus status) { this.lastServiceState = status; for (ServiceStateCallback cb : serviceStateCallbacks) { cb.reportServiceStatus(lastServiceState); } } private synchronized void enterState(State newState) { if (newState != state) { LOG.info("Entering state {}", newState); state = newState; synchronized (callbacks) { for (Callback cb : callbacks) { cb.enteredState(newState); } } } } synchronized State getHealthState() { return state; } boolean isAlive() { return daemon.isAlive(); } void join() throws InterruptedException { daemon.join(); } void start() { daemon.start(); } private class MonitorDaemon extends Daemon { private MonitorDaemon() { super(); setName("Health Monitor for " + targetToMonitor); setUncaughtExceptionHandler(new UncaughtExceptionHandler() 
{ @Override public void uncaughtException(Thread t, Throwable e) { LOG.error("Health monitor failed", e); enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED); } }); } @Override public void work() { while (shouldRun) { try { loopUntilConnected(); doHealthChecks(); } catch (InterruptedException ie) { Preconditions.checkState(!shouldRun, "Interrupted but still supposed to run"); } } } } /** * Callback interface for state change events. * * This interface is called from a single thread which also performs * the health monitoring. If the callback processing takes a long time, * no further health checks will be made during this period, nor will * other registered callbacks be called. * * If the callback itself throws an unchecked exception, no other * callbacks following it will be called, and the health monitor * will terminate, entering HEALTH_MONITOR_FAILED state. */ static interface Callback { void enteredState(State newState); } /** * Callback interface for service states. */ static interface ServiceStateCallback { void reportServiceStatus(HAServiceStatus status); } }
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for ImageNet data preprocessing & prediction decoding. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras.utils.data_utils import get_file from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export CLASS_INDEX = None CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json' # Global tensor of imagenet mean for preprocessing symbolic inputs _IMAGENET_MEAN = None def _preprocess_numpy_input(x, data_format, mode): """Preprocesses a Numpy array encoding a batch of images. Arguments: x: Input array, 3D or 4D. data_format: Data format of the image array. mode: One of "caffe", "tf" or "torch". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. 
- torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. Returns: Preprocessed Numpy array. """ if mode == 'tf': x /= 127.5 x -= 1. return x if mode == 'torch': x /= 255. mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] else: if data_format == 'channels_first': # 'RGB'->'BGR' if x.ndim == 3: x = x[::-1, ...] else: x = x[:, ::-1, ...] else: # 'RGB'->'BGR' x = x[..., ::-1] mean = [103.939, 116.779, 123.68] std = None # Zero-center by mean pixel if data_format == 'channels_first': if x.ndim == 3: x[0, :, :] -= mean[0] x[1, :, :] -= mean[1] x[2, :, :] -= mean[2] if std is not None: x[0, :, :] /= std[0] x[1, :, :] /= std[1] x[2, :, :] /= std[2] else: x[:, 0, :, :] -= mean[0] x[:, 1, :, :] -= mean[1] x[:, 2, :, :] -= mean[2] if std is not None: x[:, 0, :, :] /= std[0] x[:, 1, :, :] /= std[1] x[:, 2, :, :] /= std[2] else: x[..., 0] -= mean[0] x[..., 1] -= mean[1] x[..., 2] -= mean[2] if std is not None: x[..., 0] /= std[0] x[..., 1] /= std[1] x[..., 2] /= std[2] return x def _preprocess_symbolic_input(x, data_format, mode): """Preprocesses a tensor encoding a batch of images. Arguments: x: Input tensor, 3D or 4D. data_format: Data format of the image tensor. mode: One of "caffe", "tf" or "torch". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. - torch: will scale pixels between 0 and 1 and then will normalize each channel with respect to the ImageNet dataset. Returns: Preprocessed tensor. """ global _IMAGENET_MEAN if mode == 'tf': x /= 127.5 x -= 1. return x if mode == 'torch': x /= 255. mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] else: if data_format == 'channels_first': # 'RGB'->'BGR' if K.ndim(x) == 3: x = x[::-1, ...] else: x = x[:, ::-1, ...] 
else: # 'RGB'->'BGR' x = x[..., ::-1] mean = [103.939, 116.779, 123.68] std = None if _IMAGENET_MEAN is None: _IMAGENET_MEAN = constant_op.constant(-np.array(mean), dtype=K.floatx()) # Zero-center by mean pixel if K.dtype(x) != K.dtype(_IMAGENET_MEAN): x = K.bias_add(x, math_ops.cast(_IMAGENET_MEAN, K.dtype(x)), data_format) else: x = K.bias_add(x, _IMAGENET_MEAN, data_format) if std is not None: x /= std return x @tf_export('keras.applications.resnet50.preprocess_input', 'keras.applications.vgg19.preprocess_input', 'keras.applications.vgg16.preprocess_input') def preprocess_input(x, data_format=None, mode='caffe'): """Preprocesses a tensor or Numpy array encoding a batch of images. Arguments: x: Input Numpy or symbolic tensor, 3D or 4D. data_format: Data format of the image tensor/array. mode: One of "caffe", "tf". - caffe: will convert the images from RGB to BGR, then will zero-center each color channel with respect to the ImageNet dataset, without scaling. - tf: will scale pixels between -1 and 1, sample-wise. Returns: Preprocessed tensor or Numpy array. Raises: ValueError: In case of unknown `data_format` argument. 
""" if data_format is None: data_format = K.image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if isinstance(x, np.ndarray): return _preprocess_numpy_input(x, data_format=data_format, mode=mode) else: return _preprocess_symbolic_input(x, data_format=data_format, mode=mode) @tf_export('keras.applications.nasnet.decode_predictions', 'keras.applications.resnet50.decode_predictions', 'keras.applications.vgg19.decode_predictions', 'keras.applications.vgg16.decode_predictions', 'keras.applications.inception_resnet_v2.decode_predictions', 'keras.applications.inception_v3.decode_predictions', 'keras.applications.densenet.decode_predictions', 'keras.applications.mobilenet.decode_predictions', 'keras.applications.xception.decode_predictions') def decode_predictions(preds, top=5): """Decodes the prediction of an ImageNet model. Arguments: preds: Numpy tensor encoding a batch of predictions. top: Integer, how many top-guesses to return. Returns: A list of lists of top class prediction tuples `(class_name, class_description, score)`. One list of tuples per sample in batch input. Raises: ValueError: In case of invalid shape of the `pred` array (must be 2D). """ global CLASS_INDEX if len(preds.shape) != 2 or preds.shape[1] != 1000: raise ValueError('`decode_predictions` expects ' 'a batch of predictions ' '(i.e. a 2D array of shape (samples, 1000)). 
' 'Found array with shape: ' + str(preds.shape)) if CLASS_INDEX is None: fpath = get_file( 'imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models', file_hash='c2c37ea517e94d9795004a39431a14cb') with open(fpath) as f: CLASS_INDEX = json.load(f) results = [] for pred in preds: top_indices = pred.argsort()[-top:][::-1] result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices] result.sort(key=lambda x: x[2], reverse=True) results.append(result) return results def _obtain_input_shape(input_shape, default_size, min_size, data_format, require_flatten, weights=None): """Internal utility to compute/validate a model's input shape. Arguments: input_shape: Either None (will return the default network input shape), or a user-provided shape to be validated. default_size: Default input width/height for the model. min_size: Minimum input width/height accepted by the model. data_format: Image data format to use. require_flatten: Whether the model is expected to be linked to a classifier via a Flatten layer. weights: One of `None` (random initialization) or 'imagenet' (pre-training on ImageNet). If weights='imagenet' input channels must be equal to 3. Returns: An integer shape tuple (may include None entries). Raises: ValueError: In case of invalid argument values. """ if weights != 'imagenet' and input_shape and len(input_shape) == 3: if data_format == 'channels_first': if input_shape[0] not in {1, 3}: logging.warning('This model usually expects 1 or 3 input channels. ' 'However, it was passed an input_shape with ' + str(input_shape[0]) + ' input channels.') default_shape = (input_shape[0], default_size, default_size) else: if input_shape[-1] not in {1, 3}: logging.warning('This model usually expects 1 or 3 input channels. 
' 'However, it was passed an input_shape with ' + str(input_shape[-1]) + ' input channels.') default_shape = (default_size, default_size, input_shape[-1]) else: if data_format == 'channels_first': default_shape = (3, default_size, default_size) else: default_shape = (default_size, default_size, 3) if weights == 'imagenet' and require_flatten: if input_shape is not None: if input_shape != default_shape: raise ValueError('When setting`include_top=True` ' 'and loading `imagenet` weights, ' '`input_shape` should be ' + str(default_shape) + '.') return default_shape if input_shape: if data_format == 'channels_first': if input_shape is not None: if len(input_shape) != 3: raise ValueError('`input_shape` must be a tuple of three integers.') if input_shape[0] != 3 and weights == 'imagenet': raise ValueError('The input must have 3 channels; got ' '`input_shape=' + str(input_shape) + '`') if ((input_shape[1] is not None and input_shape[1] < min_size) or (input_shape[2] is not None and input_shape[2] < min_size)): raise ValueError('Input size must be at least ' + str(min_size) + 'x' + str(min_size) + '; got ' '`input_shape=' + str(input_shape) + '`') else: if input_shape is not None: if len(input_shape) != 3: raise ValueError('`input_shape` must be a tuple of three integers.') if input_shape[-1] != 3 and weights == 'imagenet': raise ValueError('The input must have 3 channels; got ' '`input_shape=' + str(input_shape) + '`') if ((input_shape[0] is not None and input_shape[0] < min_size) or (input_shape[1] is not None and input_shape[1] < min_size)): raise ValueError('Input size must be at least ' + str(min_size) + 'x' + str(min_size) + '; got ' '`input_shape=' + str(input_shape) + '`') else: if require_flatten: input_shape = default_shape else: if data_format == 'channels_first': input_shape = (3, None, None) else: input_shape = (None, None, 3) if require_flatten: if None in input_shape: raise ValueError('If `include_top` is True, ' 'you should specify a static `input_shape`. 
' 'Got `input_shape=' + str(input_shape) + '`') return input_shape
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# -*- coding: utf-8 -*-

###############################################################################
#  Copyright Kitware Inc.
#
#  Licensed under the Apache License, Version 2.0 ( the "License" );
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
###############################################################################

import json
import mock
import os
import requests

from .. import base
from girder import config
from girder.api.rest import endpoint
from girder.models.user import User


def setUpModule():
    # Run the test server on a real port: testRawResponse below makes an
    # actual HTTP request (via `requests`) to exercise WSGI-level encoding.
    os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
    config.loadConfig()

    # Point the plugin loader at the bundled test plugins and enable the one
    # that registers the /collection/unbound and /other routes used below.
    testPluginPath = os.path.normpath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), '..', '..', 'test', 'test_plugins'
    ))
    base.mockPluginDir(testPluginPath)
    base.enabledPlugins = ['test_plugin']
    # Silence expected plugin-load exception logging during startup.
    with mock.patch('girder.utility.plugin_utilities.logprint.exception'):
        base.startServer(mock=False)


def tearDownModule():
    base.stopServer()


class TestEndpointDecoratorException(base.TestCase):
    """Tests the endpoint decorator exception handling."""

    def setUp(self):
        with mock.patch('girder.utility.plugin_utilities.logprint.exception'):
            super(TestEndpointDecoratorException, self).setUp()

    # The three endpoints below deliberately raise exceptions with ascii,
    # non-ascii unicode, and raw-byte messages so the decorator's error
    # serialization path is exercised for each encoding case.
    @endpoint
    def pointlessEndpointAscii(self, path, params):
        raise Exception('You did something wrong.')

    @endpoint
    def pointlessEndpointUnicode(self, path, params):
        raise Exception(u'\u0400 cannot be converted to ascii.')

    @endpoint
    def pointlessEndpointBytes(self, path, params):
        raise Exception('\x80\x80 cannot be converted to unicode or ascii.')

    def testEndpointExceptionAscii(self):
        # The decorator should swallow the exception and return a JSON body
        # describing an internal error rather than propagating it.
        resp = self.pointlessEndpointAscii('', {}).decode()
        obj = json.loads(resp)
        self.assertEqual(obj['type'], 'internal')

    def testEndpointExceptionUnicode(self):
        resp = self.pointlessEndpointUnicode('', {}).decode('utf8')
        obj = json.loads(resp)
        self.assertEqual(obj['type'], 'internal')

    def testEndpointExceptionBytes(self):
        resp = self.pointlessEndpointBytes('', {}).decode('utf8')
        obj = json.loads(resp)
        self.assertEqual(obj['type'], 'internal')

    def testBoundHandlerDecorator(self):
        # Routes registered by the test plugin; verifies boundHandler binds
        # `self` correctly for both default and explicitly-named resources.
        user = User().createUser('tester', 'password', 'Test', 'User', 'test@test.com')

        resp = self.request('/collection/unbound/default/noargs', user=user, params={
            'val': False
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, True)

        resp = self.request('/collection/unbound/default', user=user, params={
            'val': False
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, True)

        resp = self.request('/collection/unbound/explicit', user=user)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json, {
            'name': 'collection',
            'userLogin': 'tester'
        })

    def testRawResponse(self):
        resp = self.request('/other/rawWithDecorator', isJson=False)
        self.assertStatusOk(resp)
        self.assertEqual(self.getBody(resp), 'this is a raw response')

        resp = self.request('/other/rawInternal', isJson=False)
        self.assertStatusOk(resp)
        self.assertEqual(self.getBody(resp), 'this is also a raw response')

        # We must make an actual request in order to test response encoding
        # at the WSGI server layer.
        resp = requests.get(
            'http://127.0.0.1:%s/api/v1/other/rawReturningText' % os.environ['GIRDER_TEST_PORT'])
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.headers['Content-Type'], 'text/plain;charset=utf-8')
        # Raw bytes on the wire vs. decoded text: both checked to pin down
        # that the body is UTF-8 encoded exactly once.
        self.assertEqual(resp.content, b'this is not encoded \xf0\x9f\x91\x8d')
        self.assertEqual(resp.text, u'this is not encoded \U0001F44D')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# HTTP front-end for a MurkyWaters node (Python 2: uses BaseHTTPServer and
# print statements).
#
# GET  /<resid>  -> serve the resource from the in-memory cache, or fetch it
#                   from the MurkyWaters network.
# PUT  /<resid>  -> store the body as a resource, propagate it to peers, and
#                   persist a zlib-compressed copy under config['data-dir'].
#
# Resource ids are decimal integers in URLs but hex-named files on disk.

from MurkyWaters import MurkyWaters
import time, json, sys
import BaseHTTPServer
import os
import zlib

def save_file( config, resid, data ):
    # Persist a resource as a zlib-compressed file named by its hex id.
    path = os.path.join( config['data-dir'], "%x"%resid )
    with open( path, 'wb' ) as handle:
        handle.write( zlib.compress( data ) )

# Module-level shared state: the MurkyWaters node and an in-memory cache of
# resid -> raw (uncompressed) bytes. Both are mutated by the request handlers.
murky = None
files = {}

# Config file path may be overridden by the first CLI argument.
fn = "murky-config.json"
if len( sys.argv ) > 1:
    fn = sys.argv[1]
config = json.load( open( fn ) )

class MurkyProxy( BaseHTTPServer.BaseHTTPRequestHandler ):
    def do_GET( self ):
        global murky, files, config
        if len( self.path ) > 1:
            try:
                # Path after '/' must be a decimal resource id; a non-integer
                # path falls through to the ValueError handler -> 404.
                resid = int( self.path[1:] )
                if resid in files:
                    content = files[resid]
                else:
                    # Cache miss: ask the MurkyWaters network for it.
                    content = murky.fetch( resid )
                if content:
                    self.send_response( 200 )
                    self.end_headers()
                    self.wfile.write( content )
                else:
                    self.send_response( 404 )
                    self.end_headers()
            except ValueError:
                self.send_response( 404 )
                self.end_headers()
            return
        else:
            # Bare "/" has nothing to serve.
            self.send_response( 404 )
            self.end_headers()
        return

    def do_PUT( self ):
        global murky, files, config
        if len( self.path ) > 1:
            try:
                resid = int( self.path[1:] )
                # Read exactly Content-Length bytes of body data.
                L = int( self.headers['content-length'] )
                data = self.rfile.read( L )
                # Register locally, push to peers, then persist and cache.
                murky.add( resid, data )
                murky.propagate( resid )
                save_file( config, resid, data )
                files[resid] = data
                self.send_response( 200 )
                self.end_headers()
                self.wfile.write( json.dumps( {'response': 'Ok.'} ) + "\n" )
            except ValueError:
                # Non-integer resid or missing/invalid Content-Length.
                self.send_response( 400 )
                self.end_headers()
            return
        else:
            self.send_response( 404 )
            self.end_headers()

if __name__ == '__main__':
    server = BaseHTTPServer.HTTPServer( (config['http']['host'], config['http']['port']), MurkyProxy )
    murky = MurkyWaters( save_file, config )
    murky.start()
    # Connect to every peer listed in the servers file.
    servers = json.load( open( config['servers-list'] ) )
    for entry in servers:
        murky.server( entry['host'], entry['port'] )
    # Reload previously persisted resources (hex-named, zlib-compressed).
    # Non-hex filenames are silently skipped via the ValueError handler.
    local_files = os.listdir( config['data-dir'] )
    for fn in local_files:
        try:
            resid = int( fn, 16 )
            path = os.path.join( config['data-dir'], fn )
            data = zlib.decompress( open( path, 'rb' ).read() )
            files[resid] = data
            murky.add( resid, data )
            if config['behaviour']['propagate-on-startup']:
                murky.propagate( resid )
        except ValueError:
            pass
    try:
        print "[%s] Murky Waters started. (press ctrl-c to stop)"% time.strftime( "%Y-%m-%d %H:%M:%S" )
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    server.shutdown()
    murky.stop()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
import rospy

from threading import Timer

from flexbe_core.logger import Logger


class ProxyServiceCaller(object):
    """
    A proxy for calling services.

    Service clients are stored in the class-level `_services` dict and are
    therefore shared by all instances of this proxy.
    """
    _services = {}

    def __init__(self, topics=None, persistent=False, wait_duration=10):
        """
        Initializes the proxy with optionally a given set of clients.

        @type topics: dictionary string - message class
        @param topics: A dictionay containing a collection of topic - message type pairs.

        @type persistent: bool
        @param persistent: Defines if the service callers are persistent.

        @type wait_duration: int
        @param wait_duration: Defines how long to wait for the given services if not available right now.
        """
        # NOTE: the default used to be the mutable literal `{}`; `None` avoids
        # the shared-mutable-default pitfall while remaining call-compatible.
        if topics is None:
            topics = {}
        for topic, msg_type in topics.items():
            self.setupService(topic, msg_type, persistent, wait_duration)

    def setupService(self, topic, msg_type, persistent=False, wait_duration=10):
        """
        Tries to set up a service caller for calling it later.

        @type topic: string
        @param topic: The topic of the service to call.

        @type msg_type: service class
        @param msg_type: The type of messages of this service.

        @type persistent: bool
        @param persistent: Defines if this service caller is persistent.

        @type wait_duration: int
        @param wait_duration: Defines how long to wait for the given service if it is not available right now.
        """
        if topic not in ProxyServiceCaller._services:
            ProxyServiceCaller._services[topic] = rospy.ServiceProxy(topic, msg_type, persistent)
            self._check_service_available(topic, wait_duration)

    def is_available(self, topic):
        """
        Checks if the service on the given topic is available.

        @type topic: string
        @param topic: The topic of interest.
        """
        return self._check_service_available(topic)

    def call(self, topic, request):
        """
        Performs a service call on the given topic.

        @type topic: string
        @param topic: The topic to call.

        @type request: service
        @param request: The request to send to this service.
        """
        if not self._check_service_available(topic):
            raise ValueError('Cannot call service client %s: Topic not available.' % topic)
        # call service (forward any exceptions)
        return ProxyServiceCaller._services[topic].call(request)

    def _check_service_available(self, topic, wait_duration=1):
        """
        Checks whether a service is available.

        @type topic: string
        @param topic: The topic of the service.

        @type wait_duration: int
        @param wait_duration: Defines how long to wait for the given service if it is
        not available right now.
        """
        client = ProxyServiceCaller._services.get(topic)
        if client is None:
            Logger.logerr("Service client %s not yet registered, need to add it first!" % topic)
            return False
        # Track whether the delayed "Waiting for service..." warning actually
        # fired. The previous implementation tried to infer this from an
        # exception raised by Timer.cancel(), but cancel() never raises (it is
        # a no-op on an already-fired timer), so the follow-up info message
        # was unreachable. A flag set by the timer callback is reliable.
        warning_sent = [False]

        def _delayed_warning():
            self._print_wait_warning(topic)
            warning_sent[0] = True

        t = Timer(1, _delayed_warning)
        t.start()
        try:
            rospy.wait_for_service(topic, wait_duration)
            available = True
        except rospy.exceptions.ROSException:
            available = False
        finally:
            # Harmless if the timer already fired; prevents the warning if not.
            t.cancel()

        if not available:
            Logger.logerr("Service client %s timed out!" % topic)
            return False
        if warning_sent[0]:
            Logger.loginfo("Finally found service %s..." % (topic))
        return True

    def _print_wait_warning(self, topic):
        Logger.logwarn("Waiting for service %s..." % (topic))
unknown
codeparrot/codeparrot-clean
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::Command;
use std::str;

// Template for a generated module: `$$` is replaced below with the crate's
// patch version, producing e.g. `__private123`. Versioning the module name
// keeps this crate's internal re-exports distinct across releases.
const PRIVATE: &str = "\
#[doc(hidden)]
pub mod __private$$ {
    #[doc(hidden)]
    pub use crate::private::*;
}
";

// The rustc-cfg strings below are *not* public API. Please let us know by
// opening a GitHub issue if your build environment requires some way to enable
// these cfgs other than by executing our build script.
fn main() {
    println!("cargo:rerun-if-changed=build.rs");

    // Generate the versioned private module into OUT_DIR for inclusion by
    // the crate source.
    let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    let patch_version = env::var("CARGO_PKG_VERSION_PATCH").unwrap();
    let module = PRIVATE.replace("$$", &patch_version);
    fs::write(out_dir.join("private.rs"), module).unwrap();

    // If rustc's version cannot be determined, emit no cfgs at all and let
    // compilation proceed with the most modern assumptions.
    let minor = match rustc_minor_version() {
        Some(minor) => minor,
        None => return,
    };

    // Declare every cfg we may emit so `--check-cfg` (available since 1.77)
    // does not warn about unexpected cfg names.
    if minor >= 77 {
        println!("cargo:rustc-check-cfg=cfg(if_docsrs_then_no_serde_core)");
        println!("cargo:rustc-check-cfg=cfg(no_core_cstr)");
        println!("cargo:rustc-check-cfg=cfg(no_core_error)");
        println!("cargo:rustc-check-cfg=cfg(no_core_net)");
        println!("cargo:rustc-check-cfg=cfg(no_core_num_saturating)");
        println!("cargo:rustc-check-cfg=cfg(no_diagnostic_namespace)");
        println!("cargo:rustc-check-cfg=cfg(no_serde_derive)");
        println!("cargo:rustc-check-cfg=cfg(no_std_atomic)");
        println!("cargo:rustc-check-cfg=cfg(no_std_atomic64)");
        println!("cargo:rustc-check-cfg=cfg(no_target_has_atomic)");
    }

    let target = env::var("TARGET").unwrap();
    let emscripten = target == "asmjs-unknown-emscripten" || target == "wasm32-unknown-emscripten";

    // Support for #[cfg(target_has_atomic = "...")] stabilized in Rust 1.60.
    if minor < 60 {
        println!("cargo:rustc-cfg=no_target_has_atomic");
        // Allowlist of archs that support std::sync::atomic module. This is
        // based on rustc's compiler/rustc_target/src/spec/*.rs.
        let has_atomic64 = target.starts_with("x86_64")
            || target.starts_with("i686")
            || target.starts_with("aarch64")
            || target.starts_with("powerpc64")
            || target.starts_with("sparc64")
            || target.starts_with("mips64el")
            || target.starts_with("riscv64");
        let has_atomic32 = has_atomic64 || emscripten;
        if minor < 34 || !has_atomic64 {
            println!("cargo:rustc-cfg=no_std_atomic64");
        }
        if minor < 34 || !has_atomic32 {
            println!("cargo:rustc-cfg=no_std_atomic");
        }
    }

    // Current minimum supported version of serde_derive crate is Rust 1.61.
    if minor < 61 {
        println!("cargo:rustc-cfg=no_serde_derive");
    }

    // Support for core::ffi::CStr and alloc::ffi::CString stabilized in Rust 1.64.
    // https://blog.rust-lang.org/2022/09/22/Rust-1.64.0.html#c-compatible-ffi-types-in-core-and-alloc
    if minor < 64 {
        println!("cargo:rustc-cfg=no_core_cstr");
    }

    // Support for core::num::Saturating and std::num::Saturating stabilized in Rust 1.74
    // https://blog.rust-lang.org/2023/11/16/Rust-1.74.0.html#stabilized-apis
    if minor < 74 {
        println!("cargo:rustc-cfg=no_core_num_saturating");
    }

    // Support for core::net stabilized in Rust 1.77.
    // https://blog.rust-lang.org/2024/03/21/Rust-1.77.0.html
    if minor < 77 {
        println!("cargo:rustc-cfg=no_core_net");
    }

    // Support for the `#[diagnostic]` tool attribute namespace
    // https://blog.rust-lang.org/2024/05/02/Rust-1.78.0.html#diagnostic-attributes
    if minor < 78 {
        println!("cargo:rustc-cfg=no_diagnostic_namespace");
    }

    // The Error trait became available in core in 1.81.
    // https://blog.rust-lang.org/2024/09/05/Rust-1.81.0.html#coreerrorerror
    if minor < 81 {
        println!("cargo:rustc-cfg=no_core_error");
    }
}

/// Returns rustc's minor version (e.g. `Some(81)` for "rustc 1.81.0"),
/// or `None` if `$RUSTC --version` cannot be run or parsed.
fn rustc_minor_version() -> Option<u32> {
    let rustc = env::var_os("RUSTC")?;
    let output = Command::new(rustc).arg("--version").output().ok()?;
    let version = str::from_utf8(&output.stdout).ok()?;
    let mut pieces = version.split('.');
    // Splitting "rustc 1.81.0" on '.' yields ["rustc 1", "81", "0..."], so
    // the first piece doubles as a major-version check.
    if pieces.next() != Some("rustc 1") {
        return None;
    }
    pieces.next()?.parse().ok()
}
rust
github
https://github.com/nodejs/node
deps/crates/vendor/serde_core/build.rs
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_KERNEL_H_
#define TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_KERNEL_H_

#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"  // NOLINT(build/include) -- self-include pattern not present; see bytecode.h below

#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"

namespace mlrt {
namespace bc {

// A non-owning, read-only view over one kernel record in the mlrt bytecode
// buffer. The on-buffer layout is declared via DEFINE_BYTECODE_FIELD in
// StorageType: an opcode followed by index vectors for arguments, results,
// attributes, and per-argument last-use flags.
class Kernel {
 public:
  struct StorageType {
    using Self = StorageType;
    DEFINE_BYTECODE_FIELD(uint32_t, code);
    DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, arguments);
    DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, results);
    DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, attributes);
    DEFINE_BYTECODE_FIELD(bc::Vector<uint8_t>, last_uses);
  };

  // Writer-side companion: serializes kernel fields into the bytecode buffer
  // at a fixed address using the given allocator.
  class Constructor {
   public:
    Constructor(Allocator* allocator, BcAddr_t address)
        : allocator_(allocator), address_(address) {}

    void set_code(uint32_t code) {
      StorageType::construct_code(allocator_, address_, code);
    }

    template <typename... Args>
    auto construct_arguments(Args&&... args) {
      return StorageType::construct_arguments(allocator_, address_,
                                              std::forward<Args>(args)...);
    }

    template <typename... Args>
    auto construct_results(Args&&... args) {
      return StorageType::construct_results(allocator_, address_,
                                            std::forward<Args>(args)...);
    }

    template <typename... Args>
    auto construct_attributes(Args&&... args) {
      return StorageType::construct_attributes(allocator_, address_,
                                               std::forward<Args>(args)...);
    }

    template <typename... Args>
    auto construct_last_uses(Args&&... args) {
      return StorageType::construct_last_uses(allocator_, address_,
                                              std::forward<Args>(args)...);
    }

    BcAddr_t address() const { return address_; }

   private:
    Allocator* allocator_;
    BcAddr_t address_;
  };
  using NonTrivialConstructorType = Constructor;

  // Views a kernel record starting at `p` inside the bytecode buffer; the
  // buffer must outlive this view.
  explicit Kernel(const char* p) : p_(p) {}
  Kernel() : p_(nullptr) {}

  // Opcode identifying which kernel implementation to dispatch.
  uint32_t code() const { return StorageType::read_code(p_); }
  // Indices of this kernel's input values.
  Vector<uint32_t> arguments() const { return StorageType::read_arguments(p_); }
  // Indices of this kernel's output values.
  Vector<uint32_t> results() const { return StorageType::read_results(p_); }
  // Indices of this kernel's attributes.
  Vector<uint32_t> attributes() const {
    return StorageType::read_attributes(p_);
  }
  // Per-argument flags; parallel to arguments(). NOTE(review): presumably a
  // nonzero entry marks the last use of the corresponding argument -- confirm
  // against the interpreter.
  Vector<uint8_t> last_uses() const { return StorageType::read_last_uses(p_); }

 private:
  const char* p_;
};

}  // namespace bc
}  // namespace mlrt

#endif  // TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_KERNEL_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/tfrt/mlrt/bytecode/kernel.h
""" Parser and evaluator for FormulaResponse and NumericalResponse Uses pyparsing to parse. Main function as of now is evaluator(). """ import math import operator import numbers import numpy import scipy.constants import functions from pyparsing import ( Word, Literal, CaselessLiteral, ZeroOrMore, MatchFirst, Optional, Forward, Group, ParseResults, stringEnd, Suppress, Combine, alphas, nums, alphanums ) DEFAULT_FUNCTIONS = { 'sin': numpy.sin, 'cos': numpy.cos, 'tan': numpy.tan, 'sec': functions.sec, 'csc': functions.csc, 'cot': functions.cot, 'sqrt': numpy.sqrt, 'log10': numpy.log10, 'log2': numpy.log2, 'ln': numpy.log, 'exp': numpy.exp, 'arccos': numpy.arccos, 'arcsin': numpy.arcsin, 'arctan': numpy.arctan, 'arcsec': functions.arcsec, 'arccsc': functions.arccsc, 'arccot': functions.arccot, 'abs': numpy.abs, 'fact': math.factorial, 'factorial': math.factorial, 'sinh': numpy.sinh, 'cosh': numpy.cosh, 'tanh': numpy.tanh, 'sech': functions.sech, 'csch': functions.csch, 'coth': functions.coth, 'arcsinh': numpy.arcsinh, 'arccosh': numpy.arccosh, 'arctanh': numpy.arctanh, 'arcsech': functions.arcsech, 'arccsch': functions.arccsch, 'arccoth': functions.arccoth } DEFAULT_VARIABLES = { 'i': numpy.complex(0, 1), 'j': numpy.complex(0, 1), 'e': numpy.e, 'pi': numpy.pi, 'k': scipy.constants.k, # Boltzmann: 1.3806488e-23 (Joules/Kelvin) 'c': scipy.constants.c, # Light Speed: 2.998e8 (m/s) 'T': 298.15, # Typical room temperature: 298.15 (Kelvin), same as 25C/77F 'q': scipy.constants.e # Fund. Charge: 1.602176565e-19 (Coulombs) } # We eliminated the following extreme suffixes: # P (1e15), E (1e18), Z (1e21), Y (1e24), # f (1e-15), a (1e-18), z (1e-21), y (1e-24) # since they're rarely used, and potentially confusing. # They may also conflict with variables if we ever allow e.g. 
# 5R instead of 5*R SUFFIXES = { '%': 0.01, 'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12, 'c': 1e-2, 'm': 1e-3, 'u': 1e-6, 'n': 1e-9, 'p': 1e-12 } class UndefinedVariable(Exception): """ Indicate when a student inputs a variable which was not expected. """ pass def lower_dict(input_dict): """ Convert all keys in a dictionary to lowercase; keep their original values. Keep in mind that it is possible (but not useful?) to define different variables that have the same lowercase representation. It would be hard to tell which is used in the final dict and which isn't. """ return {k.lower(): v for k, v in input_dict.iteritems()} # The following few functions define evaluation actions, which are run on lists # of results from each parse component. They convert the strings and (previously # calculated) numbers into the number that component represents. def super_float(text): """ Like float, but with SI extensions. 1k goes to 1000. """ if text[-1] in SUFFIXES: return float(text[:-1]) * SUFFIXES[text[-1]] else: return float(text) def eval_number(parse_result): """ Create a float out of its string parts. e.g. [ '7.13', 'e', '3' ] -> 7130 Calls super_float above. """ return super_float("".join(parse_result)) def eval_atom(parse_result): """ Return the value wrapped by the atom. In the case of parenthesis, ignore them. """ # Find first number in the list result = next(k for k in parse_result if isinstance(k, numbers.Number)) return result def eval_power(parse_result): """ Take a list of numbers and exponentiate them, right to left. e.g. [ 2, 3, 2 ] -> 2^3^2 = 2^(3^2) -> 512 (not to be interpreted (2^3)^2 = 64) """ # `reduce` will go from left to right; reverse the list. parse_result = reversed( [k for k in parse_result if isinstance(k, numbers.Number)] # Ignore the '^' marks. ) # Having reversed it, raise `b` to the power of `a`. 
power = reduce(lambda a, b: b ** a, parse_result) return power def eval_parallel(parse_result): """ Compute numbers according to the parallel resistors operator. BTW it is commutative. Its formula is given by out = 1 / (1/in1 + 1/in2 + ...) e.g. [ 1, 2 ] -> 2/3 Return NaN if there is a zero among the inputs. """ if len(parse_result) == 1: return parse_result[0] if 0 in parse_result: return float('nan') reciprocals = [1. / e for e in parse_result if isinstance(e, numbers.Number)] return 1. / sum(reciprocals) def eval_sum(parse_result): """ Add the inputs, keeping in mind their sign. [ 1, '+', 2, '-', 3 ] -> 0 Allow a leading + or -. """ total = 0.0 current_op = operator.add for token in parse_result: if token == '+': current_op = operator.add elif token == '-': current_op = operator.sub else: total = current_op(total, token) return total def eval_product(parse_result): """ Multiply the inputs. [ 1, '*', 2, '/', 3 ] -> 0.66 """ prod = 1.0 current_op = operator.mul for token in parse_result: if token == '*': current_op = operator.mul elif token == '/': current_op = operator.truediv else: prod = current_op(prod, token) return prod def add_defaults(variables, functions, case_sensitive): """ Create dictionaries with both the default and user-defined variables. """ all_variables = dict(DEFAULT_VARIABLES) all_functions = dict(DEFAULT_FUNCTIONS) all_variables.update(variables) all_functions.update(functions) if not case_sensitive: all_variables = lower_dict(all_variables) all_functions = lower_dict(all_functions) return (all_variables, all_functions) def evaluator(variables, functions, math_expr, case_sensitive=False): """ Evaluate an expression; that is, take a string of math and return a float. -Variables are passed as a dictionary from string to value. They must be python numbers. -Unary functions are passed as a dictionary from string to function. """ # No need to go further. if math_expr.strip() == "": return float('nan') # Parse the tree. 
math_interpreter = ParseAugmenter(math_expr, case_sensitive) math_interpreter.parse_algebra() # Get our variables together. all_variables, all_functions = add_defaults(variables, functions, case_sensitive) # ...and check them math_interpreter.check_variables(all_variables, all_functions) # Create a recursion to evaluate the tree. if case_sensitive: casify = lambda x: x else: casify = lambda x: x.lower() # Lowercase for case insens. evaluate_actions = { 'number': eval_number, 'variable': lambda x: all_variables[casify(x[0])], 'function': lambda x: all_functions[casify(x[0])](x[1]), 'atom': eval_atom, 'power': eval_power, 'parallel': eval_parallel, 'product': eval_product, 'sum': eval_sum } return math_interpreter.reduce_tree(evaluate_actions) class ParseAugmenter(object): """ Holds the data for a particular parse. Retains the `math_expr` and `case_sensitive` so they needn't be passed around method to method. Eventually holds the parse tree and sets of variables as well. """ def __init__(self, math_expr, case_sensitive=False): """ Create the ParseAugmenter for a given math expression string. Do the parsing later, when called like `OBJ.parse_algebra()`. """ self.case_sensitive = case_sensitive self.math_expr = math_expr self.tree = None self.variables_used = set() self.functions_used = set() def vpa(tokens): """ When a variable is recognized, store it in `variables_used`. """ varname = tokens[0][0] self.variables_used.add(varname) def fpa(tokens): """ When a function is recognized, store it in `functions_used`. """ varname = tokens[0][0] self.functions_used.add(varname) self.variable_parse_action = vpa self.function_parse_action = fpa def parse_algebra(self): """ Parse an algebraic expression into a tree. Store a `pyparsing.ParseResult` in `self.tree` with proper groupings to reflect parenthesis and order of operations. Leave all operators in the tree and do not parse any strings of numbers into their float versions. 
Adding the groups and result names makes the `repr()` of the result really gross. For debugging, use something like print OBJ.tree.asXML() """ # 0.33 or 7 or .34 or 16. number_part = Word(nums) inner_number = (number_part + Optional("." + Optional(number_part))) | ("." + number_part) # pyparsing allows spaces between tokens--`Combine` prevents that. inner_number = Combine(inner_number) # SI suffixes and percent. number_suffix = MatchFirst(Literal(k) for k in SUFFIXES.keys()) # 0.33k or 17 plus_minus = Literal('+') | Literal('-') number = Group( Optional(plus_minus) + inner_number + Optional(CaselessLiteral("E") + Optional(plus_minus) + number_part) + Optional(number_suffix) ) number = number("number") # Predefine recursive variables. expr = Forward() # Handle variables passed in. They must start with letters/underscores # and may contain numbers afterward. inner_varname = Word(alphas + "_", alphanums + "_") varname = Group(inner_varname)("variable") varname.setParseAction(self.variable_parse_action) # Same thing for functions. function = Group(inner_varname + Suppress("(") + expr + Suppress(")"))("function") function.setParseAction(self.function_parse_action) atom = number | function | varname | "(" + expr + ")" atom = Group(atom)("atom") # Do the following in the correct order to preserve order of operation. pow_term = atom + ZeroOrMore("^" + atom) pow_term = Group(pow_term)("power") par_term = pow_term + ZeroOrMore('||' + pow_term) # 5k || 4k par_term = Group(par_term)("parallel") prod_term = par_term + ZeroOrMore((Literal('*') | Literal('/')) + par_term) # 7 * 5 / 4 prod_term = Group(prod_term)("product") sum_term = Optional(plus_minus) + prod_term + ZeroOrMore(plus_minus + prod_term) # -5 + 4 - 3 sum_term = Group(sum_term)("sum") # Finish the recursion. 
expr << sum_term # pylint: disable=pointless-statement self.tree = (expr + stringEnd).parseString(self.math_expr)[0] def reduce_tree(self, handle_actions, terminal_converter=None): """ Call `handle_actions` recursively on `self.tree` and return result. `handle_actions` is a dictionary of node names (e.g. 'product', 'sum', etc&) to functions. These functions are of the following form: -input: a list of processed child nodes. If it includes any terminal nodes in the list, they will be given as their processed forms also. -output: whatever to be passed to the level higher, and what to return for the final node. `terminal_converter` is a function that takes in a token and returns a processed form. The default of `None` just leaves them as strings. """ def handle_node(node): """ Return the result representing the node, using recursion. Call the appropriate `handle_action` for this node. As its inputs, feed it the output of `handle_node` for each child node. """ if not isinstance(node, ParseResults): # Then treat it as a terminal node. if terminal_converter is None: return node else: return terminal_converter(node) node_name = node.getName() if node_name not in handle_actions: # pragma: no cover raise Exception(u"Unknown branch name '{}'".format(node_name)) action = handle_actions[node_name] handled_kids = [handle_node(k) for k in node] return action(handled_kids) # Find the value of the entire tree. return handle_node(self.tree) def check_variables(self, valid_variables, valid_functions): """ Confirm that all the variables used in the tree are valid/defined. Otherwise, raise an UndefinedVariable containing all bad variables. """ if self.case_sensitive: casify = lambda x: x else: casify = lambda x: x.lower() # Lowercase for case insens. # Test if casify(X) is valid, but return the actual bad input (i.e. 
X) bad_vars = set(var for var in self.variables_used if casify(var) not in valid_variables) bad_vars.update(func for func in self.functions_used if casify(func) not in valid_functions) if bad_vars: raise UndefinedVariable(' '.join(sorted(bad_vars)))
unknown
codeparrot/codeparrot-clean
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_chr
from ..utils import (
    determine_ext,
    ExtractorError,
)


class OpenloadIE(InfoExtractor):
    # Extractor for openload.co / openload.io / oload.tv file and embed pages.
    _VALID_URL = r'https?://(?:openload\.(?:co|io)|oload\.tv)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'

    _TESTS = [{
        'url': 'https://openload.co/f/kUEfGclsU9o',
        'md5': 'bf1c059b004ebc7a256f89408e65c36e',
        'info_dict': {
            'id': 'kUEfGclsU9o',
            'ext': 'mp4',
            'title': 'skyrim_no-audio_1080.mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'https://openload.co/embed/rjC09fkPLYs',
        'info_dict': {
            'id': 'rjC09fkPLYs',
            'ext': 'mp4',
            'title': 'movie.mp4',
            'thumbnail': r're:^https?://.*\.jpg$',
            'subtitles': {
                'en': [{
                    'ext': 'vtt',
                }],
            },
        },
        'params': {
            'skip_download': True,  # test subtitles only
        },
    }, {
        'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4',
        'only_matching': True,
    }, {
        'url': 'https://openload.io/f/ZAn6oz-VZGE/',
        'only_matching': True,
    }, {
        'url': 'https://openload.co/f/_-ztPaZtMhM/',
        'only_matching': True,
    }, {
        # unavailable via https://openload.co/f/Sxz5sADo82g/, different layout
        # for title and ext
        'url': 'https://openload.co/embed/Sxz5sADo82g/',
        'only_matching': True,
    }, {
        'url': 'https://oload.tv/embed/KnG-kKZdcfY/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Find embedded openload iframes inside third-party pages.
        return re.findall(
            r'<iframe[^>]+src=["\']((?:https?://)?(?:openload\.(?:co|io)|oload\.tv)/embed/[a-zA-Z0-9-_]+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Always fetch the embed page; it exposes the obfuscated stream id
        # regardless of whether the input was an /f/ or /embed/ URL.
        webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)

        if 'File not found' in webpage or 'deleted by the owner' in webpage:
            raise ExtractorError('File not found', expected=True)

        # Hex-encoded, obfuscated token embedded in a <span>.
        ol_id = self._search_regex(
            '<span[^>]+id="[^"]+"[^>]*>([0-9A-Za-z]+)</span>',
            webpage, 'openload ID')

        # De-obfuscation, mirroring the site's JavaScript: the first 24 hex
        # chars form three 32-bit XOR keys; the remainder is a stream of
        # variable-length (LEB128-style, 7 bits per byte) integers. Each
        # decoded integer is XORed with a rotating key and split into 4 bytes;
        # '#' bytes are padding and are dropped.
        # NOTE(review): constants 128/143 and the c+127 mask follow the site's
        # script of the time -- expect this to break when the site changes.
        decoded = ''
        a = ol_id[0:24]
        b = []
        for i in range(0, len(a), 8):
            b.append(int(a[i:i + 8] or '0', 16))
        ol_id = ol_id[24:]
        j = 0
        k = 0
        while j < len(ol_id):
            c = 128
            d = 0
            e = 0
            f = 0
            _more = True
            while _more:
                if j + 1 >= len(ol_id):
                    c = 143
                f = int(ol_id[j:j + 2] or '0', 16)
                j += 2
                d += (f & 127) << e
                e += 7
                # High bit set (relative to threshold c) means another
                # continuation byte follows.
                _more = f >= c
            g = d ^ b[k % 3]
            for i in range(4):
                char_dec = (g >> 8 * i) & (c + 127)
                char = compat_chr(char_dec)
                if char != '#':
                    decoded += char
            k += 1

        video_url = 'https://openload.co/stream/%s?mime=true'
        video_url = video_url % decoded

        # Title lookup falls back through og:title -> span.title -> meta
        # description, matching both page layouts.
        title = self._og_search_title(webpage, default=None) or self._search_regex(
            r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
            'title', default=None) or self._html_search_meta(
            'description', webpage, 'title', fatal=True)

        entries = self._parse_html5_media_entries(url, webpage, video_id)
        subtitles = entries[0]['subtitles'] if entries else None

        info_dict = {
            'id': video_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'url': video_url,
            # Seems all videos have extensions in their titles
            'ext': determine_ext(title, 'mp4'),
            'subtitles': subtitles,
        }
        return info_dict
unknown
codeparrot/codeparrot-clean
""" Manage AWS ElastiCache Only Redis on Elasticache is supported at the moment """ import getpass import logging import hashlib import boto3 import botocore from semantic_version import Spec, Version from .disco_config import read_config from .disco_route53 import DiscoRoute53 from .exceptions import CommandError from .resource_helper import throttled_call logger = logging.getLogger(__name__) class DiscoElastiCache(object): """ A simple class to manage ElastiCache Default Maintenence windown is set as sat 5:00am to 6:00am. The Preferred Maintenance Window works on UTC. """ DEFAULT_MAINTENANCE_WINDOW = "sat:05:00-sat:06:00" def __init__(self, vpc, config_file='disco_elasticache.ini', aws=None, route53=None): self.vpc = vpc self.conn = boto3.client('elasticache') self.config_file = config_file self._config = None # lazily initialized self.route53 = route53 or DiscoRoute53() self.aws = aws @property def config(self): """lazy load config""" if not self._config: try: self._config = read_config(self.config_file) except Exception: raise return self._config def list(self): """List all cache clusters in environment""" response = throttled_call(self.conn.describe_replication_groups) groups = [group for group in response.get('ReplicationGroups', []) if group['Description'].startswith(self.vpc.environment_name + '-')] return sorted(groups, key=lambda group: (group['Description'])) def update(self, cluster_name): """ Create a new cluster or modify an existing one based on the config file Modifying tags, number of nodes, instance type, engine type, and port is not supported Args: cluster_name (str): name of cluster maintenance_window(str): accept Preferred Maintenance Window value or assigns default value. 
""" meta_network = self._get_option(cluster_name, 'meta_network') or self.aws.get_default_meta_network() if not self._get_subnet_group(meta_network): self._create_subnet_group(meta_network) maintenance_window = self._get_option(cluster_name, 'maintenance_window') or self.DEFAULT_MAINTENANCE_WINDOW engine_version = self._get_option(cluster_name, 'engine_version') instance_type = self._get_option(cluster_name, 'instance_type') parameter_group = self._get_option(cluster_name, 'parameter_group') num_nodes = int(self._get_option(cluster_name, 'num_nodes')) port = int(self._get_option(cluster_name, 'port')) auto_failover = self._has_auto_failover(engine_version, instance_type, num_nodes) domain_name = self._get_option(cluster_name, 'domain_name') or self.aws.get_default_domain_name() tags = [{ 'Key': 'product_line', 'Value': self._get_option(cluster_name, 'product_line') or self.aws.get_default_product_line('') }, { 'Key': 'owner', 'Value': getpass.getuser() }, { 'Key': 'name', 'Value': cluster_name }, { 'Key': 'environment', 'Value': self.vpc.environment_name }] cache_cluster = self._get_redis_cluster(cluster_name) if not cache_cluster: self._create_redis_cluster(cluster_name, engine_version, num_nodes, instance_type, parameter_group, port, meta_network, auto_failover, domain_name, tags, maintenance_window) else: if cache_cluster['Status'] == 'available': self._modify_redis_cluster(cluster_name, engine_version, parameter_group, auto_failover, domain_name, maintenance_window) else: logger.error('Unable to update cache cluster %s. 
Its status is not available', cache_cluster['Description']) def update_all(self): """Update all clusters in environment to match config""" sections = [section for section in self.config.sections() if section.startswith(self.vpc.environment_name + ':')] for section in sections: cluster_name = section.split(':')[1] self.update(cluster_name) def delete(self, cluster_name, wait=False): """ Delete a cache cluster Args: cluster_name (str): name of cluster wait (bool): block until cluster is deleted """ cluster = self._get_redis_cluster(cluster_name) if not cluster: logger.info('Cache cluster %s does not exist. Nothing to delete', cluster_name) return logger.info('Deleting cache cluster %s', cluster['Description']) throttled_call(self.conn.delete_replication_group, ReplicationGroupId=cluster['ReplicationGroupId']) if 'ConfigurationEndpoint' in cluster: address = cluster['ConfigurationEndpoint']['Address'] else: address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address'] self.route53.delete_records_by_value('CNAME', address) if wait: self.conn.get_waiter('replication_group_deleted').wait( ReplicationGroupId=cluster['ReplicationGroupId']) def delete_all_cache_clusters(self, wait=False): """ Delete all cache clusters in environment Args: wait (bool): block until all cache clusters are deleted """ clusters = self.list() for cluster in clusters: logger.info('Deleting cache cluster %s', cluster['Description']) throttled_call(self.conn.delete_replication_group, ReplicationGroupId=cluster['ReplicationGroupId']) if 'ConfigurationEndpoint' in cluster: address = cluster['ConfigurationEndpoint']['Address'] else: address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address'] self.route53.delete_records_by_value('CNAME', address) if wait: for cluster in clusters: self.conn.get_waiter('replication_group_deleted').wait( ReplicationGroupId=cluster['ReplicationGroupId']) def delete_all_subnet_groups(self): """Delete all subnet groups in environment""" response = 
throttled_call(self.conn.describe_cache_subnet_groups) subnet_groups = [group for group in response.get('CacheSubnetGroups', []) if group['CacheSubnetGroupName'].startswith(self.vpc.environment_name + '-')] for group in subnet_groups: logger.info('Deleting cache subnet group %s', group['CacheSubnetGroupName']) throttled_call(self.conn.delete_cache_subnet_group, CacheSubnetGroupName=group['CacheSubnetGroupName']) def _get_redis_cluster(self, cluster_name): """Returns a Redis Replication group by its name""" replication_group_id = self._get_redis_replication_group_id(cluster_name) try: response = throttled_call(self.conn.describe_replication_groups, ReplicationGroupId=replication_group_id) groups = response.get('ReplicationGroups', []) return groups[0] if groups else None except Exception: return None # too many arguments and local variables for pylint # pylint: disable=R0913, R0914 def _create_redis_cluster(self, cluster_name, engine_version, num_nodes, instance_type, parameter_group, port, meta_network_name, auto_failover, domain_name, tags, maintenance_window): """ Create a redis cache cluster Redis clusters are actually 'Replication Groups' in ElastiCache. Each Replication Group is a set of single node Redis Cache Clusters with one read/write cluster and the rest as read only. Waits until cluster is created Args: cluster_name (str): name of cluster engine_version (str): redis version to use num_nodes (int): number of nodes in replication group. must be at least 2 if auto_failover is on instance_type (str): instance types. only allowed to use instance types that start with 'cache.' parameter_group (str): name of parameter group to use port (int): port to make cache available on meta_network_name (str): meta network to use (intranet, tunnel, etc) auto_failover (bool): enable automatic promotion of read only cluster when primary fails. only supported for redis versions>2.8.6. not allowed for T1 and T2 instance types. 
domain_name (str): hosted zone id to use for Route53 domain name tags (List[dict]): list of tags to add to replication group maintenance_window(string): specifies the weekly time range (of at least 1 hour) in UTC during which maintenance on the cache cluster is performed. """ replication_group_id = self._get_redis_replication_group_id(cluster_name) description = self._get_redis_description(cluster_name) meta_network = self.vpc.networks[meta_network_name] subnet_group = self._get_subnet_group_name(meta_network_name) logger.info('Creating "%s" Redis cache', description) throttled_call(self.conn.create_replication_group, ReplicationGroupId=replication_group_id, ReplicationGroupDescription=description, NumCacheClusters=num_nodes, CacheNodeType=instance_type, Engine='redis', EngineVersion=engine_version, CacheParameterGroupName=parameter_group, CacheSubnetGroupName=subnet_group, SecurityGroupIds=[meta_network.security_group.id], Port=port, AutomaticFailoverEnabled=auto_failover, Tags=tags, PreferredMaintenanceWindow=maintenance_window) self.conn.get_waiter('replication_group_available').wait( ReplicationGroupId=replication_group_id ) cluster = self._get_redis_cluster(cluster_name) if domain_name: address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address'] subdomain = self._get_subdomain(cluster_name, domain_name) self.route53.create_record(domain_name, subdomain, 'CNAME', address) def _modify_redis_cluster(self, cluster_name, engine_version, parameter_group, auto_failover, domain_name, maintenance_window, apply_immediately=True): """ Modify an existing Redis replication group Args: cluster_name (str): name of cluster engine_version (str): redis version to use parameter_group (str): name of parameter group to use auto_failover (bool): True to enable automatic promotion of read only cluster after primary fails domain_name (str): Hosted zone where to create subdomain for cluster apply_immediately (bool): True to immediately update the cluster False to schedule update 
at next cluster maintenance window or restart maintenance_window(string): specifies the weekly time range (of at least 1 hour) in UTC during which maintenance on the cache cluster is performed. """ replication_group_id = self._get_redis_replication_group_id(cluster_name) cluster = self._get_redis_cluster(cluster_name) throttled_call(self.conn.modify_replication_group, ReplicationGroupId=replication_group_id, AutomaticFailoverEnabled=auto_failover, CacheParameterGroupName=parameter_group, ApplyImmediately=apply_immediately, EngineVersion=engine_version, PreferredMaintenanceWindow=maintenance_window) if domain_name: address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address'] self.route53.delete_records_by_value('CNAME', address) subdomain = self._get_subdomain(cluster_name, domain_name) self.route53.create_record(domain_name, subdomain, 'CNAME', address) def _create_subnet_group(self, meta_network_name): subnet_group_name = self._get_subnet_group_name(meta_network_name) meta_network = self.vpc.networks[meta_network_name] logger.info('Creating cache subnet group %s', subnet_group_name) throttled_call(self.conn.create_cache_subnet_group, CacheSubnetGroupName=subnet_group_name, CacheSubnetGroupDescription=subnet_group_name, SubnetIds=[disco_subnet.subnet_dict['SubnetId'] for disco_subnet in meta_network.disco_subnets.values()]) def _get_subnet_group(self, meta_network_name): try: response = throttled_call(self.conn.describe_cache_subnet_groups, CacheSubnetGroupName=self._get_subnet_group_name(meta_network_name)) groups = response.get('CacheSubnetGroups', []) return groups[0] if groups else None except botocore.exceptions.ClientError: return None def _get_redis_replication_group_id(self, cluster_name): """Get a unique id for a redis cluster. 
This will not be human readable""" # Redis Replication Groups Ids are limited to 16 characters so hash the group name to get a shorter id # Ids must also start with a letter return 'A' + hashlib.md5(self.vpc.environment_name + '-' + cluster_name).hexdigest()[:15] def _get_redis_description(self, cluster_name): """Get a human readable name for a redis cluster""" return self.vpc.environment_name + '-' + cluster_name def _get_subnet_group_name(self, meta_network_name): return self.vpc.environment_name + '-' + meta_network_name def _get_subdomain(self, cluster, domain_name): """Get the expected subdomain for a cache cluster""" return cluster + '-' + self.vpc.environment_name + '.' + domain_name def _get_option(self, cluster_name, option_name): """Get a config option for a cluster""" if not self.config: raise CommandError('ElastiCache config file missing') section_name = self.vpc.environment_name + ':' + cluster_name if not self.config.has_section(section_name): raise CommandError('%s section missing in ElastiCache config' % section_name) if self.config.has_option(section_name, option_name): return self.config.get(section_name, option_name) return None def _has_auto_failover(self, engine_version, instance_type, num_nodes): """auto failover is only supported for Redis versions >= 2.8.6 and not for t1, t2 instance types""" return ('t1.' not in instance_type and 't2.' not in instance_type and Spec('>=2.8.6').match(Version(engine_version)) and num_nodes > 1)
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) 2011 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package com.google.common.hash; import com.google.common.annotations.Beta; import com.google.common.base.Preconditions; import java.io.InvalidObjectException; import java.io.ObjectInputStream; import java.io.OutputStream; import java.io.Serializable; import java.nio.charset.Charset; import org.jspecify.annotations.Nullable; /** * Funnels for common types. All implementations are serializable. * * @author Dimitris Andreou * @since 11.0 */ @Beta public final class Funnels { private Funnels() {} /** Returns a funnel that extracts the bytes from a {@code byte} array. */ public static Funnel<byte[]> byteArrayFunnel() { return ByteArrayFunnel.INSTANCE; } private enum ByteArrayFunnel implements Funnel<byte[]> { INSTANCE; @Override public void funnel(byte[] from, PrimitiveSink into) { into.putBytes(from); } @Override public String toString() { return "Funnels.byteArrayFunnel()"; } } /** * Returns a funnel that extracts the characters from a {@code CharSequence}, a character at a * time, without performing any encoding. If you need to use a specific encoding, use {@link * Funnels#stringFunnel(Charset)} instead. * * @since 15.0 (since 11.0 as {@code Funnels.stringFunnel()}. 
*/ public static Funnel<CharSequence> unencodedCharsFunnel() { return UnencodedCharsFunnel.INSTANCE; } private enum UnencodedCharsFunnel implements Funnel<CharSequence> { INSTANCE; @Override public void funnel(CharSequence from, PrimitiveSink into) { into.putUnencodedChars(from); } @Override public String toString() { return "Funnels.unencodedCharsFunnel()"; } } /** * Returns a funnel that encodes the characters of a {@code CharSequence} with the specified * {@code Charset}. * * @since 15.0 */ public static Funnel<CharSequence> stringFunnel(Charset charset) { return new StringCharsetFunnel(charset); } private static final class StringCharsetFunnel implements Funnel<CharSequence> { private final Charset charset; StringCharsetFunnel(Charset charset) { this.charset = Preconditions.checkNotNull(charset); } @Override public void funnel(CharSequence from, PrimitiveSink into) { into.putString(from, charset); } @Override public String toString() { return "Funnels.stringFunnel(" + charset.name() + ")"; } @Override public boolean equals(@Nullable Object o) { if (o instanceof StringCharsetFunnel) { StringCharsetFunnel funnel = (StringCharsetFunnel) o; return this.charset.equals(funnel.charset); } return false; } @Override public int hashCode() { return StringCharsetFunnel.class.hashCode() ^ charset.hashCode(); } Object writeReplace() { return new SerializedForm(charset); } private void readObject(ObjectInputStream stream) throws InvalidObjectException { throw new InvalidObjectException("Use SerializedForm"); } private static final class SerializedForm implements Serializable { private final String charsetCanonicalName; SerializedForm(Charset charset) { this.charsetCanonicalName = charset.name(); } private Object readResolve() { return stringFunnel(Charset.forName(charsetCanonicalName)); } private static final long serialVersionUID = 0; } } /** * Returns a funnel for integers. 
* * @since 13.0 */ public static Funnel<Integer> integerFunnel() { return IntegerFunnel.INSTANCE; } private enum IntegerFunnel implements Funnel<Integer> { INSTANCE; @Override public void funnel(Integer from, PrimitiveSink into) { into.putInt(from); } @Override public String toString() { return "Funnels.integerFunnel()"; } } /** * Returns a funnel that processes an {@code Iterable} by funneling its elements in iteration * order with the specified funnel. No separators are added between the elements. * * @since 15.0 */ public static <E extends @Nullable Object> Funnel<Iterable<? extends E>> sequentialFunnel( Funnel<E> elementFunnel) { return new SequentialFunnel<>(elementFunnel); } private static final class SequentialFunnel<E extends @Nullable Object> implements Funnel<Iterable<? extends E>> { private final Funnel<E> elementFunnel; SequentialFunnel(Funnel<E> elementFunnel) { this.elementFunnel = Preconditions.checkNotNull(elementFunnel); } @Override public void funnel(Iterable<? extends E> from, PrimitiveSink into) { for (E e : from) { elementFunnel.funnel(e, into); } } @Override public String toString() { return "Funnels.sequentialFunnel(" + elementFunnel + ")"; } @Override public boolean equals(@Nullable Object o) { if (o instanceof SequentialFunnel) { SequentialFunnel<?> funnel = (SequentialFunnel<?>) o; return elementFunnel.equals(funnel.elementFunnel); } return false; } @Override public int hashCode() { return SequentialFunnel.class.hashCode() ^ elementFunnel.hashCode(); } } /** * Returns a funnel for longs. 
* * @since 13.0 */ public static Funnel<Long> longFunnel() { return LongFunnel.INSTANCE; } private enum LongFunnel implements Funnel<Long> { INSTANCE; @Override public void funnel(Long from, PrimitiveSink into) { into.putLong(from); } @Override public String toString() { return "Funnels.longFunnel()"; } } /** * Wraps a {@code PrimitiveSink} as an {@link OutputStream}, so it is easy to {@link Funnel#funnel * funnel} an object to a {@code PrimitiveSink} if there is already a way to write the contents of * the object to an {@code OutputStream}. * * <p>The {@code close} and {@code flush} methods of the returned {@code OutputStream} do nothing, * and no method throws {@code IOException}. * * @since 13.0 */ public static OutputStream asOutputStream(PrimitiveSink sink) { return new SinkAsStream(sink); } private static final class SinkAsStream extends OutputStream { final PrimitiveSink sink; SinkAsStream(PrimitiveSink sink) { this.sink = Preconditions.checkNotNull(sink); } @Override public void write(int b) { sink.putByte((byte) b); } @Override public void write(byte[] bytes) { sink.putBytes(bytes); } @Override public void write(byte[] bytes, int off, int len) { sink.putBytes(bytes, off, len); } @Override public String toString() { return "Funnels.asOutputStream(" + sink + ")"; } } }
java
github
https://github.com/google/guava
android/guava/src/com/google/common/hash/Funnels.java
/*[clinic input] preserve [clinic start generated code]*/ #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) # include "pycore_gc.h" // PyGC_Head # include "pycore_runtime.h" // _Py_ID() #endif #include "pycore_modsupport.h" // _PyArg_UnpackKeywords() PyDoc_STRVAR(_symtable_symtable__doc__, "symtable($module, source, filename, startstr, /, *, module=None)\n" "--\n" "\n" "Return symbol and scope dictionaries used internally by compiler."); #define _SYMTABLE_SYMTABLE_METHODDEF \ {"symtable", _PyCFunction_CAST(_symtable_symtable), METH_FASTCALL|METH_KEYWORDS, _symtable_symtable__doc__}, static PyObject * _symtable_symtable_impl(PyObject *module, PyObject *source, PyObject *filename, const char *startstr, PyObject *modname); static PyObject * _symtable_symtable(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { PyObject *return_value = NULL; #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) #define NUM_KEYWORDS 1 static struct { PyGC_Head _this_is_not_used; PyObject_VAR_HEAD Py_hash_t ob_hash; PyObject *ob_item[NUM_KEYWORDS]; } _kwtuple = { .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) .ob_hash = -1, .ob_item = { &_Py_ID(module), }, }; #undef NUM_KEYWORDS #define KWTUPLE (&_kwtuple.ob_base.ob_base) #else // !Py_BUILD_CORE # define KWTUPLE NULL #endif // !Py_BUILD_CORE static const char * const _keywords[] = {"", "", "", "module", NULL}; static _PyArg_Parser _parser = { .keywords = _keywords, .fname = "symtable", .kwtuple = KWTUPLE, }; #undef KWTUPLE PyObject *argsbuf[4]; Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 3; PyObject *source; PyObject *filename = NULL; const char *startstr; PyObject *modname = Py_None; args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, /*minpos*/ 3, /*maxpos*/ 3, /*minkw*/ 0, /*varpos*/ 0, argsbuf); if (!args) { goto exit; } source = args[0]; if (!PyUnicode_FSDecoder(args[1], &filename)) { goto exit; } if (!PyUnicode_Check(args[2])) { _PyArg_BadArgument("symtable", "argument 3", "str", args[2]); goto exit; } Py_ssize_t startstr_length; startstr = PyUnicode_AsUTF8AndSize(args[2], &startstr_length); if (startstr == NULL) { goto exit; } if (strlen(startstr) != (size_t)startstr_length) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } if (!noptargs) { goto skip_optional_kwonly; } modname = args[3]; skip_optional_kwonly: return_value = _symtable_symtable_impl(module, source, filename, startstr, modname); exit: /* Cleanup for filename */ Py_XDECREF(filename); return return_value; } /*[clinic end generated code: output=0137be60c487c841 input=a9049054013a1b77]*/
c
github
https://github.com/python/cpython
Modules/clinic/symtablemodule.c.h
import esp class FlashBdev: SEC_SIZE = 4096 def __init__(self, start_sec, blocks): self.start_sec = start_sec self.blocks = blocks def readblocks(self, n, buf, off=0): # print("readblocks(%s, %x(%d), %d)" % (n, id(buf), len(buf), off)) esp.flash_read((n + self.start_sec) * self.SEC_SIZE + off, buf) def writeblocks(self, n, buf, off=None): # print("writeblocks(%s, %x(%d), %d)" % (n, id(buf), len(buf), off)) # assert len(buf) <= self.SEC_SIZE, len(buf) if off is None: esp.flash_erase(n + self.start_sec) off = 0 esp.flash_write((n + self.start_sec) * self.SEC_SIZE + off, buf) def ioctl(self, op, arg): # print("ioctl(%d, %r)" % (op, arg)) if op == 4: # MP_BLOCKDEV_IOCTL_BLOCK_COUNT return self.blocks if op == 5: # MP_BLOCKDEV_IOCTL_BLOCK_SIZE return self.SEC_SIZE if op == 6: # MP_BLOCKDEV_IOCTL_BLOCK_ERASE esp.flash_erase(arg + self.start_sec) return 0 size = esp.flash_size() if size < 1024 * 1024: bdev = None else: start_sec = esp.flash_user_start() // FlashBdev.SEC_SIZE if start_sec < 256: start_sec += 1 # Reserve space for native code # 20K at the flash end is reserved for SDK params storage bdev = FlashBdev(start_sec, (size - 20480) // FlashBdev.SEC_SIZE - start_sec)
unknown
codeparrot/codeparrot-clean
""" Machine limits for Float32 and Float64 and (long double) if available... """ __all__ = ['finfo','iinfo'] from machar import MachAr import numeric import numerictypes as ntypes from numeric import array def _frz(a): """fix rank-0 --> rank-1""" if a.ndim == 0: a.shape = (1,) return a _convert_to_float = { ntypes.csingle: ntypes.single, ntypes.complex_: ntypes.float_, ntypes.clongfloat: ntypes.longfloat } class finfo(object): """ finfo(dtype) Machine limits for floating point types. Attributes ---------- eps : floating point number of the appropriate type The smallest representable number such that ``1.0 + eps != 1.0``. epsneg : floating point number of the appropriate type The smallest representable number such that ``1.0 - epsneg != 1.0``. iexp : int The number of bits in the exponent portion of the floating point representation. machar : MachAr The object which calculated these parameters and holds more detailed information. machep : int The exponent that yields ``eps``. max : floating point number of the appropriate type The largest representable number. maxexp : int The smallest positive power of the base (2) that causes overflow. min : floating point number of the appropriate type The smallest representable number, typically ``-max``. minexp : int The most negative power of the base (2) consistent with there being no leading 0's in the mantissa. negep : int The exponent that yields ``epsneg``. nexp : int The number of bits in the exponent including its sign and bias. nmant : int The number of bits in the mantissa. precision : int The approximate number of decimal digits to which this kind of float is precise. resolution : floating point number of the appropriate type The approximate decimal resolution of this type, i.e. ``10**-precision``. tiny : floating point number of the appropriate type The smallest-magnitude usable number. 
Parameters ---------- dtype : floating point type, dtype, or instance The kind of floating point data type to get information about. See Also -------- MachAr : The implementation of the tests that produce this information. iinfo : The equivalent for integer data types. Notes ----- For developers of NumPy: do not instantiate this at the module level. The initial calculation of these parameters is expensive and negatively impacts import times. These objects are cached, so calling ``finfo()`` repeatedly inside your functions is not a problem. """ _finfo_cache = {} def __new__(cls, dtype): try: dtype = numeric.dtype(dtype) except TypeError: # In case a float instance was given dtype = numeric.dtype(type(dtype)) obj = cls._finfo_cache.get(dtype,None) if obj is not None: return obj dtypes = [dtype] newdtype = numeric.obj2sctype(dtype) if newdtype is not dtype: dtypes.append(newdtype) dtype = newdtype if not issubclass(dtype, numeric.inexact): raise ValueError, "data type %r not inexact" % (dtype) obj = cls._finfo_cache.get(dtype,None) if obj is not None: return obj if not issubclass(dtype, numeric.floating): newdtype = _convert_to_float[dtype] if newdtype is not dtype: dtypes.append(newdtype) dtype = newdtype obj = cls._finfo_cache.get(dtype,None) if obj is not None: return obj obj = object.__new__(cls)._init(dtype) for dt in dtypes: cls._finfo_cache[dt] = obj return obj def _init(self, dtype): self.dtype = numeric.dtype(dtype) if dtype is ntypes.double: itype = ntypes.int64 fmt = '%24.16e' precname = 'double' elif dtype is ntypes.single: itype = ntypes.int32 fmt = '%15.7e' precname = 'single' elif dtype is ntypes.longdouble: itype = ntypes.longlong fmt = '%s' precname = 'long double' else: raise ValueError, repr(dtype) machar = MachAr(lambda v:array([v], dtype), lambda v:_frz(v.astype(itype))[0], lambda v:array(_frz(v)[0], dtype), lambda v: fmt % array(_frz(v)[0], dtype), 'numpy %s precision floating point number' % precname) for word in ['precision', 'iexp', 
'maxexp','minexp','negep', 'machep']: setattr(self,word,getattr(machar, word)) for word in ['tiny','resolution','epsneg']: setattr(self,word,getattr(machar, word).squeeze()) self.max = machar.huge.flat[0] self.min = -self.max self.eps = machar.eps.flat[0] self.nexp = machar.iexp self.nmant = machar.it self.machar = machar self._str_tiny = machar._str_xmin.strip() self._str_max = machar._str_xmax.strip() self._str_epsneg = machar._str_epsneg.strip() self._str_eps = machar._str_eps.strip() self._str_resolution = machar._str_resolution.strip() return self def __str__(self): return '''\ Machine parameters for %(dtype)s --------------------------------------------------------------------- precision=%(precision)3s resolution= %(_str_resolution)s machep=%(machep)6s eps= %(_str_eps)s negep =%(negep)6s epsneg= %(_str_epsneg)s minexp=%(minexp)6s tiny= %(_str_tiny)s maxexp=%(maxexp)6s max= %(_str_max)s nexp =%(nexp)6s min= -max --------------------------------------------------------------------- ''' % self.__dict__ class iinfo: """ iinfo(type) Machine limits for integer types. Attributes ---------- min : int The smallest integer expressible by the type. max : int The largest integer expressible by the type. Parameters ---------- type : integer type, dtype, or instance The kind of integer data type to get information about. See Also -------- finfo : The equivalent for floating point data types. 
Examples -------- With types: >>> ii16 = np.iinfo(np.int16) >>> ii16.min -32768 >>> ii16.max 32767 >>> ii32 = np.iinfo(np.int32) >>> ii32.min -2147483648 >>> ii32.max 2147483647 With instances: >>> ii32 = np.iinfo(np.int32(10)) >>> ii32.min -2147483648 >>> ii32.max 2147483647 """ _min_vals = {} _max_vals = {} def __init__(self, int_type): try: self.dtype = numeric.dtype(int_type) except TypeError: self.dtype = numeric.dtype(type(int_type)) self.kind = self.dtype.kind self.bits = self.dtype.itemsize * 8 self.key = "%s%d" % (self.kind, self.bits) if not self.kind in 'iu': raise ValueError("Invalid integer data type.") def min(self): """Minimum value of given dtype.""" if self.kind == 'u': return 0 else: try: val = iinfo._min_vals[self.key] except KeyError: val = int(-(1L << (self.bits-1))) iinfo._min_vals[self.key] = val return val min = property(min) def max(self): """Maximum value of given dtype.""" try: val = iinfo._max_vals[self.key] except KeyError: if self.kind == 'u': val = int((1L << self.bits) - 1) else: val = int((1L << (self.bits-1)) - 1) iinfo._max_vals[self.key] = val return val max = property(max) def __str__(self): """String representation.""" return '''\ Machine parameters for %(dtype)s --------------------------------------------------------------------- min = %(min)s max = %(max)s --------------------------------------------------------------------- ''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max} if __name__ == '__main__': f = finfo(ntypes.single) print 'single epsilon:',f.eps print 'single tiny:',f.tiny f = finfo(ntypes.float) print 'float epsilon:',f.eps print 'float tiny:',f.tiny f = finfo(ntypes.longfloat) print 'longfloat epsilon:',f.eps print 'longfloat tiny:',f.tiny
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.testcontainers.beans; import org.jspecify.annotations.Nullable; import org.springframework.beans.factory.config.BeanDefinition; import org.springframework.core.annotation.MergedAnnotations; /** * Extended {@link org.springframework.beans.factory.config.BeanDefinition} interface used * to register testcontainer beans. * * @author Phillip Webb * @since 3.1.0 */ public interface TestcontainerBeanDefinition extends BeanDefinition { /** * Return the container image name or {@code null} if the image name is not yet known. * @return the container image name */ @Nullable String getContainerImageName(); /** * Return any annotations declared alongside the container. * @return annotations declared with the container */ MergedAnnotations getAnnotations(); }
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-testcontainers/src/main/java/org/springframework/boot/testcontainers/beans/TestcontainerBeanDefinition.java
""" There are three types of functions implemented in SymPy: 1) defined functions (in the sense that they can be evaluated) like exp or sin; they have a name and a body: f = exp 2) undefined function which have a name but no body. Undefined functions can be defined using a Function class as follows: f = Function('f') (the result will be a Function instance) 3) anonymous function (or lambda function) which have a body (defined with dummy variables) but have no name: f = Lambda(x, exp(x)*x) f = Lambda((x, y), exp(x)*y) The fourth type of functions are composites, like (sin + cos)(x); these work in SymPy core, but are not yet part of SymPy. Examples ======== >>> import sympy >>> f = sympy.Function("f") >>> from sympy.abc import x >>> f(x) f(x) >>> print(sympy.srepr(f(x).func)) Function('f') >>> f(x).args (x,) """ from __future__ import print_function, division from .add import Add from .assumptions import ManagedProperties from .basic import Basic from .cache import cacheit from .compatibility import iterable, is_sequence, as_int, ordered from .core import BasicMeta from .decorators import _sympifyit from .expr import Expr, AtomicExpr from .numbers import Rational, Float from .operations import LatticeOp from .rules import Transform from .singleton import S from .sympify import sympify from sympy.core.containers import Tuple, Dict from sympy.core.logic import fuzzy_and from sympy.core.compatibility import string_types, with_metaclass, range from sympy.utilities import default_sort_key from sympy.utilities.iterables import uniq from sympy.core.evaluate import global_evaluate import mpmath import mpmath.libmp as mlib import inspect def _coeff_isneg(a): """Return True if the leading Number is negative. 
Examples ======== >>> from sympy.core.function import _coeff_isneg >>> from sympy import S, Symbol, oo, pi >>> _coeff_isneg(-3*pi) True >>> _coeff_isneg(S(3)) False >>> _coeff_isneg(-oo) True >>> _coeff_isneg(Symbol('n', negative=True)) # coeff is 1 False """ if a.is_Mul: a = a.args[0] return a.is_Number and a.is_negative class PoleError(Exception): pass class ArgumentIndexError(ValueError): def __str__(self): return ("Invalid operation with argument number %s for Function %s" % (self.args[1], self.args[0])) class FunctionClass(ManagedProperties): """ Base class for function classes. FunctionClass is a subclass of type. Use Function('<function name>' [ , signature ]) to create undefined function classes. """ _new = type.__new__ def __init__(cls, *args, **kwargs): if hasattr(cls, 'eval'): evalargspec = inspect.getargspec(cls.eval) if evalargspec.varargs: evalargs = None else: evalargs = len(evalargspec.args) - 1 # subtract 1 for cls if evalargspec.defaults: # if there are default args then they are optional; the # fewest args will occur when all defaults are used and # the most when none are used (i.e. all args are given) evalargs = tuple(range( evalargs - len(evalargspec.defaults), evalargs + 1)) else: evalargs = None # honor kwarg value or class-defined value before using # the number of arguments in the eval function (if present) nargs = kwargs.pop('nargs', cls.__dict__.get('nargs', evalargs)) super(FunctionClass, cls).__init__(args, kwargs) # Canonicalize nargs here; change to set in nargs. if is_sequence(nargs): nargs = tuple(ordered(set(nargs))) elif nargs is not None: nargs = (as_int(nargs),) cls._nargs = nargs @property def nargs(self): """Return a set of the allowed number of arguments for the function. 
Examples ======== >>> from sympy.core.function import Function >>> from sympy.abc import x, y >>> f = Function('f') If the function can take any number of arguments, the set of whole numbers is returned: >>> Function('f').nargs Naturals0() If the function was initialized to accept one or more arguments, a corresponding set will be returned: >>> Function('f', nargs=1).nargs {1} >>> Function('f', nargs=(2, 1)).nargs {1, 2} The undefined function, after application, also has the nargs attribute; the actual number of arguments is always available by checking the ``args`` attribute: >>> f = Function('f') >>> f(1).nargs Naturals0() >>> len(f(1).args) 1 """ from sympy.sets.sets import FiniteSet # XXX it would be nice to handle this in __init__ but there are import # problems with trying to import FiniteSet there return FiniteSet(*self._nargs) if self._nargs else S.Naturals0 def __repr__(cls): return cls.__name__ class Application(with_metaclass(FunctionClass, Basic)): """ Base class for applied functions. Instances of Application represent the result of applying an application of any type to any object. """ is_Function = True @cacheit def __new__(cls, *args, **options): from sympy.sets.fancysets import Naturals0 from sympy.sets.sets import FiniteSet args = list(map(sympify, args)) evaluate = options.pop('evaluate', global_evaluate[0]) # WildFunction (and anything else like it) may have nargs defined # and we throw that value away here options.pop('nargs', None) if options: raise ValueError("Unknown options: %s" % options) if evaluate: evaluated = cls.eval(*args) if evaluated is not None: return evaluated obj = super(Application, cls).__new__(cls, *args, **options) # make nargs uniform here try: # things passing through here: # - functions subclassed from Function (e.g. myfunc(1).nargs) # - functions like cos(1).nargs # - AppliedUndef with given nargs like Function('f', nargs=1)(1).nargs # Canonicalize nargs here; change to set in nargs. 
if is_sequence(obj.nargs): obj.nargs = tuple(ordered(set(obj.nargs))) elif obj.nargs is not None: obj.nargs = (as_int(obj.nargs),) obj.nargs = FiniteSet(*obj.nargs) if obj.nargs is not None \ else Naturals0() except AttributeError: # things passing through here: # - WildFunction('f').nargs # - AppliedUndef with no nargs like Function('f')(1).nargs obj.nargs = FiniteSet(*obj._nargs) if obj._nargs is not None \ else Naturals0() return obj @classmethod def eval(cls, *args): """ Returns a canonical form of cls applied to arguments args. The eval() method is called when the class cls is about to be instantiated and it should return either some simplified instance (possible of some other class), or if the class cls should be unmodified, return None. Examples of eval() for the function "sign" --------------------------------------------- @classmethod def eval(cls, arg): if arg is S.NaN: return S.NaN if arg is S.Zero: return S.Zero if arg.is_positive: return S.One if arg.is_negative: return S.NegativeOne if isinstance(arg, Mul): coeff, terms = arg.as_coeff_Mul(rational=True) if coeff is not S.One: return cls(coeff) * cls(terms) """ return @property def func(self): return self.__class__ def _eval_subs(self, old, new): if (old.is_Function and new.is_Function and old == self.func and len(self.args) in new.nargs): return new(*self.args) class Function(Application, Expr): """Base class for applied mathematical functions. It also serves as a constructor for undefined function classes. Examples ======== First example shows how to use Function as a constructor for undefined function classes: >>> from sympy import Function, Symbol >>> x = Symbol('x') >>> f = Function('f') >>> g = Function('g')(x) >>> f f >>> f(x) f(x) >>> g g(x) >>> f(x).diff(x) Derivative(f(x), x) >>> g.diff(x) Derivative(g(x), x) In the following example Function is used as a base class for ``my_func`` that represents a mathematical function *my_func*. 
Suppose that it is well known, that *my_func(0)* is *1* and *my_func* at infinity goes to *0*, so we want those two simplifications to occur automatically. Suppose also that *my_func(x)* is real exactly when *x* is real. Here is an implementation that honours those requirements: >>> from sympy import Function, S, oo, I, sin >>> class my_func(Function): ... ... @classmethod ... def eval(cls, x): ... if x.is_Number: ... if x is S.Zero: ... return S.One ... elif x is S.Infinity: ... return S.Zero ... ... def _eval_is_real(self): ... return self.args[0].is_real ... >>> x = S('x') >>> my_func(0) + sin(0) 1 >>> my_func(oo) 0 >>> my_func(3.54).n() # Not yet implemented for my_func. my_func(3.54) >>> my_func(I).is_real False In order for ``my_func`` to become useful, several other methods would need to be implemented. See source code of some of the already implemented functions for more complete examples. Also, if the function can take more than one argument, then ``nargs`` must be defined, e.g. if ``my_func`` can take one or two arguments then, >>> class my_func(Function): ... nargs = (1, 2) ... >>> """ @property def _diff_wrt(self): """Allow derivatives wrt functions. Examples ======== >>> from sympy import Function, Symbol >>> f = Function('f') >>> x = Symbol('x') >>> f(x)._diff_wrt True """ return True @cacheit def __new__(cls, *args, **options): # Handle calls like Function('f') if cls is Function: return UndefinedFunction(*args, **options) n = len(args) if n not in cls.nargs: # XXX: exception message must be in exactly this format to # make it work with NumPy's functions like vectorize(). See, # for example, https://github.com/numpy/numpy/issues/1697. # The ideal solution would be just to attach metadata to # the exception and change NumPy to take advantage of this. 
            temp = ('%(name)s takes %(qual)s %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': cls,
                'qual': 'exactly' if len(cls.nargs) == 1 else 'at least',
                'args': min(cls.nargs),
                'plural': 's'*(min(cls.nargs) != 1),
                'given': n})

        evaluate = options.get('evaluate', global_evaluate[0])
        result = super(Function, cls).__new__(cls, *args, **options)
        if not evaluate or not isinstance(result, cls):
            return result

        # _should_evalf returns -1 for "do not evalf", so pr2 > 0 means
        # *every* argument requested automatic evaluation; in that case
        # evaluate numerically to the highest precision (pr) found among
        # the arguments.
        pr = max(cls._should_evalf(a) for a in result.args)
        pr2 = min(cls._should_evalf(a) for a in result.args)
        if pr2 > 0:
            return result.evalf(mlib.libmpf.prec_to_dps(pr))
        return result

    @classmethod
    def _should_evalf(cls, arg):
        """
        Decide if the function should automatically evalf().

        By default (in this implementation), this happens if (and only if)
        the ARG is a floating point number, or an Add matching ``a + b*I``
        where ``a`` or ``b`` is a Float.

        This function is used by __new__.

        Returns the precision to evalf to, or -1 if it shouldn't evalf.
        """
        from sympy.core.symbol import Wild
        if arg.is_Float:
            return arg._prec
        if not arg.is_Add:
            return -1
        # Don't use as_real_imag() here, that's too much work
        a, b = Wild('a'), Wild('b')
        m = arg.match(a + b*S.ImaginaryUnit)
        if not m or not (m[a].is_Float or m[b].is_Float):
            return -1
        # Use the highest precision among the Float parts; the appended -1
        # keeps max() well-defined (and signals "don't evalf") if no part
        # is a Float.
        l = [m[i]._prec for i in m if m[i].is_Float]
        l.append(-1)
        return max(l)

    @classmethod
    def class_key(cls):
        """Sort key for ordering function classes in expressions.

        Well-known elementary functions get small hand-assigned indices so
        they print/sort in conventional order; functions accepting any
        number of arguments (nargs is Naturals0) get index 0 and all other
        functions sort last with index 10000.
        """
        from sympy.sets.fancysets import Naturals0
        funcs = {
            'exp': 10,
            'log': 11,
            'sin': 20,
            'cos': 21,
            'tan': 22,
            'cot': 23,
            'sinh': 30,
            'cosh': 31,
            'tanh': 32,
            'coth': 33,
            'conjugate': 40,
            're': 41,
            'im': 42,
            'arg': 43,
        }
        name = cls.__name__
        try:
            i = funcs[name]
        except KeyError:
            i = 0 if isinstance(cls.nargs, Naturals0) else 10000

        return 4, i, name

    @property
    def is_commutative(self):
        """
        Returns whether the function is commutative.
""" if all(getattr(t, 'is_commutative') for t in self.args): return True else: return False def _eval_evalf(self, prec): # Lookup mpmath function based on name fname = self.func.__name__ try: if not hasattr(mpmath, fname): from sympy.utilities.lambdify import MPMATH_TRANSLATIONS fname = MPMATH_TRANSLATIONS[fname] func = getattr(mpmath, fname) except (AttributeError, KeyError): try: return Float(self._imp_(*self.args), prec) except (AttributeError, TypeError): return # Convert all args to mpf or mpc # Convert the arguments to *higher* precision than requested for the # final result. # XXX + 5 is a guess, it is similar to what is used in evalf.py. Should # we be more intelligent about it? try: args = [arg._to_mpmath(prec + 5) for arg in self.args] def bad(m): from mpmath import mpf, mpc # the precision of an mpf value is the last element # if that is 1 (and m[1] is not 1 which would indicate a # power of 2), then the eval failed; so check that none of # the arguments failed to compute to a finite precision. # Note: An mpc value has two parts, the re and imag tuple; # check each of those parts, too. 
Anything else is allowed to # pass if isinstance(m, mpf): m = m._mpf_ return m[1] !=1 and m[-1] == 1 elif isinstance(m, mpc): m, n = m._mpc_ return m[1] !=1 and m[-1] == 1 and \ n[1] !=1 and n[-1] == 1 else: return False if any(bad(a) for a in args): raise ValueError # one or more args failed to compute with significance except ValueError: return with mpmath.workprec(prec): v = func(*args) return Expr._from_mpmath(v, prec) def _eval_derivative(self, s): # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s) i = 0 l = [] for a in self.args: i += 1 da = a.diff(s) if da is S.Zero: continue try: df = self.fdiff(i) except ArgumentIndexError: df = Function.fdiff(self, i) l.append(df * da) return Add(*l) def _eval_is_commutative(self): return fuzzy_and(a.is_commutative for a in self.args) def _eval_is_complex(self): return fuzzy_and(a.is_complex for a in self.args) def as_base_exp(self): """ Returns the method as the 2-tuple (base, exponent). """ return self, S.One def _eval_aseries(self, n, args0, x, logx): """ Compute an asymptotic expansion around args0, in terms of self.args. This function is only used internally by _eval_nseries and should not be called directly; derived classes can overwrite this to implement asymptotic expansions. """ from sympy.utilities.misc import filldedent raise PoleError(filldedent(''' Asymptotic expansion of %s around %s is not implemented.''' % (type(self), args0))) def _eval_nseries(self, x, n, logx): """ This function does compute series for multivariate functions, but the expansion is always in terms of *one* variable. 
Examples ======== >>> from sympy import atan2 >>> from sympy.abc import x, y >>> atan2(x, y).series(x, n=2) atan2(0, y) + x/y + O(x**2) >>> atan2(x, y).series(y, n=2) -y/x + atan2(x, 0) + O(y**2) This function also computes asymptotic expansions, if necessary and possible: >>> from sympy import loggamma >>> loggamma(1/x)._eval_nseries(x,0,None) -1/x - log(x)/x + log(x)/2 + O(1) """ from sympy import Order from sympy.sets.sets import FiniteSet args = self.args args0 = [t.limit(x, 0) for t in args] if any(t.is_finite is False for t in args0): from sympy import oo, zoo, nan # XXX could use t.as_leading_term(x) here but it's a little # slower a = [t.compute_leading_term(x, logx=logx) for t in args] a0 = [t.limit(x, 0) for t in a] if any([t.has(oo, -oo, zoo, nan) for t in a0]): return self._eval_aseries(n, args0, x, logx) # Careful: the argument goes to oo, but only logarithmically so. We # are supposed to do a power series expansion "around the # logarithmic term". e.g. # f(1+x+log(x)) # -> f(1+logx) + x*f'(1+logx) + O(x**2) # where 'logx' is given in the argument a = [t._eval_nseries(x, n, logx) for t in args] z = [r - r0 for (r, r0) in zip(a, a0)] p = [Dummy() for t in z] q = [] v = None for ai, zi, pi in zip(a0, z, p): if zi.has(x): if v is not None: raise NotImplementedError q.append(ai + pi) v = pi else: q.append(ai) e1 = self.func(*q) if v is None: return e1 s = e1._eval_nseries(v, n, logx) o = s.getO() s = s.removeO() s = s.subs(v, zi).expand() + Order(o.expr.subs(v, zi), x) return s if (self.func.nargs is S.Naturals0 or (self.func.nargs == FiniteSet(1) and args0[0]) or any(c > 1 for c in self.func.nargs)): e = self e1 = e.expand() if e == e1: #for example when e = sin(x+1) or e = sin(cos(x)) #let's try the general algorithm term = e.subs(x, S.Zero) if term.is_finite is False or term is S.NaN: raise PoleError("Cannot expand %s around 0" % (self)) series = term fact = S.One _x = Dummy('x') e = e.subs(x, _x) for i in range(n - 1): i += 1 fact *= Rational(i) e = 
e.diff(_x) subs = e.subs(_x, S.Zero) if subs is S.NaN: # try to evaluate a limit if we have to subs = e.limit(_x, S.Zero) if subs.is_finite is False: raise PoleError("Cannot expand %s around 0" % (self)) term = subs*(x**i)/fact term = term.expand() series += term return series + Order(x**n, x) return e1.nseries(x, n=n, logx=logx) arg = self.args[0] l = [] g = None # try to predict a number of terms needed nterms = n + 2 cf = Order(arg.as_leading_term(x), x).getn() if cf != 0: nterms = int(nterms / cf) for i in range(nterms): g = self.taylor_term(i, arg, g) g = g.nseries(x, n=n, logx=logx) l.append(g) return Add(*l) + Order(x**n, x) def fdiff(self, argindex=1): """ Returns the first derivative of the function. """ if not (1 <= argindex <= len(self.args)): raise ArgumentIndexError(self, argindex) if not self.args[argindex - 1].is_Symbol: # See issue 4624 and issue 4719 and issue 5600 arg_dummy = Dummy('xi_%i' % argindex) arg_dummy.dummy_index = hash(self.args[argindex - 1]) return Subs(Derivative( self.subs(self.args[argindex - 1], arg_dummy), arg_dummy), arg_dummy, self.args[argindex - 1]) return Derivative(self, self.args[argindex - 1], evaluate=False) def _eval_as_leading_term(self, x): """Stub that should be overridden by new Functions to return the first non-zero term in a series if ever an x-dependent argument whose leading term vanishes as x -> 0 might be encountered. See, for example, cos._eval_as_leading_term. """ from sympy import Order args = [a.as_leading_term(x) for a in self.args] o = Order(1, x) if any(x in a.free_symbols and o.contains(a) for a in args): # Whereas x and any finite number are contained in O(1, x), # expressions like 1/x are not. If any arg simplified to a # vanishing expression as x -> 0 (like x or x**2, but not # 3, 1/x, etc...) then the _eval_as_leading_term is needed # to supply the first non-zero term of the series, # # e.g. 
expression leading term # ---------- ------------ # cos(1/x) cos(1/x) # cos(cos(x)) cos(1) # cos(x) 1 <- _eval_as_leading_term needed # sin(x) x <- _eval_as_leading_term needed # raise NotImplementedError( '%s has no _eval_as_leading_term routine' % self.func) else: return self.func(*args) def _sage_(self): import sage.all as sage fname = self.func.__name__ func = getattr(sage, fname) args = [arg._sage_() for arg in self.args] return func(*args) class AppliedUndef(Function): """ Base class for expressions resulting from the application of an undefined function. """ def __new__(cls, *args, **options): args = list(map(sympify, args)) obj = super(AppliedUndef, cls).__new__(cls, *args, **options) return obj def _eval_as_leading_term(self, x): return self def _sage_(self): import sage.all as sage fname = str(self.func) args = [arg._sage_() for arg in self.args] func = sage.function(fname, *args) return func class UndefinedFunction(FunctionClass): """ The (meta)class of undefined functions. """ def __new__(mcl, name, **kwargs): ret = BasicMeta.__new__(mcl, name, (AppliedUndef,), kwargs) ret.__module__ = None return ret def __instancecheck__(cls, instance): return cls in type(instance).__mro__ UndefinedFunction.__eq__ = lambda s, o: (isinstance(o, s.__class__) and (s.class_key() == o.class_key())) class WildFunction(Function, AtomicExpr): """ A WildFunction function matches any function (with its arguments). 
    Examples
    ========

    >>> from sympy import WildFunction, Function, cos
    >>> from sympy.abc import x, y
    >>> F = WildFunction('F')
    >>> f = Function('f')
    >>> F.nargs
    Naturals0()
    >>> x.match(F)
    >>> F.match(F)
    {F_: F_}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> cos(x).match(F)
    {F_: cos(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a given number of arguments, set ``nargs`` to the
    desired value at instantiation:

    >>> F = WildFunction('F', nargs=2)
    >>> F.nargs
    {2}
    >>> f(x).match(F)
    >>> f(x, y).match(F)
    {F_: f(x, y)}

    To match functions with a range of arguments, set ``nargs`` to a tuple
    containing the desired number of arguments, e.g. if ``nargs = (1, 2)``
    then functions with 1 or 2 arguments will be matched.

    >>> F = WildFunction('F', nargs=(1, 2))
    >>> F.nargs
    {1, 2}
    >>> f(x).match(F)
    {F_: f(x)}
    >>> f(x, y).match(F)
    {F_: f(x, y)}
    >>> f(x, y, 1).match(F)

    """

    include = set()

    def __init__(cls, name, **assumptions):
        # Record the wild function's name and canonicalize the accepted
        # argument counts into a Set (default: any count, Naturals0).
        # NOTE(review): the first parameter is spelled ``cls`` —
        # presumably because, via the FunctionClass metaclass machinery,
        # a WildFunction instance also acts like a function class;
        # confirm before renaming.
        from sympy.sets.sets import Set, FiniteSet
        cls.name = name
        nargs = assumptions.pop('nargs', S.Naturals0)
        if not isinstance(nargs, Set):
            # Canonicalize nargs here.  See also FunctionClass.
            if is_sequence(nargs):
                nargs = tuple(ordered(set(nargs)))
            elif nargs is not None:
                nargs = (as_int(nargs),)
            nargs = FiniteSet(*nargs)
        cls.nargs = nargs

    def matches(self, expr, repl_dict={}, old=False):
        # Match any applied function whose argument count is allowed by
        # self.nargs.  On success return repl_dict extended with
        # {self: expr}; otherwise return None.
        if not isinstance(expr, (AppliedUndef, Function)):
            return None
        if len(expr.args) not in self.nargs:
            return None

        # Copy before updating so the caller's dict (and the shared
        # mutable default) is never mutated.
        repl_dict = repl_dict.copy()
        repl_dict[self] = expr
        return repl_dict


class Derivative(Expr):
    """
    Carries out differentiation of the given expression with respect to symbols.

    expr must define ._eval_derivative(symbol) method that returns
    the differentiation result. This function only needs to consider the
    non-trivial case where expr contains symbol and it should call the diff()
    method internally (not _eval_derivative); Derivative should be the only
    one to call _eval_derivative.
Simplification of high-order derivatives: Because there can be a significant amount of simplification that can be done when multiple differentiations are performed, results will be automatically simplified in a fairly conservative fashion unless the keyword ``simplify`` is set to False. >>> from sympy import sqrt, diff >>> from sympy.abc import x >>> e = sqrt((x + 1)**2 + x) >>> diff(e, x, 5, simplify=False).count_ops() 136 >>> diff(e, x, 5).count_ops() 30 Ordering of variables: If evaluate is set to True and the expression can not be evaluated, the list of differentiation symbols will be sorted, that is, the expression is assumed to have continuous derivatives up to the order asked. This sorting assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols commute, but Symbol and non-Symbol derivatives don't commute with each other. Derivative wrt non-Symbols: This class also allows derivatives wrt non-Symbols that have _diff_wrt set to True, such as Function and Derivative. When a derivative wrt a non- Symbol is attempted, the non-Symbol is temporarily converted to a Symbol while the differentiation is performed. Note that this may seem strange, that Derivative allows things like f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for allowing this syntax is to make it easier to work with variational calculus (i.e., the Euler-Lagrange method). The best way to understand this is that the action of derivative with respect to a non-Symbol is defined by the above description: the object is substituted for a Symbol and the derivative is taken with respect to that. This action is only allowed for objects for which this can be done unambiguously, for example Function and Derivative objects. Note that this leads to what may appear to be mathematically inconsistent results. 
For example:: >>> from sympy import cos, sin, sqrt >>> from sympy.abc import x >>> (2*cos(x)).diff(cos(x)) 2 >>> (2*sqrt(1 - sin(x)**2)).diff(cos(x)) 0 This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are identically equal. However this is the wrong way to think of this. Think of it instead as if we have something like this:: >>> from sympy.abc import c, s >>> def F(u): ... return 2*u ... >>> def G(u): ... return 2*sqrt(1 - u**2) ... >>> F(cos(x)) 2*cos(x) >>> G(sin(x)) 2*sqrt(-sin(x)**2 + 1) >>> F(c).diff(c) 2 >>> F(c).diff(c) 2 >>> G(s).diff(c) 0 >>> G(sin(x)).diff(cos(x)) 0 Here, the Symbols c and s act just like the functions cos(x) and sin(x), respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at* c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s) *at* s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we define the function first and evaluate it at the function, but we can actually unambiguously do this in reverse in SymPy, because expr.subs(Function, Symbol) is well-defined: just structurally replace the function everywhere it appears in the expression. This is the same notational convenience used in the Euler-Lagrange method when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant is that the expression in question is represented by some F(t, u, v) at u = f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means F(t, u, v).diff(u) at u = f(t). We do not allow derivatives to be taken with respect to expressions where this is not so well defined. For example, we do not allow expr.diff(x*y) because there are multiple ways of structurally defining where x*y appears in an expression, some of which may surprise the reader (for example, a very strict definition would have that (x*y*z).diff(x*y) == 0). >>> from sympy.abc import x, y, z >>> (x*y*z).diff(x*y) Traceback (most recent call last): ... 
ValueError: Can't differentiate wrt the variable: x*y, 1 Note that this definition also fits in nicely with the definition of the chain rule. Note how the chain rule in SymPy is defined using unevaluated Subs objects:: >>> from sympy import symbols, Function >>> f, g = symbols('f g', cls=Function) >>> f(2*g(x)).diff(x) 2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (2*g(x),)) >>> f(g(x)).diff(x) Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),)) Finally, note that, to be consistent with variational calculus, and to ensure that the definition of substituting a Function for a Symbol in an expression is well-defined, derivatives of functions are assumed to not be related to the function. In other words, we have:: >>> from sympy import diff >>> diff(f(x), x).diff(f(x)) 0 The same is true for derivatives of different orders:: >>> diff(f(x), x, 2).diff(diff(f(x), x, 1)) 0 >>> diff(f(x), x, 1).diff(diff(f(x), x, 2)) 0 Note, any class can allow derivatives to be taken with respect to itself. See the docstring of Expr._diff_wrt. Examples ======== Some basic examples: >>> from sympy import Derivative, Symbol, Function >>> f = Function('f') >>> g = Function('g') >>> x = Symbol('x') >>> y = Symbol('y') >>> Derivative(x**2, x, evaluate=True) 2*x >>> Derivative(Derivative(f(x,y), x), y) Derivative(f(x, y), x, y) >>> Derivative(f(x), x, 3) Derivative(f(x), x, x, x) >>> Derivative(f(x, y), y, x, evaluate=True) Derivative(f(x, y), x, y) Now some derivatives wrt functions: >>> Derivative(f(x)**2, f(x), evaluate=True) 2*f(x) >>> Derivative(f(g(x)), x, evaluate=True) Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (g(x),)) """ is_Derivative = True @property def _diff_wrt(self): """Allow derivatives wrt Derivatives if it contains a function. 
Examples ======== >>> from sympy import Function, Symbol, Derivative >>> f = Function('f') >>> x = Symbol('x') >>> Derivative(f(x),x)._diff_wrt True >>> Derivative(x**2,x)._diff_wrt False """ if self.expr.is_Function: return True else: return False def __new__(cls, expr, *variables, **assumptions): expr = sympify(expr) # There are no variables, we differentiate wrt all of the free symbols # in expr. if not variables: variables = expr.free_symbols if len(variables) != 1: from sympy.utilities.misc import filldedent raise ValueError(filldedent(''' Since there is more than one variable in the expression, the variable(s) of differentiation must be supplied to differentiate %s''' % expr)) # Standardize the variables by sympifying them and making appending a # count of 1 if there is only one variable: diff(e,x)->diff(e,x,1). variables = list(sympify(variables)) if not variables[-1].is_Integer or len(variables) == 1: variables.append(S.One) # Split the list of variables into a list of the variables we are diff # wrt, where each element of the list has the form (s, count) where # s is the entity to diff wrt and count is the order of the # derivative. variable_count = [] all_zero = True i = 0 while i < len(variables) - 1: # process up to final Integer v, count = variables[i: i + 2] iwas = i if v._diff_wrt: # We need to test the more specific case of count being an # Integer first. 
                if count.is_Integer:
                    count = int(count)
                    i += 2
                elif count._diff_wrt:
                    # The item after v is itself a differentiation
                    # variable, not a count, so v's count defaults to 1.
                    count = 1
                    i += 1

            if i == iwas:  # didn't get an update because of bad input
                from sympy.utilities.misc import filldedent
                # NOTE(review): the ordinal suffix is chosen from the last
                # digit only, so e.g. a count of 11 would render as "11st";
                # this affects only the error-message text.
                last_digit = int(str(count)[-1])
                ordinal = 'st' if last_digit == 1 else 'nd' if last_digit == 2 else 'rd' if last_digit == 3 else 'th'
                raise ValueError(filldedent('''
                Can\'t calculate %s%s derivative wrt %s.''' % (count, ordinal, v)))

            if all_zero and not count == 0:
                all_zero = False

            if count:
                variable_count.append((v, count))

        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if all_zero:
            return expr

        # Pop evaluate because it is not really an assumption and we will need
        # to track it carefully below.
        evaluate = assumptions.pop('evaluate', False)

        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate:
            symbol_set = set(sc[0] for sc in variable_count if sc[0].is_Symbol)
            if symbol_set.difference(expr.free_symbols):
                return S.Zero

        # We make a generator so as to only generate a variable when necessary.
        # If a high order of derivative is requested and the expr becomes 0
        # after a few differentiations, then we won't need the other variables.
        variablegen = (v for v, count in variable_count for i in range(count))

        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
                (not isinstance(expr, Derivative))):
            variables = list(variablegen)
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too aggressive if evaluate
            # is False, so we don't do it in that case.
if evaluate: #TODO: check if assumption of discontinuous derivatives exist variables = cls._sort_variables(variables) # Here we *don't* need to reinject evaluate into assumptions # because we are done with it and it is not an assumption that # Expr knows about. obj = Expr.__new__(cls, expr, *variables, **assumptions) return obj # Compute the derivative now by repeatedly calling the # _eval_derivative method of expr for each variable. When this method # returns None, the derivative couldn't be computed wrt that variable # and we save the variable for later. unhandled_variables = [] # Once we encouter a non_symbol that is unhandled, we stop taking # derivatives entirely. This is because derivatives wrt functions # don't commute with derivatives wrt symbols and we can't safely # continue. unhandled_non_symbol = False nderivs = 0 # how many derivatives were performed for v in variablegen: is_symbol = v.is_Symbol if unhandled_non_symbol: obj = None else: if not is_symbol: new_v = Dummy('xi_%i' % i) new_v.dummy_index = hash(v) expr = expr.subs(v, new_v) old_v = v v = new_v obj = expr._eval_derivative(v) nderivs += 1 if not is_symbol: if obj is not None: obj = obj.subs(v, old_v) v = old_v if obj is None: unhandled_variables.append(v) if not is_symbol: unhandled_non_symbol = True elif obj is S.Zero: return S.Zero else: expr = obj if unhandled_variables: unhandled_variables = cls._sort_variables(unhandled_variables) expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions) else: # We got a Derivative at the end of it all, and we rebuild it by # sorting its variables. if isinstance(expr, Derivative): expr = cls( expr.args[0], *cls._sort_variables(expr.args[1:]) ) if nderivs > 1 and assumptions.get('simplify', True): from sympy.core.exprtools import factor_terms from sympy.simplify.simplify import signsimp expr = factor_terms(signsimp(expr)) return expr @classmethod def _sort_variables(cls, vars): """Sort variables, but disallow sorting of non-symbols. 
When taking derivatives, the following rules usually hold: * Derivative wrt different symbols commute. * Derivative wrt different non-symbols commute. * Derivatives wrt symbols and non-symbols don't commute. Examples ======== >>> from sympy import Derivative, Function, symbols >>> vsort = Derivative._sort_variables >>> x, y, z = symbols('x y z') >>> f, g, h = symbols('f g h', cls=Function) >>> vsort((x,y,z)) [x, y, z] >>> vsort((h(x),g(x),f(x))) [f(x), g(x), h(x)] >>> vsort((z,y,x,h(x),g(x),f(x))) [x, y, z, f(x), g(x), h(x)] >>> vsort((x,f(x),y,f(y))) [x, f(x), y, f(y)] >>> vsort((y,x,g(x),f(x),z,h(x),y,x)) [x, y, f(x), g(x), z, h(x), x, y] >>> vsort((z,y,f(x),x,f(x),g(x))) [y, z, f(x), x, f(x), g(x)] >>> vsort((z,y,f(x),x,f(x),g(x),z,z,y,x)) [y, z, f(x), x, f(x), g(x), x, y, z, z] """ sorted_vars = [] symbol_part = [] non_symbol_part = [] for v in vars: if not v.is_Symbol: if len(symbol_part) > 0: sorted_vars.extend(sorted(symbol_part, key=default_sort_key)) symbol_part = [] non_symbol_part.append(v) else: if len(non_symbol_part) > 0: sorted_vars.extend(sorted(non_symbol_part, key=default_sort_key)) non_symbol_part = [] symbol_part.append(v) if len(non_symbol_part) > 0: sorted_vars.extend(sorted(non_symbol_part, key=default_sort_key)) if len(symbol_part) > 0: sorted_vars.extend(sorted(symbol_part, key=default_sort_key)) return sorted_vars def _eval_is_commutative(self): return self.expr.is_commutative def _eval_derivative(self, v): # If the variable s we are diff wrt is not in self.variables, we # assume that we might be able to take the derivative. if v not in self.variables: obj = self.expr.diff(v) if obj is S.Zero: return S.Zero if isinstance(obj, Derivative): return obj.func(obj.expr, *(self.variables + obj.variables)) # The derivative wrt s could have simplified things such that the # derivative wrt things in self.variables can now be done. Thus, # we set evaluate=True to see if there are any other derivatives # that can be done. 
The most common case is when obj is a simple # number so that the derivative wrt anything else will vanish. return self.func(obj, *self.variables, evaluate=True) # In this case s was in self.variables so the derivatve wrt s has # already been attempted and was not computed, either because it # couldn't be or evaluate=False originally. return self.func(self.expr, *(self.variables + (v, )), evaluate=False) def doit(self, **hints): expr = self.expr if hints.get('deep', True): expr = expr.doit(**hints) hints['evaluate'] = True return self.func(expr, *self.variables, **hints) @_sympifyit('z0', NotImplementedError) def doit_numerically(self, z0): """ Evaluate the derivative at z numerically. When we can represent derivatives at a point, this should be folded into the normal evalf. For now, we need a special method. """ import mpmath from sympy.core.expr import Expr if len(self.free_symbols) != 1 or len(self.variables) != 1: raise NotImplementedError('partials and higher order derivatives') z = list(self.free_symbols)[0] def eval(x): f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec)) f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec)) return f0._to_mpmath(mpmath.mp.prec) return Expr._from_mpmath(mpmath.diff(eval, z0._to_mpmath(mpmath.mp.prec)), mpmath.mp.prec) @property def expr(self): return self._args[0] @property def variables(self): return self._args[1:] @property def free_symbols(self): return self.expr.free_symbols def _eval_subs(self, old, new): if old in self.variables and not new.is_Symbol: # issue 4719 return Subs(self, old, new) # If both are Derivatives with the same expr, check if old is # equivalent to self or if old is a subderivative of self. if old.is_Derivative and old.expr == self.args[0]: # Check if canonnical order of variables is equal. old_vars = Derivative._sort_variables(old.variables) self_vars = Derivative._sort_variables(self.args[1:]) if old_vars == self_vars: return new # Check if olf is a subderivative of self. 
if len(old_vars) < len(self_vars): self_vars_front = [] match = True while old_vars and self_vars and match: if old_vars[0] == self_vars[0]: old_vars.pop(0) self_vars.pop(0) else: # If self_v does not match old_v, we need to check if # the types are the same (symbol vs non-symbol). If # they are, we can continue checking self_vars for a # match. if old_vars[0].is_Symbol != self_vars[0].is_Symbol: match = False else: self_vars_front.append(self_vars.pop(0)) if match: variables = self_vars_front + self_vars return Derivative(new, *variables) return Derivative(*(x._subs(old, new) for x in self.args)) def _eval_lseries(self, x, logx): dx = self.args[1:] for term in self.args[0].lseries(x, logx=logx): yield self.func(term, *dx) def _eval_nseries(self, x, n, logx): arg = self.args[0].nseries(x, n=n, logx=logx) o = arg.getO() dx = self.args[1:] rv = [self.func(a, *dx) for a in Add.make_args(arg.removeO())] if o: rv.append(o/x) return Add(*rv) def _eval_as_leading_term(self, x): return self.args[0].as_leading_term(x) def _sage_(self): import sage.all as sage args = [arg._sage_() for arg in self.args] return sage.derivative(*args) class Lambda(Expr): """ Lambda(x, expr) represents a lambda function similar to Python's 'lambda x: expr'. A function of several variables is written as Lambda((x, y, ...), expr). 
    A simple example:

    >>> from sympy import Lambda
    >>> from sympy.abc import x
    >>> f = Lambda(x, x**2)
    >>> f(4)
    16

    For multivariate functions, use:

    >>> from sympy.abc import y, z, t
    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73

    A handy shortcut for lots of arguments:

    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z

    """
    is_Function = True

    def __new__(cls, variables, expr):
        from sympy.sets.sets import FiniteSet
        # Validate that every binding variable is a Symbol.
        # NOTE(review): the TypeError raised inside the try is NOT in the
        # except tuple below, so a non-symbol that *has* an is_Symbol
        # attribute escapes as TypeError, while one lacking the attribute
        # (AttributeError) is converted to ValueError — confirm whether
        # callers rely on this split before unifying.
        try:
            for v in variables if iterable(variables) else [variables]:
                if not v.is_Symbol:
                    raise TypeError('variable is not a symbol: %s' % v)
        except (AssertionError, AttributeError):
            raise ValueError('variable is not a Symbol: %s' % v)
        try:
            variables = Tuple(*variables)
        except TypeError:
            variables = Tuple(variables)
        # Lambda(x, x) collapses to the identity function singleton.
        if len(variables) == 1 and variables[0] == expr:
            return S.IdentityFunction

        obj = Expr.__new__(cls, Tuple(*variables), S(expr))
        # A Lambda accepts exactly as many call arguments as it binds.
        obj.nargs = FiniteSet(len(variables))
        return obj

    @property
    def variables(self):
        """The variables used in the internal representation of the function"""
        return self._args[0]

    @property
    def expr(self):
        """The return value of the function"""
        return self._args[1]

    @property
    def free_symbols(self):
        # Bound variables are not free in the Lambda.
        return self.expr.free_symbols - set(self.variables)

    def __call__(self, *args):
        n = len(args)
        if n not in self.nargs:  # Lambda only ever has 1 value in nargs
            # XXX: exception message must be in exactly this format to
            # make it work with NumPy's functions like vectorize(). See,
            # for example, https://github.com/numpy/numpy/issues/1697.
            # The ideal solution would be just to attach metadata to
            # the exception and change NumPy to take advantage of this.
            ## XXX does this apply to Lambda? If not, remove this comment.
            temp = ('%(name)s takes exactly %(args)s '
                    'argument%(plural)s (%(given)s given)')
            raise TypeError(temp % {
                'name': self,
                'args': list(self.nargs)[0],
                'plural': 's'*(list(self.nargs)[0] != 1),
                'given': n})
        # Substitute the call arguments for the bound variables all at
        # once; xreplace performs a structural (non-simplifying) swap.
        return self.expr.xreplace(dict(list(zip(self.variables, args))))

    def __eq__(self, other):
        # Two Lambdas are equal when they have the same arity and their
        # bodies agree after renaming other's variables to self's
        # (alpha-equivalence).
        if not isinstance(other, Lambda):
            return False
        if self.nargs != other.nargs:
            return False

        selfexpr = self.args[1]
        otherexpr = other.args[1]
        otherexpr = otherexpr.xreplace(dict(list(zip(other.args[0], self.args[0]))))
        return selfexpr == otherexpr

    def __ne__(self, other):
        return not(self == other)

    def __hash__(self):
        return super(Lambda, self).__hash__()

    def _hashable_content(self):
        # Hash on the body with variables canonicalized — presumably so
        # alpha-equivalent Lambdas hash alike, matching __eq__;
        # canonical_variables is defined elsewhere (Basic) — confirm.
        return (self.expr.xreplace(self.canonical_variables),)

    @property
    def is_identity(self):
        """Return ``True`` if this ``Lambda`` is an identity function. """
        if len(self.args) == 2:
            return self.args[0] == self.args[1]
        else:
            return None


class Subs(Expr):
    """
    Represents unevaluated substitutions of an expression.

    ``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or
    list of distinct variables and a point or list of evaluation points
    corresponding to those variables.

    ``Subs`` objects are generally useful to represent unevaluated derivatives
    calculated at a point.

    The variables may be expressions, but they are subjected to the limitations
    of subs(), so it is usually a good practice to use only symbols for
    variables, since in that case there can be no ambiguity.

    There's no automatic expansion - use the method .doit() to effect all
    possible substitutions of the object and also of objects inside the
    expression.

    When evaluating derivatives at a point that is not a symbol, a Subs object
    is returned. One is also able to calculate derivatives of Subs objects - in
    this case the expression is always expanded (for the unevaluated form, use
    Derivative()).
A simple example: >>> from sympy import Subs, Function, sin >>> from sympy.abc import x, y, z >>> f = Function('f') >>> e = Subs(f(x).diff(x), x, y) >>> e.subs(y, 0) Subs(Derivative(f(x), x), (x,), (0,)) >>> e.subs(f, sin).doit() cos(y) An example with several variables: >>> Subs(f(x)*sin(y) + z, (x, y), (0, 1)) Subs(z + f(x)*sin(y), (x, y), (0, 1)) >>> _.doit() z + f(0)*sin(1) """ def __new__(cls, expr, variables, point, **assumptions): from sympy import Symbol if not is_sequence(variables, Tuple): variables = [variables] variables = list(sympify(variables)) if list(uniq(variables)) != variables: repeated = [ v for v in set(variables) if variables.count(v) > 1 ] raise ValueError('cannot substitute expressions %s more than ' 'once.' % repeated) point = Tuple(*(point if is_sequence(point, Tuple) else [point])) if len(point) != len(variables): raise ValueError('Number of point values must be the same as ' 'the number of variables.') expr = sympify(expr) # use symbols with names equal to the point value (with preppended _) # to give a variable-independent expression pre = "_" pts = sorted(set(point), key=default_sort_key) from sympy.printing import StrPrinter class CustomStrPrinter(StrPrinter): def _print_Dummy(self, expr): return str(expr) + str(expr.dummy_index) def mystr(expr, **settings): p = CustomStrPrinter(settings) return p.doprint(expr) while 1: s_pts = dict([(p, Symbol(pre + mystr(p))) for p in pts]) reps = [(v, s_pts[p]) for v, p in zip(variables, point)] # if any underscore-preppended symbol is already a free symbol # and is a variable with a different point value, then there # is a clash, e.g. 
_0 clashes in Subs(_0 + _1, (_0, _1), (1, 0)) # because the new symbol that would be created is _1 but _1 # is already mapped to 0 so __0 and __1 are used for the new # symbols if any(r in expr.free_symbols and r in variables and Symbol(pre + mystr(point[variables.index(r)])) != r for _, r in reps): pre += "_" continue break obj = Expr.__new__(cls, expr, Tuple(*variables), point) obj._expr = expr.subs(reps) return obj def _eval_is_commutative(self): return self.expr.is_commutative def doit(self): return self.expr.doit().subs(list(zip(self.variables, self.point))) def evalf(self, prec=None, **options): return self.doit().evalf(prec, **options) n = evalf @property def variables(self): """The variables to be evaluated""" return self._args[1] @property def expr(self): """The expression on which the substitution operates""" return self._args[0] @property def point(self): """The values for which the variables are to be substituted""" return self._args[2] @property def free_symbols(self): return (self.expr.free_symbols - set(self.variables) | set(self.point.free_symbols)) def __eq__(self, other): if not isinstance(other, Subs): return False return self._expr == other._expr def __ne__(self, other): return not(self == other) def __hash__(self): return super(Subs, self).__hash__() def _hashable_content(self): return (self._expr.xreplace(self.canonical_variables),) def _eval_subs(self, old, new): if old in self.variables: return self def _eval_derivative(self, s): if s not in self.free_symbols: return S.Zero return self.func(self.expr.diff(s), self.variables, self.point).doit() \ + Add(*[ Subs(point.diff(s) * self.expr.diff(arg), self.variables, self.point).doit() for arg, point in zip(self.variables, self.point) ]) def diff(f, *symbols, **kwargs): """ Differentiate f with respect to symbols. This is just a wrapper to unify .diff() and the Derivative class; its interface is similar to that of integrate(). 
You can use the same shortcuts for multiple variables as with Derivative. For example, diff(f(x), x, x, x) and diff(f(x), x, 3) both return the third derivative of f(x). You can pass evaluate=False to get an unevaluated Derivative class. Note that if there are 0 symbols (such as diff(f(x), x, 0), then the result will be the function (the zeroth derivative), even if evaluate=False. Examples ======== >>> from sympy import sin, cos, Function, diff >>> from sympy.abc import x, y >>> f = Function('f') >>> diff(sin(x), x) cos(x) >>> diff(f(x), x, x, x) Derivative(f(x), x, x, x) >>> diff(f(x), x, 3) Derivative(f(x), x, x, x) >>> diff(sin(x)*cos(y), x, 2, y, 2) sin(x)*cos(y) >>> type(diff(sin(x), x)) cos >>> type(diff(sin(x), x, evaluate=False)) <class 'sympy.core.function.Derivative'> >>> type(diff(sin(x), x, 0)) sin >>> type(diff(sin(x), x, 0, evaluate=False)) sin >>> diff(sin(x)) cos(x) >>> diff(sin(x*y)) Traceback (most recent call last): ... ValueError: specify differentiation variables to differentiate sin(x*y) Note that ``diff(sin(x))`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. References ========== http://reference.wolfram.com/legacy/v5_2/Built-inFunctions/AlgebraicComputation/Calculus/D.html See Also ======== Derivative sympy.geometry.util.idiff: computes the derivative implicitly """ kwargs.setdefault('evaluate', True) try: return f._eval_diff(*symbols, **kwargs) except AttributeError: pass return Derivative(f, *symbols, **kwargs) def expand(e, deep=True, modulus=None, power_base=True, power_exp=True, mul=True, log=True, multinomial=True, basic=True, **hints): """ Expand an expression using methods given as hints. Hints evaluated unless explicitly set to False are: ``basic``, ``log``, ``multinomial``, ``mul``, ``power_base``, and ``power_exp`` The following hints are supported but not applied unless set to True: ``complex``, ``func``, and ``trig``. 
In addition, the following meta-hints are supported by some or all of the other hints: ``frac``, ``numer``, ``denom``, ``modulus``, and ``force``. ``deep`` is supported by all hints. Additionally, subclasses of Expr may define their own hints or meta-hints. The ``basic`` hint is used for any special rewriting of an object that should be done automatically (along with the other hints like ``mul``) when expand is called. This is a catch-all hint to handle any sort of expansion that may not be described by the existing hint names. To use this hint an object should override the ``_eval_expand_basic`` method. Objects may also define their own expand methods, which are not run by default. See the API section below. If ``deep`` is set to ``True`` (the default), things like arguments of functions are recursively expanded. Use ``deep=False`` to only expand on the top level. If the ``force`` hint is used, assumptions about variables will be ignored in making the expansion. Hints ===== These hints are run by default mul --- Distributes multiplication over addition: >>> from sympy import cos, exp, sin >>> from sympy.abc import x, y, z >>> (y*(x + z)).expand(mul=True) x*y + y*z multinomial ----------- Expand (x + y + ...)**n where n is a positive integer. >>> ((x + y + z)**2).expand(multinomial=True) x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2 power_exp --------- Expand addition in exponents into multiplied bases. >>> exp(x + y).expand(power_exp=True) exp(x)*exp(y) >>> (2**(x + y)).expand(power_exp=True) 2**x*2**y power_base ---------- Split powers of multiplied bases. 
This only happens by default if assumptions allow, or if the ``force`` meta-hint is used: >>> ((x*y)**z).expand(power_base=True) (x*y)**z >>> ((x*y)**z).expand(power_base=True, force=True) x**z*y**z >>> ((2*y)**z).expand(power_base=True) 2**z*y**z Note that in some cases where this expansion always holds, SymPy performs it automatically: >>> (x*y)**2 x**2*y**2 log --- Pull out power of an argument as a coefficient and split logs products into sums of logs. Note that these only work if the arguments of the log function have the proper assumptions--the arguments must be positive and the exponents must be real--or else the ``force`` hint must be True: >>> from sympy import log, symbols >>> log(x**2*y).expand(log=True) log(x**2*y) >>> log(x**2*y).expand(log=True, force=True) 2*log(x) + log(y) >>> x, y = symbols('x,y', positive=True) >>> log(x**2*y).expand(log=True) 2*log(x) + log(y) basic ----- This hint is intended primarily as a way for custom subclasses to enable expansion by default. These hints are not run by default: complex ------- Split an expression into real and imaginary parts. >>> x, y = symbols('x,y') >>> (x + y).expand(complex=True) re(x) + re(y) + I*im(x) + I*im(y) >>> cos(x).expand(complex=True) -I*sin(re(x))*sinh(im(x)) + cos(re(x))*cosh(im(x)) Note that this is just a wrapper around ``as_real_imag()``. Most objects that wish to redefine ``_eval_expand_complex()`` should consider redefining ``as_real_imag()`` instead. func ---- Expand other functions. >>> from sympy import gamma >>> gamma(x + 1).expand(func=True) x*gamma(x) trig ---- Do trigonometric expansions. >>> cos(x + y).expand(trig=True) -sin(x)*sin(y) + cos(x)*cos(y) >>> sin(2*x).expand(trig=True) 2*sin(x)*cos(x) Note that the forms of ``sin(n*x)`` and ``cos(n*x)`` in terms of ``sin(x)`` and ``cos(x)`` are not unique, due to the identity `\sin^2(x) + \cos^2(x) = 1`. The current implementation uses the form obtained from Chebyshev polynomials, but this may change. 
See `this MathWorld article <http://mathworld.wolfram.com/Multiple-AngleFormulas.html>`_ for more information. Notes ===== - You can shut off unwanted methods:: >>> (exp(x + y)*(x + y)).expand() x*exp(x)*exp(y) + y*exp(x)*exp(y) >>> (exp(x + y)*(x + y)).expand(power_exp=False) x*exp(x + y) + y*exp(x + y) >>> (exp(x + y)*(x + y)).expand(mul=False) (x + y)*exp(x)*exp(y) - Use deep=False to only expand on the top level:: >>> exp(x + exp(x + y)).expand() exp(x)*exp(exp(x)*exp(y)) >>> exp(x + exp(x + y)).expand(deep=False) exp(x)*exp(exp(x + y)) - Hints are applied in an arbitrary, but consistent order (in the current implementation, they are applied in alphabetical order, except multinomial comes before mul, but this may change). Because of this, some hints may prevent expansion by other hints if they are applied first. For example, ``mul`` may distribute multiplications and prevent ``log`` and ``power_base`` from expanding them. Also, if ``mul`` is applied before ``multinomial`, the expression might not be fully distributed. The solution is to use the various ``expand_hint`` helper functions or to use ``hint=False`` to this function to finely control which hints are applied. Here are some examples:: >>> from sympy import expand, expand_mul, expand_power_base >>> x, y, z = symbols('x,y,z', positive=True) >>> expand(log(x*(y + z))) log(x) + log(y + z) Here, we see that ``log`` was applied before ``mul``. 
To get the mul expanded form, either of the following will work:: >>> expand_mul(log(x*(y + z))) log(x*y + x*z) >>> expand(log(x*(y + z)), log=False) log(x*y + x*z) A similar thing can happen with the ``power_base`` hint:: >>> expand((x*(y + z))**x) (x*y + x*z)**x To get the ``power_base`` expanded form, either of the following will work:: >>> expand((x*(y + z))**x, mul=False) x**x*(y + z)**x >>> expand_power_base((x*(y + z))**x) x**x*(y + z)**x >>> expand((x + y)*y/x) y + y**2/x The parts of a rational expression can be targeted:: >>> expand((x + y)*y/x/(x + 1), frac=True) (x*y + y**2)/(x**2 + x) >>> expand((x + y)*y/x/(x + 1), numer=True) (x*y + y**2)/(x*(x + 1)) >>> expand((x + y)*y/x/(x + 1), denom=True) y*(x + y)/(x**2 + x) - The ``modulus`` meta-hint can be used to reduce the coefficients of an expression post-expansion:: >>> expand((3*x + 1)**2) 9*x**2 + 6*x + 1 >>> expand((3*x + 1)**2, modulus=5) 4*x**2 + x + 1 - Either ``expand()`` the function or ``.expand()`` the method can be used. Both are equivalent:: >>> expand((x + 1)**2) x**2 + 2*x + 1 >>> ((x + 1)**2).expand() x**2 + 2*x + 1 API === Objects can define their own expand hints by defining ``_eval_expand_hint()``. The function should take the form:: def _eval_expand_hint(self, **hints): # Only apply the method to the top-level expression ... See also the example below. Objects should define ``_eval_expand_hint()`` methods only if ``hint`` applies to that specific object. The generic ``_eval_expand_hint()`` method defined in Expr will handle the no-op case. Each hint should be responsible for expanding that hint only. Furthermore, the expansion should be applied to the top-level expression only. ``expand()`` takes care of the recursion that happens when ``deep=True``. You should only call ``_eval_expand_hint()`` methods directly if you are 100% sure that the object has the method, as otherwise you are liable to get unexpected ``AttributeError``s. 
Note, again, that you do not need to recursively apply the hint to args of your object: this is handled automatically by ``expand()``. ``_eval_expand_hint()`` should generally not be used at all outside of an ``_eval_expand_hint()`` method. If you want to apply a specific expansion from within another method, use the public ``expand()`` function, method, or ``expand_hint()`` functions. In order for expand to work, objects must be rebuildable by their args, i.e., ``obj.func(*obj.args) == obj`` must hold. Expand methods are passed ``**hints`` so that expand hints may use 'metahints'--hints that control how different expand methods are applied. For example, the ``force=True`` hint described above that causes ``expand(log=True)`` to ignore assumptions is such a metahint. The ``deep`` meta-hint is handled exclusively by ``expand()`` and is not passed to ``_eval_expand_hint()`` methods. Note that expansion hints should generally be methods that perform some kind of 'expansion'. For hints that simply rewrite an expression, use the .rewrite() API. Examples ======== >>> from sympy import Expr, sympify >>> class MyClass(Expr): ... def __new__(cls, *args): ... args = sympify(args) ... return Expr.__new__(cls, *args) ... ... def _eval_expand_double(self, **hints): ... ''' ... Doubles the args of MyClass. ... ... If there more than four args, doubling is not performed, ... unless force=True is also used (False by default). ... ''' ... force = hints.pop('force', False) ... if not force and len(self.args) > 4: ... return self ... return self.func(*(self.args + self.args)) ... 
>>> a = MyClass(1, 2, MyClass(3, 4)) >>> a MyClass(1, 2, MyClass(3, 4)) >>> a.expand(double=True) MyClass(1, 2, MyClass(3, 4, 3, 4), 1, 2, MyClass(3, 4, 3, 4)) >>> a.expand(double=True, deep=False) MyClass(1, 2, MyClass(3, 4), 1, 2, MyClass(3, 4)) >>> b = MyClass(1, 2, 3, 4, 5) >>> b.expand(double=True) MyClass(1, 2, 3, 4, 5) >>> b.expand(double=True, force=True) MyClass(1, 2, 3, 4, 5, 1, 2, 3, 4, 5) See Also ======== expand_log, expand_mul, expand_multinomial, expand_complex, expand_trig, expand_power_base, expand_power_exp, expand_func, hyperexpand """ # don't modify this; modify the Expr.expand method hints['power_base'] = power_base hints['power_exp'] = power_exp hints['mul'] = mul hints['log'] = log hints['multinomial'] = multinomial hints['basic'] = basic return sympify(e).expand(deep=deep, modulus=modulus, **hints) # This is a special application of two hints def _mexpand(expr, recursive=False): # expand multinomials and then expand products; this may not always # be sufficient to give a fully expanded expression (see # test_issue_8247_8354 in test_arit) if expr is None: return was = None while was != expr: was, expr = expr, expand_mul(expand_multinomial(expr)) if not recursive: break return expr # These are simple wrappers around single hints. def expand_mul(expr, deep=True): """ Wrapper around expand that only uses the mul hint. See the expand docstring for more information. Examples ======== >>> from sympy import symbols, expand_mul, exp, log >>> x, y = symbols('x,y', positive=True) >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2)) x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2) """ return sympify(expr).expand(deep=deep, mul=True, power_exp=False, power_base=False, basic=False, multinomial=False, log=False) def expand_multinomial(expr, deep=True): """ Wrapper around expand that only uses the multinomial hint. See the expand docstring for more information. 
Examples ======== >>> from sympy import symbols, expand_multinomial, exp >>> x, y = symbols('x y', positive=True) >>> expand_multinomial((x + exp(x + 1))**2) x**2 + 2*x*exp(x + 1) + exp(2*x + 2) """ return sympify(expr).expand(deep=deep, mul=False, power_exp=False, power_base=False, basic=False, multinomial=True, log=False) def expand_log(expr, deep=True, force=False): """ Wrapper around expand that only uses the log hint. See the expand docstring for more information. Examples ======== >>> from sympy import symbols, expand_log, exp, log >>> x, y = symbols('x,y', positive=True) >>> expand_log(exp(x+y)*(x+y)*log(x*y**2)) (x + y)*(log(x) + 2*log(y))*exp(x + y) """ return sympify(expr).expand(deep=deep, log=True, mul=False, power_exp=False, power_base=False, multinomial=False, basic=False, force=force) def expand_func(expr, deep=True): """ Wrapper around expand that only uses the func hint. See the expand docstring for more information. Examples ======== >>> from sympy import expand_func, gamma >>> from sympy.abc import x >>> expand_func(gamma(x + 2)) x*(x + 1)*gamma(x) """ return sympify(expr).expand(deep=deep, func=True, basic=False, log=False, mul=False, power_exp=False, power_base=False, multinomial=False) def expand_trig(expr, deep=True): """ Wrapper around expand that only uses the trig hint. See the expand docstring for more information. Examples ======== >>> from sympy import expand_trig, sin >>> from sympy.abc import x, y >>> expand_trig(sin(x+y)*(x+y)) (x + y)*(sin(x)*cos(y) + sin(y)*cos(x)) """ return sympify(expr).expand(deep=deep, trig=True, basic=False, log=False, mul=False, power_exp=False, power_base=False, multinomial=False) def expand_complex(expr, deep=True): """ Wrapper around expand that only uses the complex hint. See the expand docstring for more information. 
Examples ======== >>> from sympy import expand_complex, exp, sqrt, I >>> from sympy.abc import z >>> expand_complex(exp(z)) I*exp(re(z))*sin(im(z)) + exp(re(z))*cos(im(z)) >>> expand_complex(sqrt(I)) sqrt(2)/2 + sqrt(2)*I/2 See Also ======== Expr.as_real_imag """ return sympify(expr).expand(deep=deep, complex=True, basic=False, log=False, mul=False, power_exp=False, power_base=False, multinomial=False) def expand_power_base(expr, deep=True, force=False): """ Wrapper around expand that only uses the power_base hint. See the expand docstring for more information. A wrapper to expand(power_base=True) which separates a power with a base that is a Mul into a product of powers, without performing any other expansions, provided that assumptions about the power's base and exponent allow. deep=False (default is True) will only apply to the top-level expression. force=True (default is False) will cause the expansion to ignore assumptions about the base and exponent. When False, the expansion will only happen if the base is non-negative or the exponent is an integer. >>> from sympy.abc import x, y, z >>> from sympy import expand_power_base, sin, cos, exp >>> (x*y)**2 x**2*y**2 >>> (2*x)**y (2*x)**y >>> expand_power_base(_) 2**y*x**y >>> expand_power_base((x*y)**z) (x*y)**z >>> expand_power_base((x*y)**z, force=True) x**z*y**z >>> expand_power_base(sin((x*y)**z), deep=False) sin((x*y)**z) >>> expand_power_base(sin((x*y)**z), force=True) sin(x**z*y**z) >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y) 2**y*sin(x)**y + 2**y*cos(x)**y >>> expand_power_base((2*exp(y))**x) 2**x*exp(y)**x >>> expand_power_base((2*cos(x))**y) 2**y*cos(x)**y Notice that sums are left untouched. 
If this is not the desired behavior, apply full ``expand()`` to the expression: >>> expand_power_base(((x+y)*z)**2) z**2*(x + y)**2 >>> (((x+y)*z)**2).expand() x**2*z**2 + 2*x*y*z**2 + y**2*z**2 >>> expand_power_base((2*y)**(1+z)) 2**(z + 1)*y**(z + 1) >>> ((2*y)**(1+z)).expand() 2*2**z*y*y**z """ return sympify(expr).expand(deep=deep, log=False, mul=False, power_exp=False, power_base=True, multinomial=False, basic=False, force=force) def expand_power_exp(expr, deep=True): """ Wrapper around expand that only uses the power_exp hint. See the expand docstring for more information. Examples ======== >>> from sympy import expand_power_exp >>> from sympy.abc import x, y >>> expand_power_exp(x**(y + 2)) x**2*x**y """ return sympify(expr).expand(deep=deep, complex=False, basic=False, log=False, mul=False, power_exp=True, power_base=False, multinomial=False) def count_ops(expr, visual=False): """ Return a representation (integer or expression) of the operations in expr. If ``visual`` is ``False`` (default) then the sum of the coefficients of the visual expression will be returned. If ``visual`` is ``True`` then the number of each type of operation is shown with the core class types (or their virtual equivalent) multiplied by the number of times they occur. If expr is an iterable, the sum of the op counts of the items will be returned. Examples ======== >>> from sympy.abc import a, b, x, y >>> from sympy import sin, count_ops Although there isn't a SUB object, minus signs are interpreted as either negations or subtractions: >>> (x - y).count_ops(visual=True) SUB >>> (-x).count_ops(visual=True) NEG Here, there are two Adds and a Pow: >>> (1 + a + b**2).count_ops(visual=True) 2*ADD + POW In the following, an Add, Mul, Pow and two functions: >>> (sin(x)*x + sin(x)**2).count_ops(visual=True) ADD + MUL + POW + 2*SIN for a total of 5: >>> (sin(x)*x + sin(x)**2).count_ops(visual=False) 5 Note that "what you type" is not always what you get. 
The expression 1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather than two DIVs: >>> (1/x/y).count_ops(visual=True) DIV + MUL The visual option can be used to demonstrate the difference in operations for expressions in different forms. Here, the Horner representation is compared with the expanded form of a polynomial: >>> eq=x*(1 + x*(2 + x*(3 + x))) >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True) -MUL + 3*POW The count_ops function also handles iterables: >>> count_ops([x, sin(x), None, True, x + 2], visual=False) 2 >>> count_ops([x, sin(x), None, True, x + 2], visual=True) ADD + SIN >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True) 2*ADD + SIN """ from sympy import Integral, Symbol from sympy.simplify.radsimp import fraction from sympy.logic.boolalg import BooleanFunction expr = sympify(expr) if isinstance(expr, Expr): ops = [] args = [expr] NEG = Symbol('NEG') DIV = Symbol('DIV') SUB = Symbol('SUB') ADD = Symbol('ADD') while args: a = args.pop() if isinstance(a, str): continue if a.is_Rational: #-1/3 = NEG + DIV if a is not S.One: if a.p < 0: ops.append(NEG) if a.q != 1: ops.append(DIV) continue elif a.is_Mul: if _coeff_isneg(a): ops.append(NEG) if a.args[0] is S.NegativeOne: a = a.as_two_terms()[1] else: a = -a n, d = fraction(a) if n.is_Integer: ops.append(DIV) if n < 0: ops.append(NEG) args.append(d) continue # won't be -Mul but could be Add elif d is not S.One: if not d.is_Integer: args.append(d) ops.append(DIV) args.append(n) continue # could be -Mul elif a.is_Add: aargs = list(a.args) negs = 0 for i, ai in enumerate(aargs): if _coeff_isneg(ai): negs += 1 args.append(-ai) if i > 0: ops.append(SUB) else: args.append(ai) if i > 0: ops.append(ADD) if negs == len(aargs): # -x - y = NEG + SUB ops.append(NEG) elif _coeff_isneg(aargs[0]): # -x + y = SUB, but already recorded ADD ops.append(SUB - ADD) continue if a.is_Pow and a.exp is S.NegativeOne: ops.append(DIV) args.append(a.base) # won't be -Mul but could be Add 
continue if (a.is_Mul or a.is_Pow or a.is_Function or isinstance(a, Derivative) or isinstance(a, Integral)): o = Symbol(a.func.__name__.upper()) # count the args if (a.is_Mul or isinstance(a, LatticeOp)): ops.append(o*(len(a.args) - 1)) else: ops.append(o) if not a.is_Symbol: args.extend(a.args) elif type(expr) is dict: ops = [count_ops(k, visual=visual) + count_ops(v, visual=visual) for k, v in expr.items()] elif iterable(expr): ops = [count_ops(i, visual=visual) for i in expr] elif isinstance(expr, BooleanFunction): ops = [] for arg in expr.args: ops.append(count_ops(arg, visual=True)) o = Symbol(expr.func.__name__.upper()) ops.append(o) elif not isinstance(expr, Basic): ops = [] else: # it's Basic not isinstance(expr, Expr): if not isinstance(expr, Basic): raise TypeError("Invalid type of expr") else: ops = [] args = [expr] while args: a = args.pop() if a.args: o = Symbol(a.func.__name__.upper()) if a.is_Boolean: ops.append(o*(len(a.args)-1)) else: ops.append(o) args.extend(a.args) if not ops: if visual: return S.Zero return 0 ops = Add(*ops) if visual: return ops if ops.is_Number: return int(ops) return sum(int((a.args or [1])[0]) for a in Add.make_args(ops)) def nfloat(expr, n=15, exponent=False): """Make all Rationals in expr Floats except those in exponents (unless the exponents flag is set to True). 
Examples ======== >>> from sympy.core.function import nfloat >>> from sympy.abc import x, y >>> from sympy import cos, pi, sqrt >>> nfloat(x**4 + x/2 + cos(pi/3) + 1 + sqrt(y)) x**4 + 0.5*x + sqrt(y) + 1.5 >>> nfloat(x**4 + sqrt(y), exponent=True) x**4.0 + y**0.5 """ from sympy.core.power import Pow from sympy.polys.rootoftools import RootOf if iterable(expr, exclude=string_types): if isinstance(expr, (dict, Dict)): return type(expr)([(k, nfloat(v, n, exponent)) for k, v in list(expr.items())]) return type(expr)([nfloat(a, n, exponent) for a in expr]) rv = sympify(expr) if rv.is_Number: return Float(rv, n) elif rv.is_number: # evalf doesn't always set the precision rv = rv.n(n) if rv.is_Number: rv = Float(rv.n(n), n) else: pass # pure_complex(rv) is likely True return rv # watch out for RootOf instances that don't like to have # their exponents replaced with Dummies and also sometimes have # problems with evaluating at low precision (issue 6393) rv = rv.xreplace(dict([(ro, ro.n(n)) for ro in rv.atoms(RootOf)])) if not exponent: reps = [(p, Pow(p.base, Dummy())) for p in rv.atoms(Pow)] rv = rv.xreplace(dict(reps)) rv = rv.n(n) if not exponent: rv = rv.xreplace(dict([(d.exp, p.exp) for p, d in reps])) else: # Pow._eval_evalf special cases Integer exponents so if # exponent is suppose to be handled we have to do so here rv = rv.xreplace(Transform( lambda x: Pow(x.base, Float(x.exp, n)), lambda x: x.is_Pow and x.exp.is_Integer)) return rv.xreplace(Transform( lambda x: x.func(*nfloat(x.args, n, exponent)), lambda x: isinstance(x, Function))) from sympy.core.symbol import Dummy
unknown
codeparrot/codeparrot-clean
"""Support for LCN lights.""" import pypck from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_TRANSITION, SUPPORT_BRIGHTNESS, SUPPORT_TRANSITION, Light, ) from homeassistant.const import CONF_ADDRESS from . import LcnDevice from .const import ( CONF_CONNECTIONS, CONF_DIMMABLE, CONF_OUTPUT, CONF_TRANSITION, DATA_LCN, OUTPUT_PORTS, ) from .helpers import get_connection async def async_setup_platform( hass, hass_config, async_add_entities, discovery_info=None ): """Set up the LCN light platform.""" if discovery_info is None: return devices = [] for config in discovery_info: address, connection_id = config[CONF_ADDRESS] addr = pypck.lcn_addr.LcnAddr(*address) connections = hass.data[DATA_LCN][CONF_CONNECTIONS] connection = get_connection(connections, connection_id) address_connection = connection.get_address_conn(addr) if config[CONF_OUTPUT] in OUTPUT_PORTS: device = LcnOutputLight(config, address_connection) else: # in RELAY_PORTS device = LcnRelayLight(config, address_connection) devices.append(device) async_add_entities(devices) class LcnOutputLight(LcnDevice, Light): """Representation of a LCN light for output ports.""" def __init__(self, config, address_connection): """Initialize the LCN light.""" super().__init__(config, address_connection) self.output = pypck.lcn_defs.OutputPort[config[CONF_OUTPUT]] self._transition = pypck.lcn_defs.time_to_ramp_value(config[CONF_TRANSITION]) self.dimmable = config[CONF_DIMMABLE] self._brightness = 255 self._is_on = None self._is_dimming_to_zero = False async def async_added_to_hass(self): """Run when entity about to be added to hass.""" await super().async_added_to_hass() await self.address_connection.activate_status_request_handler(self.output) @property def supported_features(self): """Flag supported features.""" features = SUPPORT_TRANSITION if self.dimmable: features |= SUPPORT_BRIGHTNESS return features @property def brightness(self): """Return the brightness of this light between 0..255.""" return 
self._brightness @property def is_on(self): """Return True if entity is on.""" return self._is_on async def async_turn_on(self, **kwargs): """Turn the entity on.""" self._is_on = True self._is_dimming_to_zero = False if ATTR_BRIGHTNESS in kwargs: percent = int(kwargs[ATTR_BRIGHTNESS] / 255.0 * 100) else: percent = 100 if ATTR_TRANSITION in kwargs: transition = pypck.lcn_defs.time_to_ramp_value( kwargs[ATTR_TRANSITION] * 1000 ) else: transition = self._transition self.address_connection.dim_output(self.output.value, percent, transition) await self.async_update_ha_state() async def async_turn_off(self, **kwargs): """Turn the entity off.""" self._is_on = False if ATTR_TRANSITION in kwargs: transition = pypck.lcn_defs.time_to_ramp_value( kwargs[ATTR_TRANSITION] * 1000 ) else: transition = self._transition self._is_dimming_to_zero = bool(transition) self.address_connection.dim_output(self.output.value, 0, transition) await self.async_update_ha_state() def input_received(self, input_obj): """Set light state when LCN input object (command) is received.""" if ( not isinstance(input_obj, pypck.inputs.ModStatusOutput) or input_obj.get_output_id() != self.output.value ): return self._brightness = int(input_obj.get_percent() / 100.0 * 255) if self.brightness == 0: self._is_dimming_to_zero = False if not self._is_dimming_to_zero: self._is_on = self.brightness > 0 self.async_schedule_update_ha_state() class LcnRelayLight(LcnDevice, Light): """Representation of a LCN light for relay ports.""" def __init__(self, config, address_connection): """Initialize the LCN light.""" super().__init__(config, address_connection) self.output = pypck.lcn_defs.RelayPort[config[CONF_OUTPUT]] self._is_on = None async def async_added_to_hass(self): """Run when entity about to be added to hass.""" await super().async_added_to_hass() await self.address_connection.activate_status_request_handler(self.output) @property def is_on(self): """Return True if entity is on.""" return self._is_on async def 
async_turn_on(self, **kwargs): """Turn the entity on.""" self._is_on = True states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8 states[self.output.value] = pypck.lcn_defs.RelayStateModifier.ON self.address_connection.control_relays(states) await self.async_update_ha_state() async def async_turn_off(self, **kwargs): """Turn the entity off.""" self._is_on = False states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8 states[self.output.value] = pypck.lcn_defs.RelayStateModifier.OFF self.address_connection.control_relays(states) await self.async_update_ha_state() def input_received(self, input_obj): """Set light state when LCN input object (command) is received.""" if not isinstance(input_obj, pypck.inputs.ModStatusRelays): return self._is_on = input_obj.get_state(self.output.value) self.async_schedule_update_ha_state()
unknown
codeparrot/codeparrot-clean
extension Validator where T: Equatable & CustomStringConvertible { /// Validates whether an item is contained in the supplied array. public static func `in`(_ array: T...) -> Validator<T> { .in(array) } /// Validates whether an item is contained in the supplied sequence. public static func `in`<S>(_ sequence: S) -> Validator<T> where S: Sequence & Sendable, S.Element == T { .init { ValidatorResults.In(item: $0, items: .init(sequence)) } } } extension ValidatorResults { /// `ValidatorResult` of a validator that validates whether an item is contained in the supplied sequence. public struct In<T> where T: Equatable & CustomStringConvertible & Sendable { /// Description of the item. public let item: T /// Descriptions of the elements of the supplied sequence. public let items: [T] } } extension ValidatorResults.In: ValidatorResult { public var isFailure: Bool { !self.items.contains(self.item) } public var successDescription: String? { self.makeDescription(not: false) } public var failureDescription: String? { self.makeDescription(not: true) } func makeDescription(not: Bool) -> String { let description: String switch self.items.count { case 1: description = self.items[0].description case 2: description = "\(self.items[0].description) or \(self.items[1].description)" default: let first = self.items[0..<(self.items.count - 1)] .map { $0.description }.joined(separator: ", ") let last = self.items[self.items.count - 1].description description = "\(first), or \(last)" } return "is\(not ? " not" : " ") \(description)" } }
swift
github
https://github.com/vapor/vapor
Sources/Vapor/Validation/Validators/In.swift
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.gradle.tasks.bundling; import javax.inject.Inject; import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Nested; import org.gradle.api.tasks.Optional; import org.jspecify.annotations.Nullable; import org.springframework.boot.buildpack.platform.build.BuilderDockerConfiguration; import org.springframework.boot.buildpack.platform.docker.configuration.DockerRegistryAuthentication; /** * Encapsulates Docker configuration options. 
* * @author Wei Jiang * @author Scott Frederick * @since 2.4.0 */ public abstract class DockerSpec { private final DockerRegistrySpec builderRegistry; private final DockerRegistrySpec publishRegistry; @Inject public DockerSpec(ObjectFactory objects) { this.builderRegistry = objects.newInstance(DockerRegistrySpec.class); this.publishRegistry = objects.newInstance(DockerRegistrySpec.class); getBindHostToBuilder().convention(false); getTlsVerify().convention(false); } DockerSpec(DockerRegistrySpec builderRegistry, DockerRegistrySpec publishRegistry) { this.builderRegistry = builderRegistry; this.publishRegistry = publishRegistry; } @Input @Optional public abstract Property<String> getContext(); @Input @Optional public abstract Property<String> getHost(); @Input @Optional public abstract Property<Boolean> getTlsVerify(); @Input @Optional public abstract Property<String> getCertPath(); @Input @Optional public abstract Property<Boolean> getBindHostToBuilder(); /** * Returns the {@link DockerRegistrySpec} that configures authentication to the * builder registry. * @return the registry spec */ @Nested public DockerRegistrySpec getBuilderRegistry() { return this.builderRegistry; } /** * Customizes the {@link DockerRegistrySpec} that configures authentication to the * builder registry. * @param action the action to apply */ public void builderRegistry(Action<DockerRegistrySpec> action) { action.execute(this.builderRegistry); } /** * Returns the {@link DockerRegistrySpec} that configures authentication to the * publishing registry. * @return the registry spec */ @Nested public DockerRegistrySpec getPublishRegistry() { return this.publishRegistry; } /** * Customizes the {@link DockerRegistrySpec} that configures authentication to the * publishing registry. 
* @param action the action to apply */ public void publishRegistry(Action<DockerRegistrySpec> action) { action.execute(this.publishRegistry); } /** * Returns this configuration as a {@link BuilderDockerConfiguration} instance. This * method should only be called when the configuration is complete and will no longer * be changed. * @return the Docker configuration */ BuilderDockerConfiguration asDockerConfiguration() { BuilderDockerConfiguration dockerConfiguration = new BuilderDockerConfiguration(); dockerConfiguration = customizeHost(dockerConfiguration); dockerConfiguration = dockerConfiguration.withBindHostToBuilder(getBindHostToBuilder().get()); dockerConfiguration = customizeBuilderAuthentication(dockerConfiguration); dockerConfiguration = customizePublishAuthentication(dockerConfiguration); return dockerConfiguration; } private BuilderDockerConfiguration customizeHost(BuilderDockerConfiguration dockerConfiguration) { String context = getContext().getOrNull(); String host = getHost().getOrNull(); if (context != null && host != null) { throw new GradleException( "Invalid Docker configuration, either context or host can be provided but not both"); } if (context != null) { return dockerConfiguration.withContext(context); } if (host != null) { return dockerConfiguration.withHost(host, getTlsVerify().get(), getCertPath().getOrNull()); } return dockerConfiguration; } private BuilderDockerConfiguration customizeBuilderAuthentication(BuilderDockerConfiguration dockerConfiguration) { return dockerConfiguration.withBuilderRegistryAuthentication(getRegistryAuthentication("builder", this.builderRegistry, DockerRegistryAuthentication.configuration(null))); } private BuilderDockerConfiguration customizePublishAuthentication(BuilderDockerConfiguration dockerConfiguration) { return dockerConfiguration .withPublishRegistryAuthentication(getRegistryAuthentication("publish", this.publishRegistry, 
DockerRegistryAuthentication.configuration(DockerRegistryAuthentication.EMPTY_USER))); } private DockerRegistryAuthentication getRegistryAuthentication(String type, @Nullable DockerRegistrySpec registry, DockerRegistryAuthentication fallback) { if (registry == null || registry.hasEmptyAuth()) { return fallback; } if (registry.hasTokenAuth() && !registry.hasUserAuth()) { return DockerRegistryAuthentication.token(registry.getToken().get()); } if (registry.hasUserAuth() && !registry.hasTokenAuth()) { return DockerRegistryAuthentication.user(registry.getUsername().get(), registry.getPassword().get(), registry.getUrl().getOrNull(), registry.getEmail().getOrNull()); } throw new GradleException("Invalid Docker " + type + " registry configuration, either token or username/password must be provided"); } /** * Encapsulates Docker registry authentication configuration options. */ public abstract static class DockerRegistrySpec { /** * Returns the username to use when authenticating to the Docker registry. * @return the registry username */ @Input @Optional public abstract Property<String> getUsername(); /** * Returns the password to use when authenticating to the Docker registry. * @return the registry password */ @Input @Optional public abstract Property<String> getPassword(); /** * Returns the Docker registry URL. * @return the registry URL */ @Input @Optional public abstract Property<String> getUrl(); /** * Returns the email address associated with the Docker registry username. * @return the registry email address */ @Input @Optional public abstract Property<String> getEmail(); /** * Returns the identity token to use when authenticating to the Docker registry. * @return the registry identity token */ @Input @Optional public abstract Property<String> getToken(); boolean hasEmptyAuth() { return nonePresent(getUsername(), getPassword(), getUrl(), getEmail(), getToken()); } private boolean nonePresent(Property<?>... 
properties) { for (Property<?> property : properties) { if (property.isPresent()) { return false; } } return true; } boolean hasUserAuth() { return allPresent(getUsername(), getPassword()); } private boolean allPresent(Property<?>... properties) { for (Property<?> property : properties) { if (!property.isPresent()) { return false; } } return true; } boolean hasTokenAuth() { return getToken().isPresent(); } } }
java
github
https://github.com/spring-projects/spring-boot
build-plugin/spring-boot-gradle-plugin/src/main/java/org/springframework/boot/gradle/tasks/bundling/DockerSpec.java
<div><p data-foo="barbaz">this is unstyled</p> <p class="svelte-xyz" data-foo="bazbar">this is styled</p></div>
html
github
https://github.com/sveltejs/svelte
packages/svelte/tests/css/samples/omit-scoping-attribute-attribute-selector-suffix/expected.html
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ System-level utilities and helper functions. """ def chunkreadable(iter, chunk_size=65536): """ Wrap a readable iterator with a reader yielding chunks of a preferred size, otherwise leave iterator unchanged. :param iter: an iter which may also be readable :param chunk_size: maximum size of chunk """ return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter def chunkiter(fp, chunk_size=65536): """ Return an iterator to a file-like obj which yields fixed size chunks :param fp: a file-like object :param chunk_size: maximum size of chunk """ while True: chunk = fp.read(chunk_size) if chunk: yield chunk else: break
unknown
codeparrot/codeparrot-clean
import urllib2 from HTMLParser import HTMLParser from traceback import print_exc from sys import stderr class _DeHTMLParser(HTMLParser): ''' 利用HTMLParse来解析网页元素 ''' def __init__(self): HTMLParser.__init__(self) self.img_links = [] def handle_starttag(self, tag, attrs): if tag == 'img': # print(attrs) try: if ('pic_type','0') in attrs: for name, value in attrs: if name == 'src': self.img_links.append(value) except Exception as e: print(e) return self.img_links def dehtml(text): try: parser = _DeHTMLParser() parser.feed(text) parser.close() return parser.img_links except: print_exc(file=stderr) return text def main(): html = urllib2.urlopen('http://tieba.baidu.com/p/2166231880') content = html.read() print(dehtml(content)) i = 0 for img_list in dehtml(content): img_content = urllib2.urlopen(img_list).read() path_name = str(i)+'.jpg' with open(path_name,'wb') as f: f.write(img_content) i+=1 if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python """ Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import re import subprocess import string import sys from lib.core.enums import DBMS from lib.core.enums import DBMS_DIRECTORY_NAME from lib.core.enums import OS from lib.core.revision import getRevisionNumber # sqlmap version and site VERSION = "1.0-dev" REVISION = getRevisionNumber() VERSION_STRING = "sqlmap/%s%s" % (VERSION, "-%s" % REVISION if REVISION else "") DESCRIPTION = "automatic SQL injection and database takeover tool" SITE = "http://sqlmap.org" ISSUES_PAGE = "https://github.com/sqlmapproject/sqlmap/issues/new" GIT_REPOSITORY = "git://github.com/sqlmapproject/sqlmap.git" ML = "sqlmap-users@lists.sourceforge.net" # Minimum distance of ratio from kb.matchRatio to result in True DIFF_TOLERANCE = 0.05 CONSTANT_RATIO = 0.9 # Lower and upper values for match ratio in case of stable page LOWER_RATIO_BOUND = 0.02 UPPER_RATIO_BOUND = 0.98 # Markers for special cases when parameter values contain html encoded characters PARAMETER_AMP_MARKER = "__AMP__" PARAMETER_SEMICOLON_MARKER = "__SEMICOLON__" PARTIAL_VALUE_MARKER = "__PARTIAL_VALUE__" PARTIAL_HEX_VALUE_MARKER = "__PARTIAL_HEX_VALUE__" URI_QUESTION_MARKER = "__QUESTION_MARK__" ASTERISK_MARKER = "__ASTERISK_MARK__" REPLACEMENT_MARKER = "__REPLACEMENT_MARK__" PAYLOAD_DELIMITER = "__PAYLOAD_DELIMITER__" CHAR_INFERENCE_MARK = "%c" PRINTABLE_CHAR_REGEX = r"[^\x00-\x1f\x7f-\xff]" # Regular expression used for recognition of generic permission messages PERMISSION_DENIED_REGEX = r"(command|permission|access)\s*(was|is)?\s*denied" # Regular expression used for recognition of generic maximum connection messages MAX_CONNECTIONS_REGEX = r"max.+connections" # Regular expression used for extracting results from google search GOOGLE_REGEX = r"url\?\w+=((?![^>]+webcache\.googleusercontent\.com)http[^>]+)&(sa=U|rct=j)" # Regular expression used for extracting content from 
"textual" tags TEXT_TAG_REGEX = r"(?si)<(abbr|acronym|b|blockquote|br|center|cite|code|dt|em|font|h\d|i|li|p|pre|q|strong|sub|sup|td|th|title|tt|u)(?!\w).*?>(?P<result>[^<]+)" # Regular expression used for recognition of IP addresses IP_ADDRESS_REGEX = r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b" # Dumping characters used in GROUP_CONCAT MySQL technique CONCAT_ROW_DELIMITER = ',' CONCAT_VALUE_DELIMITER = '|' # Coefficient used for a time-based query delay checking (must be >= 7) TIME_STDEV_COEFF = 7 # Minimum response time that can be even considered as delayed (not a complete requirement) MIN_VALID_DELAYED_RESPONSE = 0.5 # Standard deviation after which a warning message should be displayed about connection lags WARN_TIME_STDEV = 0.5 # Minimum length of usable union injected response (quick defense against substr fields) UNION_MIN_RESPONSE_CHARS = 10 # Coefficient used for a union-based number of columns checking (must be >= 7) UNION_STDEV_COEFF = 7 # Length of queue for candidates for time delay adjustment TIME_DELAY_CANDIDATES = 3 # Default value for HTTP Accept header HTTP_ACCEPT_HEADER_VALUE = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" # Default value for HTTP Accept-Encoding header HTTP_ACCEPT_ENCODING_HEADER_VALUE = "gzip,deflate" # Default timeout for running commands over backdoor BACKDOOR_RUN_CMD_TIMEOUT = 5 # Maximum number of techniques used in inject.py/getValue() per one value MAX_TECHNIQUES_PER_VALUE = 2 # Suffix used for naming meta databases in DBMS(es) without explicit database name METADB_SUFFIX = "_masterdb" # Minimum time response set needed for time-comparison based on standard deviation MIN_TIME_RESPONSES = 15 # Minimum comparison ratio set needed for searching valid union column number based on standard deviation MIN_UNION_RESPONSES = 5 # After these number of blanks at the end inference should stop (just in case) INFERENCE_BLANK_BREAK = 10 # Use this replacement character for cases when inference is not able to retrieve 
the proper character value INFERENCE_UNKNOWN_CHAR = '?' # Character used for operation "greater" in inference INFERENCE_GREATER_CHAR = ">" # Character used for operation "equals" in inference INFERENCE_EQUALS_CHAR = "=" # Character used for operation "not-equals" in inference INFERENCE_NOT_EQUALS_CHAR = "!=" # String used for representation of unknown dbms UNKNOWN_DBMS = "Unknown" # String used for representation of unknown dbms version UNKNOWN_DBMS_VERSION = "Unknown" # Dynamicity mark length used in dynamicity removal engine DYNAMICITY_MARK_LENGTH = 32 # Dummy user prefix used in dictionary attack DUMMY_USER_PREFIX = "__dummy__" # Reference: http://en.wikipedia.org/wiki/ISO/IEC_8859-1 DEFAULT_PAGE_ENCODING = "iso-8859-1" # URL used in dummy runs DUMMY_URL = "http://foo/bar?id=1" # System variables IS_WIN = subprocess.mswindows # The name of the operating system dependent module imported. The following names have currently been registered: 'posix', 'nt', 'mac', 'os2', 'ce', 'java', 'riscos' PLATFORM = os.name PYVERSION = sys.version.split()[0] # DBMS system databases MSSQL_SYSTEM_DBS = ("Northwind", "master", "model", "msdb", "pubs", "tempdb") MYSQL_SYSTEM_DBS = ("information_schema", "mysql") # Before MySQL 5.0 only "mysql" PGSQL_SYSTEM_DBS = ("information_schema", "pg_catalog", "pg_toast") ORACLE_SYSTEM_DBS = ("CTXSYS", "DBSNMP", "DMSYS", "EXFSYS", "MDSYS", "OLAPSYS", "ORDSYS", "OUTLN", "SYS", "SYSAUX", "SYSMAN", "SYSTEM", "TSMSYS", "WMSYS", "XDB") # These are TABLESPACE_NAME SQLITE_SYSTEM_DBS = ("sqlite_master", "sqlite_temp_master") ACCESS_SYSTEM_DBS = ("MSysAccessObjects", "MSysACEs", "MSysObjects", "MSysQueries", "MSysRelationships", "MSysAccessStorage",\ "MSysAccessXML", "MSysModules", "MSysModules2") FIREBIRD_SYSTEM_DBS = ("RDB$BACKUP_HISTORY", "RDB$CHARACTER_SETS", "RDB$CHECK_CONSTRAINTS", "RDB$COLLATIONS", "RDB$DATABASE",\ "RDB$DEPENDENCIES", "RDB$EXCEPTIONS", "RDB$FIELDS", "RDB$FIELD_DIMENSIONS", " RDB$FILES", "RDB$FILTERS",\ "RDB$FORMATS", 
"RDB$FUNCTIONS", "RDB$FUNCTION_ARGUMENTS", "RDB$GENERATORS", "RDB$INDEX_SEGMENTS", "RDB$INDICES",\ "RDB$LOG_FILES", "RDB$PAGES", "RDB$PROCEDURES", "RDB$PROCEDURE_PARAMETERS", "RDB$REF_CONSTRAINTS", "RDB$RELATIONS",\ "RDB$RELATION_CONSTRAINTS", "RDB$RELATION_FIELDS", "RDB$ROLES", "RDB$SECURITY_CLASSES", "RDB$TRANSACTIONS", "RDB$TRIGGERS",\ "RDB$TRIGGER_MESSAGES", "RDB$TYPES", "RDB$USER_PRIVILEGES", "RDB$VIEW_RELATIONS") MAXDB_SYSTEM_DBS = ("SYSINFO", "DOMAIN") SYBASE_SYSTEM_DBS = ("master", "model", "sybsystemdb", "sybsystemprocs") DB2_SYSTEM_DBS = ("NULLID", "SQLJ", "SYSCAT", "SYSFUN", "SYSIBM", "SYSIBMADM", "SYSIBMINTERNAL", "SYSIBMTS",\ "SYSPROC", "SYSPUBLIC", "SYSSTAT", "SYSTOOLS") HSQLDB_SYSTEM_DBS = ("INFORMATION_SCHEMA", "SYSTEM_LOB") MSSQL_ALIASES = ("microsoft sql server", "mssqlserver", "mssql", "ms") MYSQL_ALIASES = ("mysql", "my") PGSQL_ALIASES = ("postgresql", "postgres", "pgsql", "psql", "pg") ORACLE_ALIASES = ("oracle", "orcl", "ora", "or") SQLITE_ALIASES = ("sqlite", "sqlite3") ACCESS_ALIASES = ("msaccess", "access", "jet", "microsoft access") FIREBIRD_ALIASES = ("firebird", "mozilla firebird", "interbase", "ibase", "fb") MAXDB_ALIASES = ("maxdb", "sap maxdb", "sap db") SYBASE_ALIASES = ("sybase", "sybase sql server") DB2_ALIASES = ("db2", "ibm db2", "ibmdb2") HSQLDB_ALIASES = ("hsql", "hsqldb", "hs", "hypersql") DBMS_DIRECTORY_DICT = dict((getattr(DBMS, _), getattr(DBMS_DIRECTORY_NAME, _)) for _ in dir(DBMS) if not _.startswith("_")) SUPPORTED_DBMS = MSSQL_ALIASES + MYSQL_ALIASES + PGSQL_ALIASES + ORACLE_ALIASES + SQLITE_ALIASES + ACCESS_ALIASES + FIREBIRD_ALIASES + MAXDB_ALIASES + SYBASE_ALIASES + DB2_ALIASES + HSQLDB_ALIASES SUPPORTED_OS = ("linux", "windows") USER_AGENT_ALIASES = ("ua", "useragent", "user-agent") REFERER_ALIASES = ("ref", "referer", "referrer") HOST_ALIASES = ("host",) # Items displayed in basic help (-h) output BASIC_HELP_ITEMS = ( "url", "googleDork", "data", "cookie", "randomAgent", "proxy", "testParameter", "dbms", "level", 
"risk", "tech", "getAll", "getBanner", "getCurrentUser", "getCurrentDb", "getPasswordHashes", "getTables", "getColumns", "getSchema", "dumpTable", "dumpAll", "db", "tbl", "col", "osShell", "osPwn", "batch", "checkTor", "flushSession", "tor", "wizard", ) # String representation for NULL value NULL = "NULL" # String representation for blank ('') value BLANK = "<blank>" # String representation for current database CURRENT_DB = "CD" # Regular expressions used for parsing error messages (--parse-errors) ERROR_PARSING_REGEXES = ( r"<b>[^<]*(fatal|error|warning|exception)[^<]*</b>:?\s*(?P<result>.+?)<br\s*/?\s*>", r"(?m)^(fatal|error|warning|exception):?\s*(?P<result>.+?)$", r"<li>Error Type:<br>(?P<result>.+?)</li>", r"error '[0-9a-f]{8}'((<[^>]+>)|\s)+(?P<result>[^<>]+)", ) # Regular expression used for parsing charset info from meta html headers META_CHARSET_REGEX = r'(?si)<head>.*<meta http-equiv="?content-type"?[^>]+charset=(?P<result>[^">]+).*</head>' # Regular expression used for parsing refresh info from meta html headers META_REFRESH_REGEX = r'(?si)<head>.*<meta http-equiv="?refresh"?[^>]+content="?[^">]+url=(?P<result>[^">]+).*</head>' # Regular expression used for parsing empty fields in tested form data EMPTY_FORM_FIELDS_REGEX = r'(&|\A)(?P<result>[^=]+=(&|\Z))' # Reference: http://www.cs.ru.nl/bachelorscripties/2010/Martin_Devillers___0437999___Analyzing_password_strength.pdf COMMON_PASSWORD_SUFFIXES = ("1", "123", "2", "12", "3", "13", "7", "11", "5", "22", "23", "01", "4", "07", "21", "14", "10", "06", "08", "8", "15", "69", "16", "6", "18") # Reference: http://www.the-interweb.com/serendipity/index.php?/archives/94-A-brief-analysis-of-40,000-leaked-MySpace-passwords.html COMMON_PASSWORD_SUFFIXES += ("!", ".", "*", "!!", "?", ";", "..", "!!!", ", ", "@") # Splitter used between requests in WebScarab log files WEBSCARAB_SPLITTER = "### Conversation" # Splitter used between requests in BURP log files BURP_REQUEST_REGEX = r"={10,}\s+[^=]+={10,}\s(.+?)\s={10,}" 
# Regex used for parsing XML Burp saved history items BURP_XML_HISTORY_REGEX = r'<request base64="true"><!\[CDATA\[([^]]+)' # Encoding used for Unicode data UNICODE_ENCODING = "utf8" # Reference: http://www.w3.org/Protocols/HTTP/Object_Headers.html#uri URI_HTTP_HEADER = "URI" # Uri format which could be injectable (e.g. www.site.com/id82) URI_INJECTABLE_REGEX = r"//[^/]*/([^\.*?]+)\Z" # Regex used for masking sensitive data SENSITIVE_DATA_REGEX = "(\s|=)(?P<result>[^\s=]*%s[^\s]*)\s" # Maximum number of threads (avoiding connection issues and/or DoS) MAX_NUMBER_OF_THREADS = 10 # Minimum range between minimum and maximum of statistical set MIN_STATISTICAL_RANGE = 0.01 # Minimum value for comparison ratio MIN_RATIO = 0.0 # Maximum value for comparison ratio MAX_RATIO = 1.0 # Character used for marking injectable position inside provided data CUSTOM_INJECTION_MARK_CHAR = '*' # Other way to declare injection position INJECT_HERE_MARK = '%INJECT HERE%' # Maximum length used for retrieving data over MySQL error based payload due to "known" problems with longer result strings MYSQL_ERROR_CHUNK_LENGTH = 50 # Maximum length used for retrieving data over MSSQL error based payload due to trimming problems with longer result strings MSSQL_ERROR_CHUNK_LENGTH = 100 # Do not escape the injected statement if it contains any of the following SQL keywords EXCLUDE_UNESCAPE = ("WAITFOR DELAY ", " INTO DUMPFILE ", " INTO OUTFILE ", "CREATE ", "BULK ", "EXEC ", "RECONFIGURE ", "DECLARE ", "'%s'" % CHAR_INFERENCE_MARK) # Mark used for replacement of reflected values REFLECTED_VALUE_MARKER = "__REFLECTED_VALUE__" # Regular expression used for replacing border non-alphanum characters REFLECTED_BORDER_REGEX = r"[^A-Za-z]+" # Regular expression used for replacing non-alphanum characters REFLECTED_REPLACEMENT_REGEX = r".+?" 
# Maximum number of alpha-numerical parts in reflected regex (for speed purposes) REFLECTED_MAX_REGEX_PARTS = 10 # Chars which can be used as a failsafe values in case of too long URL encoding value URLENCODE_FAILSAFE_CHARS = "()|," # Maximum length of URL encoded value after which failsafe procedure takes away URLENCODE_CHAR_LIMIT = 2000 # Default schema for Microsoft SQL Server DBMS DEFAULT_MSSQL_SCHEMA = "dbo" # Display hash attack info every mod number of items HASH_MOD_ITEM_DISPLAY = 11 # Maximum integer value MAX_INT = sys.maxint # Options that need to be restored in multiple targets run mode RESTORE_MERGED_OPTIONS = ("col", "db", "dnsName", "privEsc", "tbl", "regexp", "string", "textOnly", "threads", "timeSec", "tmpPath", "uChar", "user") # Parameters to be ignored in detection phase (upper case) IGNORE_PARAMETERS = ("__VIEWSTATE", "__VIEWSTATEENCRYPTED", "__EVENTARGUMENT", "__EVENTTARGET", "__EVENTVALIDATION", "ASPSESSIONID", "ASP.NET_SESSIONID", "JSESSIONID", "CFID", "CFTOKEN") # Regular expression used for recognition of ASP.NET control parameters ASP_NET_CONTROL_REGEX = r"(?i)\Actl\d+\$" # Turn off resume console info to avoid potential slowdowns TURN_OFF_RESUME_INFO_LIMIT = 20 # Strftime format for results file used in multiple target mode RESULTS_FILE_FORMAT = "results-%m%d%Y_%I%M%p.csv" # Official web page with the list of Python supported codecs CODECS_LIST_PAGE = "http://docs.python.org/library/codecs.html#standard-encodings" # Simple regular expression used to distinguish scalar from multiple-row commands (not sole condition) SQL_SCALAR_REGEX = r"\A(SELECT(?!\s+DISTINCT\(?))?\s*\w*\(" # IP address of the localhost LOCALHOST = "127.0.0.1" # Default port used by Tor DEFAULT_TOR_SOCKS_PORT = 9050 # Default ports used in Tor proxy bundles DEFAULT_TOR_HTTP_PORTS = (8123, 8118) # Percentage below which comparison engine could have problems LOW_TEXT_PERCENT = 20 # These MySQL keywords can't go (alone) into versioned comment form (/*!...*/) # Reference: 
http://dev.mysql.com/doc/refman/5.1/en/function-resolution.html IGNORE_SPACE_AFFECTED_KEYWORDS = ("CAST", "COUNT", "EXTRACT", "GROUP_CONCAT", "MAX", "MID", "MIN", "SESSION_USER", "SUBSTR", "SUBSTRING", "SUM", "SYSTEM_USER", "TRIM") LEGAL_DISCLAIMER = "Usage of sqlmap for attacking targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable local, state and federal laws. Developers assume no liability and are not responsible for any misuse or damage caused by this program" # After this number of misses reflective removal mechanism is turned off (for speed up reasons) REFLECTIVE_MISS_THRESHOLD = 20 # Regular expression used for extracting HTML title HTML_TITLE_REGEX = "<title>(?P<result>[^<]+)</title>" # Table used for Base64 conversion in WordPress hash cracking routine ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" # Chars used to quickly distinguish if the user provided tainted parameter values DUMMY_SQL_INJECTION_CHARS = ";()'" # Simple check against dummy users DUMMY_USER_INJECTION = r"(?i)[^\w](AND|OR)\s+[^\s]+[=><]|\bUNION\b.+\bSELECT\b" # Extensions skipped by crawler CRAWL_EXCLUDE_EXTENSIONS = ("gif", "jpg", "jpeg", "image", "jar", "tif", "bmp", "war", "ear", "mpg", "mpeg", "wmv", "mpeg", "scm", "iso", "dmp", "dll", "cab", "so", "avi", "mkv", "bin", "iso", "tar", "png", "pdf", "ps", "wav", "mp3", "mp4", "au", "aiff", "aac", "zip", "rar", "7z", "gz", "flv", "mov") # Patterns often seen in HTTP headers containing custom injection marking character PROBLEMATIC_CUSTOM_INJECTION_PATTERNS = r"(\bq=[^;']+)|(\*/\*)" # Template used for common table existence check BRUTE_TABLE_EXISTS_TEMPLATE = "EXISTS(SELECT %d FROM %s)" # Template used for common column existence check BRUTE_COLUMN_EXISTS_TEMPLATE = "EXISTS(SELECT %s FROM %s)" # Payload used for checking of existence of IDS/WAF (dummier the better) IDS_WAF_CHECK_PAYLOAD = "AND 1=1 UNION ALL SELECT 1,2,3,table_name FROM 
information_schema.tables WHERE 2>1" # Vectors used for provoking specific WAF/IDS/IPS behavior(s) WAF_ATTACK_VECTORS = ( "", # NIL "search=<script>alert(1)</script>", "file=../../../../etc/passwd", "q=<invalid>foobar", "id=1 %s" % IDS_WAF_CHECK_PAYLOAD ) # Used for status representation in dictionary attack phase ROTATING_CHARS = ('\\', '|', '|', '/', '-') # Chunk length (in items) used by BigArray objects (only last chunk and cached one are held in memory) BIGARRAY_CHUNK_LENGTH = 4096 # Only console display last n table rows TRIM_STDOUT_DUMP_SIZE = 256 # Parse response headers only first couple of times PARSE_HEADERS_LIMIT = 3 # Step used in ORDER BY technique used for finding the right number of columns in UNION query injections ORDER_BY_STEP = 10 # Maximum number of times for revalidation of a character in time-based injections MAX_TIME_REVALIDATION_STEPS = 5 # Characters that can be used to split parameter values in provided command line (e.g. in --tamper) PARAMETER_SPLITTING_REGEX = r'[,|;]' # Regular expression describing possible union char value (e.g. used in --union-char) UNION_CHAR_REGEX = r'\A\w+\Z' # Attribute used for storing original parameter value in special cases (e.g. 
POST) UNENCODED_ORIGINAL_VALUE = 'original' # Common column names containing usernames (used for hash cracking in some cases) COMMON_USER_COLUMNS = ('user', 'username', 'user_name', 'benutzername', 'benutzer', 'utilisateur', 'usager', 'consommateur', 'utente', 'utilizzatore', 'usufrutuario', 'korisnik', 'usuario', 'consumidor') # Default delimiter in GET/POST values DEFAULT_GET_POST_DELIMITER = '&' # Default delimiter in cookie values DEFAULT_COOKIE_DELIMITER = ';' # Unix timestamp used for forcing cookie expiration when provided with --load-cookies FORCE_COOKIE_EXPIRATION_TIME = "9999999999" # Skip unforced HashDB flush requests below the threshold number of cached items HASHDB_FLUSH_THRESHOLD = 32 # Number of retries for unsuccessful HashDB flush attempts HASHDB_FLUSH_RETRIES = 3 # Unique milestone value used for forced deprecation of old HashDB values (e.g. when changing hash/pickle mechanism) HASHDB_MILESTONE_VALUE = "cAWxkLYCQT" # r5129 "".join(random.sample(string.ascii_letters, 10)) # Warn user of possible delay due to large page dump in full UNION query injections LARGE_OUTPUT_THRESHOLD = 1024 ** 2 # On huge tables there is a considerable slowdown if every row retrieval requires ORDER BY (most noticable in table dumping using ERROR injections) SLOW_ORDER_COUNT_THRESHOLD = 10000 # Give up on hash recognition if nothing was found in first given number of rows HASH_RECOGNITION_QUIT_THRESHOLD = 10000 # Maximum number of redirections to any single URL - this is needed because of the state that cookies introduce MAX_SINGLE_URL_REDIRECTIONS = 4 # Maximum total number of redirections (regardless of URL) - before assuming we're in a loop MAX_TOTAL_REDIRECTIONS = 10 # Reference: http://www.tcpipguide.com/free/t_DNSLabelsNamesandSyntaxRules.htm MAX_DNS_LABEL = 63 # Alphabet used for prefix and suffix strings of name resolution requests in DNS technique (excluding hexadecimal chars for not mixing with inner content) DNS_BOUNDARIES_ALPHABET = re.sub("[a-fA-F]", "", 
string.ascii_letters) # Alphabet used for heuristic checks HEURISTIC_CHECK_ALPHABET = ('"', '\'', ')', '(', '[', ']', ',', '.') # Connection chunk size (processing large responses in chunks to avoid MemoryError crashes - e.g. large table dump in full UNION injections) MAX_CONNECTION_CHUNK_SIZE = 10 * 1024 * 1024 # Maximum response total page size (trimmed if larger) MAX_CONNECTION_TOTAL_SIZE = 100 * 1024 * 1024 # Maximum (multi-threaded) length of entry in bisection algorithm MAX_BISECTION_LENGTH = 50 * 1024 * 1024 # Mark used for trimming unnecessary content in large chunks LARGE_CHUNK_TRIM_MARKER = "__TRIMMED_CONTENT__" # Generic SQL comment formation GENERIC_SQL_COMMENT = "-- " # Threshold value for turning back on time auto-adjustment mechanism VALID_TIME_CHARS_RUN_THRESHOLD = 100 # Check for empty columns only if table is sufficiently large CHECK_ZERO_COLUMNS_THRESHOLD = 10 # Boldify all logger messages containing these "patterns" BOLD_PATTERNS = ("' injectable", "might be injectable", "' is vulnerable", "is not injectable", "test failed", "test passed", "live test final result", "test shows that") # Generic www root directory names GENERIC_DOC_ROOT_DIRECTORY_NAMES = ("htdocs", "httpdocs", "public", "wwwroot", "www") # Maximum length of a help part containing switch/option name(s) MAX_HELP_OPTION_LENGTH = 18 # Maximum number of connection retries (to prevent problems with recursion) MAX_CONNECT_RETRIES = 100 # Strings for detecting formatting errors FORMAT_EXCEPTION_STRINGS = ("Type mismatch", "Error converting", "Failed to convert", "System.FormatException", "java.lang.NumberFormatException") # Regular expression used for extracting ASP.NET view state values VIEWSTATE_REGEX = r'(?i)(?P<name>__VIEWSTATE[^"]*)[^>]+value="(?P<result>[^"]+)' # Regular expression used for extracting ASP.NET event validation values EVENTVALIDATION_REGEX = r'(?i)(?P<name>__EVENTVALIDATION[^"]*)[^>]+value="(?P<result>[^"]+)' # Number of rows to generate inside the full union test for 
limited output (mustn't be too large to prevent payload length problems) LIMITED_ROWS_TEST_NUMBER = 15 # Format used for representing invalid unicode characters INVALID_UNICODE_CHAR_FORMAT = r"\?%02x" # Regular expression for SOAP-like POST data SOAP_RECOGNITION_REGEX = r"(?s)\A(<\?xml[^>]+>)?\s*<([^> ]+)( [^>]+)?>.+</\2.*>\s*\Z" # Regular expression used for detecting JSON-like POST data JSON_RECOGNITION_REGEX = r'(?s)\A(\s*\[)*\s*\{.*"[^"]+"\s*:\s*("[^"]+"|\d+).*\}\s*(\]\s*)*\Z' # Regular expression used for detecting multipart POST data MULTIPART_RECOGNITION_REGEX = r"(?i)Content-Disposition:[^;]+;\s*name=" # Default POST data content-type DEFAULT_CONTENT_TYPE = "application/x-www-form-urlencoded; charset=utf-8" # Raw text POST data content-type PLAIN_TEXT_CONTENT_TYPE = "text/plain; charset=utf-8" # Length used while checking for existence of Suhosin-patch (like) protection mechanism SUHOSIN_MAX_VALUE_LENGTH = 512 # Minimum size of an (binary) entry before it can be considered for dumping to disk MIN_BINARY_DISK_DUMP_SIZE = 100 # Regular expression used for extracting form tags FORM_SEARCH_REGEX = r"(?si)<form(?!.+<form).+?</form>" # Minimum field entry length needed for encoded content (hex, base64,...) check MIN_ENCODED_LEN_CHECK = 5 # Timeout in seconds in which Metasploit remote session has to be initialized METASPLOIT_SESSION_TIMEOUT = 180 # Reference: http://www.cookiecentral.com/faq/#3.5 NETSCAPE_FORMAT_HEADER_COOKIES = "# Netscape HTTP Cookie File." 
# Prefixes used in brute force search for web server document root BRUTE_DOC_ROOT_PREFIXES = { OS.LINUX: ("/var/www", "/var/www/%TARGET%", "/var/www/vhosts/%TARGET%", "/var/www/virtual/%TARGET%", "/var/www/clients/vhosts/%TARGET%", "/var/www/clients/virtual/%TARGET%"), OS.WINDOWS: ("/xampp", "/Program Files/xampp/", "/wamp", "/Program Files/wampp/", "/Inetpub/wwwroot", "/Inetpub/wwwroot/%TARGET%", "/Inetpub/vhosts/%TARGET%") } # Suffixes used in brute force search for web server document root BRUTE_DOC_ROOT_SUFFIXES = ("", "html", "htdocs", "httpdocs", "php", "public", "src", "site", "build", "web", "sites/all", "www/build") # String used for marking target name inside used brute force web server document root BRUTE_DOC_ROOT_TARGET_MARK = "%TARGET%" # Character used as a boundary in kb.chars (preferably less frequent letter) KB_CHARS_BOUNDARY_CHAR = 'q' # CSS style used in HTML dump format HTML_DUMP_CSS_STYLE = """<style> table{ margin:10; background-color:#FFFFFF; font-family:verdana; font-size:12px; align:center; } thead{ font-weight:bold; background-color:#4F81BD; color:#FFFFFF; } tr:nth-child(even) { background-color: #D3DFEE } td{ font-size:10px; } th{ font-size:10px; } </style>"""
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright(C) 2010-2011 Romain Bignon # # This file is part of a weboob module. # # This weboob module is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This weboob module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this weboob module. If not, see <http://www.gnu.org/licenses/>. import re from logging import warning from weboob.browser.pages import HTMLPage, LoggedPage class Message(object): TIMESTAMP_REGEXP = re.compile(r'(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})') def __init__(self, id, timestamp, login, message, is_me): self.id = id self.timestamp = timestamp self.login = login self.message = message self.is_me = is_me self.norloge = timestamp m = self.TIMESTAMP_REGEXP.match(timestamp) if m: self.norloge = '%02d:%02d:%02d' % (int(m.group(4)), int(m.group(5)), int(m.group(6))) else: warning('Unable to parse timestamp "%s"' % timestamp) class BoardIndexPage(LoggedPage, HTMLPage): def get_messages(self, last=None): msgs = [] for post in self.doc.xpath('//post'): m = Message(int(post.attrib['id']), post.attrib['time'], post.find('login').text, post.find('message').text, post.find('login').text.lower() == self.browser.username.lower()) if last is not None and last == m.id: break msgs.append(m) return msgs
unknown
codeparrot/codeparrot-clean
{ "favorite": { "favoriteDags_one": "Eerste {{count}} favoriete Dags", "favoriteDags_other": "Eerste {{count}} favoriete Dags", "noDagRuns": "Er is nog geen Dag Run voor deze Dag.", "noFavoriteDags": "Geen favorieten. Klik op het sterpictogram naast een Dag in de lijst om deze aan je favorieten toe te voegen." }, "group": "Groep", "health": { "dagProcessor": "Dag Processor", "health": "Status", "healthy": "OK", "lastHeartbeat": "Laatste hartslag", "metaDatabase": "MetaDatabase", "scheduler": "Scheduler", "status": "Status", "triggerer": "Triggerer", "unhealthy": "Fout" }, "history": "Geschiedenis", "importErrors": { "dagImportError_one": "Dag leesfout", "dagImportError_other": "Dag leesfouten", "searchByFile": "Zoek op bestand", "timestamp": "Tijd" }, "managePools": "Beheer Pools", "noAssetEvents": "Geen Asset Events gevonden.", "poolSlots": "Pool Slots", "sortBy": { "newestFirst": "Nieuwste eerst", "oldestFirst": "Oudste eerst", "placeholder": "Sorteer op" }, "source": "Bron", "stats": { "activeDags": "Actieve Dags", "failedDags": "Mislukte Dags", "queuedDags": "Dags in de wachtrij", "requiredActions": "Vereiste acties", "runningDags": "Lopende Dags", "stats": "Statistieken" }, "uri": "Uri", "welcome": "Welkom" }
json
github
https://github.com/apache/airflow
airflow-core/src/airflow/ui/public/i18n/locales/nl/dashboard.json
#!/usr/bin/env python ''' License of pdfminer lzw package: Copyright (c) 2004-2010 Yusuke Shinyama <yusuke at cs dot nyu dot edu> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import sys from sys import stderr try: from cStringIO import StringIO except ImportError: from StringIO import StringIO ## LZWDecoder ## class LZWDecoder(object): debug = 0 def __init__(self, fp): self.fp = fp self.buff = 0 self.bpos = 8 self.nbits = 9 self.table = None self.prevbuf = None return def readbits(self, bits): v = 0 while 1: # the number of remaining bits we can get from the current buffer. r = 8-self.bpos if bits <= r: # |-----8-bits-----| # |-bpos-|-bits-| | # | |----r----| v = (v<<bits) | ((self.buff>>(r-bits)) & ((1<<bits)-1)) self.bpos += bits break else: # |-----8-bits-----| # |-bpos-|---bits----... 
# | |----r----| v = (v<<r) | (self.buff & ((1<<r)-1)) bits -= r x = self.fp.read(1) if not x: raise EOFError self.buff = ord(x) self.bpos = 0 return v def feed(self, code): x = '' if code == 256: self.table = [ chr(c) for c in xrange(256) ] # 0-255 self.table.append(None) # 256 self.table.append(None) # 257 self.prevbuf = '' self.nbits = 9 elif code == 257: pass elif not self.prevbuf: x = self.prevbuf = self.table[code] else: if code < len(self.table): x = self.table[code] self.table.append(self.prevbuf+x[0]) else: self.table.append(self.prevbuf+self.prevbuf[0]) x = self.table[code] l = len(self.table) if l == 511: self.nbits = 10 elif l == 1023: self.nbits = 11 elif l == 2047: self.nbits = 12 self.prevbuf = x return x def run(self): while 1: try: code = self.readbits(self.nbits) except EOFError: break x = self.feed(code) yield x if self.debug: print >>stderr, ('nbits=%d, code=%d, output=%r, table=%r' % (self.nbits, code, x, self.table[258:])) return # lzwdecode def lzwdecode(data): """ >>> lzwdecode('\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01') '\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42' """ fp = StringIO(data) return ''.join(LZWDecoder(fp).run()) if __name__ == '__main__': import doctest doctest.testmod()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.client.tests.utils import kotlinx.atomicfu.atomic import kotlin.test.assertEquals internal class LogMatcher( private val originPredicates: MutableList<LogPredicate> ) { private val stashed = originPredicates.filterIsInstance<Somewhere>().map { it.value }.toMutableSet() private val predicates = originPredicates.filter { it !is Somewhere } private var index = 0 private val log = mutableListOf<String>() private var matchFailCause: Throwable? by atomic(null) fun match(value: String) { matchFailCause?.let { throw it } val cause = kotlin.runCatching { for (line in value.lines()) { log += line matchLine(line) } }.exceptionOrNull() ?: return matchFailCause = cause } private fun matchLine(value: String) { if (value in stashed) { stashed.remove(value) return } if (index >= predicates.size) { fail("Too many lines logged") } when (val predicate = predicates[index++]) { is Changing -> return is Line -> if (predicate.value.lowercase() != value.lowercase()) { fail("Line doesn't match") } is Optional -> { if (predicate.value.lowercase() == value.lowercase()) return matchLine(value) } is Somewhere -> error("It's impossible") } } private fun renderExpectedLog() = buildString { for (predicate in originPredicates) { appendLine(predicate) } } fun finish() { matchFailCause?.let { throw it } if (predicates.size != index) { fail("Log size doesn't match") } if (stashed.isNotEmpty()) { fail("Not all stashed matched") } } private fun fail(message: String) { assertEquals(renderExpectedLog(), log.joinToString("\n"), message) } }
kotlin
github
https://github.com/ktorio/ktor
ktor-client/ktor-client-tests/common/test/io/ktor/client/tests/utils/LogMatcher.kt
from modules.base_module import BaseModule class FcqCardModule(BaseModule): fcq_ids = [] def render(self, fcq_ids, color): self.fcq_ids = fcq_ids self.fcq_ids.reverse() chunks = [self.fcq_ids[x:x + 6] for x in range(0, len(self.fcq_ids), 6)] return self.render_string( 'modules/FcqCollection.html', chunks=chunks, fcq_ids=fcq_ids, fcq_title=self.fcq_title, convert_date=self.convert_date, color=color) def embedded_javascript(self): javascript = "" for fcq_id in self.fcq_ids: javascript += ''' $("#card-{0}").one( "click", function(){{ $( "#body-{0}" ).load( "/ajax/fcqcard/{0}", function(){{ $( "#nav-{0} :not(.disabled) a").click(function (e) {{ e.preventDefault(); $(this).tab('show'); console.log(e); }}); }}); }}); '''.format(fcq_id) return javascript
unknown
codeparrot/codeparrot-clean
# coding: utf-8 import os import re import subprocess import time from cassandra.util import sortedset from ccmlib import common from dtest import Tester, debug, DISABLE_VNODES from tools import rows_to_list, since @since('2.0.16', max_version='3.0.0') class TestTokenGenerator(Tester): """ Basic tools/bin/token-generator test. Token-generator was removed in CASSANDRA-5261 @jira_ticket CASSANDRA-5261 @jira_ticket CASSANDRA-9300 """ def call_token_generator(self, install_dir, randomPart, nodes): executable = os.path.join(install_dir, 'tools', 'bin', 'token-generator') if common.is_win(): executable += ".bat" args = [executable] if randomPart is not None: if randomPart: args.append("--random") else: args.append("--murmur3") for n in nodes: args.append(str(n)) debug('Invoking {}'.format(args)) token_gen_output = subprocess.check_output(args) lines = token_gen_output.split("\n") dc_tokens = None generated_tokens = [] for line in lines: if line.startswith("DC #"): if dc_tokens is not None: self.assertGreater(dc_tokens.__len__(), 0, "dc_tokens is empty from token-generator %r" % args) generated_tokens.append(dc_tokens) dc_tokens = [] else: if line.__len__() > 0: m = re.search("^ Node #(\d+): [ ]*([-]?\d+)$", line) self.assertIsNotNone(m, "Line \"%r\" does not match pattern from token-generator %r" % (line, args)) node_num = int(m.group(1)) node_token = int(m.group(2)) dc_tokens.append(node_token) self.assertEqual(node_num, dc_tokens.__len__(), "invalid token count from token-generator %r" % args) self.assertIsNotNone(dc_tokens, "No tokens from token-generator %r" % args) self.assertGreater(dc_tokens.__len__(), 0, "No tokens from token-generator %r" % args) generated_tokens.append(dc_tokens) return generated_tokens def prepare(self, randomPart=None, nodes=1): cluster = self.cluster install_dir = cluster.get_install_dir() generated_tokens = self.call_token_generator(install_dir, randomPart, [nodes]) if not randomPart: 
cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner") else: if randomPart: cluster.set_partitioner("org.apache.cassandra.dht.RandomPartitioner") else: cluster.set_partitioner("org.apache.cassandra.dht.Murmur3Partitioner") # remove these from cluster options - otherwise node's config would be overridden with cluster._config_options_ cluster._config_options.__delitem__('num_tokens') if not DISABLE_VNODES: cluster._config_options.__delitem__('initial_token') self.assertTrue(not cluster.nodelist(), "nodelist() already initialized") cluster.populate(nodes, use_vnodes=False, tokens=generated_tokens[0]).start(wait_for_binary_proto=True) time.sleep(0.2) node = cluster.nodelist()[0] session = self.patient_cql_connection(node) return generated_tokens, session def _token_gen_test(self, nodes, randomPart=None): generated_tokens, session = self.prepare(randomPart, nodes=nodes) dc_tokens = generated_tokens[0] tokens = [] local_tokens = rows_to_list(session.execute("SELECT tokens FROM system.local"))[0] self.assertEqual(local_tokens.__len__(), 1, "too many tokens for peer") for tok in local_tokens: tokens += tok rows = rows_to_list(session.execute("SELECT tokens FROM system.peers")) self.assertEqual(rows.__len__(), nodes - 1) for row in rows: peer_tokens = row[0] self.assertEqual(peer_tokens.__len__(), 1, "too many tokens for peer") for tok in peer_tokens: tokens.append(tok) self.assertEqual(tokens.__len__(), dc_tokens.__len__()) for cluster_token in tokens: tok = int(cluster_token) self.assertGreaterEqual(dc_tokens.index(tok), 0, "token in cluster does not match generated tokens") def token_gen_def_test(self, nodes=3): """ Validate token-generator with Murmur3Partitioner with default token-generator behavior """ self._token_gen_test(nodes) def token_gen_murmur3_test(self, nodes=3): """ Validate token-generator with Murmur3Partitioner with explicit murmur3 """ self._token_gen_test(nodes, False) def token_gen_random_test(self, nodes=3): """ Validate 
token-generator with Murmur3Partitioner with explicit random """ self._token_gen_test(nodes, True) dc_nodes_combinations = [ [3, 5], [3, 5, 5], [12, 5, 7], [50, 100, 250], [100, 100, 100], [250, 250, 250], [1000, 1000, 1000], [2500, 2500, 2500, 2500] ] def _multi_dc_tokens(self, random=None): t_min = 0 t_max = 1 << 127 if random is None or not random: t_min = -1 << 63 t_max = 1 << 63 for dc_nodes in self.dc_nodes_combinations: all_tokens = sortedset() node_count = 0 generated_tokens = self.call_token_generator(self.cluster.get_install_dir(), random, dc_nodes) self.assertEqual(dc_nodes.__len__(), generated_tokens.__len__()) for n in range(0, dc_nodes.__len__()): nodes = dc_nodes[n] node_count += nodes tokens = generated_tokens[n] self.assertEqual(nodes, tokens.__len__()) for tok in tokens: self.assertTrue(t_min <= tok < t_max, "Generated token %r out of Murmur3Partitioner range %r..%r" % (tok, t_min, t_max - 1)) self.assertTrue(not all_tokens.__contains__(tok), "Duplicate token %r for nodes-counts %r" % (tok, dc_nodes)) all_tokens.add(tok) self.assertEqual(all_tokens.__len__(), node_count, "Number of tokens %r and number of nodes %r does not match for %r" % (all_tokens.__len__(), node_count, dc_nodes)) def multi_dc_tokens_default_test(self): self._multi_dc_tokens() def multi_dc_tokens_murmur3_test(self): self._multi_dc_tokens(False) def multi_dc_tokens_random_test(self): self._multi_dc_tokens(True)
unknown
codeparrot/codeparrot-clean
export default "b2";
javascript
github
https://github.com/webpack/webpack
test/cases/chunks/circular-correctness/module-b2.js
#!/usr/bin/env python # # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # This is used to test the findbugs plugin, it calls # build/android/pylib/utils/findbugs.py to analyze the classes in # org.chromium.tools.findbugs.plugin package, and expects to get the same # issue with those in expected_result.txt. # # Useful command line: # --rebaseline to generate the expected_result.txt, please make sure don't # remove the expected result of exsting tests. import optparse import os import sys sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', 'build', 'android'))) from pylib import constants from pylib.utils import findbugs def main(argv): parser = findbugs.GetCommonParser() options, _ = parser.parse_args() if not options.known_bugs: options.known_bugs = os.path.join(constants.CHROME_DIR, 'tools', 'android', 'findbugs_plugin', 'test', 'expected_result.txt') if not options.only_analyze: options.only_analyze = 'org.chromium.tools.findbugs.plugin.*' return findbugs.Run(options) if __name__ == '__main__': sys.exit(main(sys.argv))
unknown
codeparrot/codeparrot-clean
/* * Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.client.plugins.sse import io.ktor.sse.* /** * Policy that controls how an SSE diagnostic buffer is captured while reading a stream. * * The buffer represents already processed data (no re-reading from the network). * It is intended for logging and error analysis when failures happen. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.plugins.sse.SSEBufferPolicy) */ public sealed interface SSEBufferPolicy { /** * Disable buffer capture. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.plugins.sse.SSEBufferPolicy.Off) */ public data object Off : SSEBufferPolicy /** * Keep the last [count] completed SSE events in the diagnostic buffer. * * The session appends an event when it encounters an empty line (SSE event boundary). * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.plugins.sse.SSEBufferPolicy.LastEvents) */ public data class LastEvents(val count: Int) : SSEBufferPolicy { init { require(count > 0) { "Count must be > 0" } } } /** * Keep the last [count] text lines of the stream in the buffer. * Includes blank lines that delimit SSE events, comment lines, etc.). * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.plugins.sse.SSEBufferPolicy.LastLines) */ public data class LastLines(val count: Int) : SSEBufferPolicy { init { require(count > 0) { "Count must be > 0" } } } /** * Keep only the last completed event. * Shorthand for `LastEvents(1)`. * * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.plugins.sse.SSEBufferPolicy.LastEvent) */ public data object LastEvent : SSEBufferPolicy /** * Keep everything that has been processed so far. * * Note: Use with care for long-lived streams. 
* * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.plugins.sse.SSEBufferPolicy.All) */ public data object All : SSEBufferPolicy } internal fun SSEBufferPolicy.toBodyBuffer(): BodyBuffer = when (this) { is SSEBufferPolicy.Off -> BodyBuffer.Empty is SSEBufferPolicy.LastEvent -> BodyBuffer.Events(1) is SSEBufferPolicy.LastEvents -> BodyBuffer.Events(count) is SSEBufferPolicy.LastLines -> BodyBuffer.Lines(count) is SSEBufferPolicy.All -> BodyBuffer.Lines(Int.MAX_VALUE) } internal sealed interface BodyBuffer { fun appendLine(line: String) {} fun appendEvent(event: ServerSentEvent) {} fun toByteArray(): ByteArray = EMPTY class Events(private val capacity: Int) : BodyBuffer { private val events = ArrayDeque<ServerSentEvent>() override fun appendEvent(event: ServerSentEvent) { if (events.size == capacity) { events.removeFirst() } events.addLast(event) } override fun toByteArray(): ByteArray { return toByteArray(events) } } class Lines(private val capacity: Int) : BodyBuffer { private val lines = ArrayDeque<String>() override fun appendLine(line: String) { if (lines.size == capacity) { lines.removeFirst() } lines.addLast(line) } override fun toByteArray(): ByteArray { return toByteArray(lines) } } object Empty : BodyBuffer } private fun toByteArray(array: ArrayDeque<*>): ByteArray = array.joinToString(NEWLINE).encodeToByteArray() private const val NEWLINE: String = "\r\n" internal val EMPTY = ByteArray(0)
kotlin
github
https://github.com/ktorio/ktor
ktor-client/ktor-client-core/common/src/io/ktor/client/plugins/sse/SSEBufferPolicy.kt
# These functions are part of the python-colorama module # They have been adjusted slightly for LinkChecker # # Copyright: (C) 2010 Jonathan Hartley <tartley@tartley.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name(s) of the copyright holders nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# from winbase.h STDOUT = -11 STDERR = -12 from ctypes import (windll, byref, Structure, c_char, c_short, c_uint32, c_ushort, ArgumentError, WinError) handles = { STDOUT: windll.kernel32.GetStdHandle(STDOUT), STDERR: windll.kernel32.GetStdHandle(STDERR), } SHORT = c_short WORD = c_ushort DWORD = c_uint32 TCHAR = c_char class COORD(Structure): """struct in wincon.h""" _fields_ = [ ('X', SHORT), ('Y', SHORT), ] class SMALL_RECT(Structure): """struct in wincon.h.""" _fields_ = [ ("Left", SHORT), ("Top", SHORT), ("Right", SHORT), ("Bottom", SHORT), ] class CONSOLE_SCREEN_BUFFER_INFO(Structure): """struct in wincon.h.""" _fields_ = [ ("dwSize", COORD), ("dwCursorPosition", COORD), ("wAttributes", WORD), ("srWindow", SMALL_RECT), ("dwMaximumWindowSize", COORD), ] def __str__(self): """Get string representation of console screen buffer info.""" return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( self.dwSize.Y, self.dwSize.X , self.dwCursorPosition.Y, self.dwCursorPosition.X , self.wAttributes , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X ) def GetConsoleScreenBufferInfo(stream_id=STDOUT): """Get console screen buffer info object.""" handle = handles[stream_id] csbi = CONSOLE_SCREEN_BUFFER_INFO() success = windll.kernel32.GetConsoleScreenBufferInfo( handle, byref(csbi)) if not success: raise WinError() return csbi def SetConsoleTextAttribute(stream_id, attrs): """Set a console text attribute.""" handle = handles[stream_id] return windll.kernel32.SetConsoleTextAttribute(handle, attrs) # from wincon.h BLACK = 0 BLUE = 1 GREEN = 2 CYAN = 3 RED = 4 MAGENTA = 5 YELLOW = 6 GREY = 7 # from wincon.h NORMAL = 0x00 # dim text, dim background BRIGHT = 0x08 # bright text, dim background _default_foreground = None _default_background = None _default_style = None def init(): """Initialize foreground and background attributes.""" global _default_foreground, _default_background, _default_style try: 
attrs = GetConsoleScreenBufferInfo().wAttributes except (ArgumentError, WindowsError): _default_foreground = GREY _default_background = BLACK _default_style = NORMAL else: _default_foreground = attrs & 7 _default_background = (attrs >> 4) & 7 _default_style = attrs & BRIGHT def get_attrs(foreground, background, style): """Get foreground and background attributes.""" return foreground + (background << 4) + style def set_console(stream=STDOUT, foreground=None, background=None, style=None): """Set console foreground and background attributes.""" if foreground is None: foreground = _default_foreground if background is None: background = _default_background if style is None: style = _default_style attrs = get_attrs(foreground, background, style) SetConsoleTextAttribute(stream, attrs) def reset_console(stream=STDOUT): """Reset the console.""" set_console(stream=stream) def get_console_size(): """Get the console size.""" return GetConsoleScreenBufferInfo().dwSize
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.configurationprocessor.test; import java.util.Arrays; import java.util.HashSet; import java.util.Set; import javax.annotation.processing.SupportedAnnotationTypes; import javax.annotation.processing.SupportedSourceVersion; import javax.lang.model.SourceVersion; import org.springframework.boot.configurationprocessor.ConfigurationMetadataAnnotationProcessor; /** * Test {@link ConfigurationMetadataAnnotationProcessor}. 
* * @author Stephane Nicoll * @author Phillip Webb * @author Andy Wilkinson * @author Kris De Volder * @author Scott Frederick */ @SupportedAnnotationTypes({ TestConfigurationMetadataAnnotationProcessor.CONFIGURATION_PROPERTIES_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.CONFIGURATION_PROPERTIES_SOURCE_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.CONTROLLER_ENDPOINT_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.ENDPOINT_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.JMX_ENDPOINT_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.REST_CONTROLLER_ENDPOINT_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.SERVLET_ENDPOINT_ANNOTATION, TestConfigurationMetadataAnnotationProcessor.WEB_ENDPOINT_ANNOTATION, "org.springframework.context.annotation.Configuration" }) @SupportedSourceVersion(SourceVersion.RELEASE_6) public class TestConfigurationMetadataAnnotationProcessor extends ConfigurationMetadataAnnotationProcessor { public static final String CONFIGURATION_PROPERTIES_ANNOTATION = "org.springframework.boot.configurationsample.TestConfigurationProperties"; public static final String CONFIGURATION_PROPERTIES_SOURCE_ANNOTATION = "org.springframework.boot.configurationsample.TestConfigurationPropertiesSource"; public static final String NESTED_CONFIGURATION_PROPERTY_ANNOTATION = "org.springframework.boot.configurationsample.TestNestedConfigurationProperty"; public static final String DEPRECATED_CONFIGURATION_PROPERTY_ANNOTATION = "org.springframework.boot.configurationsample.TestDeprecatedConfigurationProperty"; public static final String CONSTRUCTOR_BINDING_ANNOTATION = "org.springframework.boot.configurationsample.TestConstructorBinding"; public static final String AUTOWIRED_ANNOTATION = "org.springframework.boot.configurationsample.TestAutowired"; public static final String DEFAULT_VALUE_ANNOTATION = "org.springframework.boot.configurationsample.TestDefaultValue"; public static final String 
CONTROLLER_ENDPOINT_ANNOTATION = "org.springframework.boot.configurationsample.TestControllerEndpoint"; public static final String ENDPOINT_ANNOTATION = "org.springframework.boot.configurationsample.TestEndpoint"; public static final String JMX_ENDPOINT_ANNOTATION = "org.springframework.boot.configurationsample.TestJmxEndpoint"; public static final String REST_CONTROLLER_ENDPOINT_ANNOTATION = "org.springframework.boot.configurationsample.TestRestControllerEndpoint"; public static final String SERVLET_ENDPOINT_ANNOTATION = "org.springframework.boot.configurationsample.TestServletEndpoint"; public static final String WEB_ENDPOINT_ANNOTATION = "org.springframework.boot.configurationsample.TestWebEndpoint"; public static final String READ_OPERATION_ANNOTATION = "org.springframework.boot.configurationsample.TestReadOperation"; public static final String NAME_ANNOTATION = "org.springframework.boot.configurationsample.TestName"; public static final String ENDPOINT_ACCESS_ENUM = "org.springframework.boot.configurationsample.TestAccess"; public TestConfigurationMetadataAnnotationProcessor() { } @Override protected String configurationPropertiesAnnotation() { return CONFIGURATION_PROPERTIES_ANNOTATION; } @Override protected String configurationPropertiesSourceAnnotation() { return CONFIGURATION_PROPERTIES_SOURCE_ANNOTATION; } @Override protected String nestedConfigurationPropertyAnnotation() { return NESTED_CONFIGURATION_PROPERTY_ANNOTATION; } @Override protected String deprecatedConfigurationPropertyAnnotation() { return DEPRECATED_CONFIGURATION_PROPERTY_ANNOTATION; } @Override protected String constructorBindingAnnotation() { return CONSTRUCTOR_BINDING_ANNOTATION; } @Override protected String autowiredAnnotation() { return AUTOWIRED_ANNOTATION; } @Override protected String defaultValueAnnotation() { return DEFAULT_VALUE_ANNOTATION; } @Override protected Set<String> endpointAnnotations() { return new HashSet<>(Arrays.asList(CONTROLLER_ENDPOINT_ANNOTATION, 
ENDPOINT_ANNOTATION, JMX_ENDPOINT_ANNOTATION, REST_CONTROLLER_ENDPOINT_ANNOTATION, SERVLET_ENDPOINT_ANNOTATION, WEB_ENDPOINT_ANNOTATION)); } @Override protected String readOperationAnnotation() { return READ_OPERATION_ANNOTATION; } @Override protected String nameAnnotation() { return NAME_ANNOTATION; } @Override protected String endpointAccessEnum() { return ENDPOINT_ACCESS_ENUM; } }
java
github
https://github.com/spring-projects/spring-boot
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationprocessor/test/TestConfigurationMetadataAnnotationProcessor.java
<!-- This file is generated by scripts/process-messages/index.js. Do not edit! --> ### a11y_accesskey ``` Avoid using accesskey ``` Enforce no `accesskey` on element. Access keys are HTML attributes that allow web developers to assign keyboard shortcuts to elements. Inconsistencies between keyboard shortcuts and keyboard commands used by screen reader and keyboard-only users create accessibility complications. To avoid complications, access keys should not be used. <!-- prettier-ignore --> ```svelte <!-- A11y: Avoid using accesskey --> <div accesskey="z"></div> ``` ### a11y_aria_activedescendant_has_tabindex ``` An element with an aria-activedescendant attribute should have a tabindex value ``` An element with `aria-activedescendant` must be tabbable, so it must either have an inherent `tabindex` or declare `tabindex` as an attribute. ```svelte <!-- A11y: Elements with attribute aria-activedescendant should have tabindex value --> <div aria-activedescendant="some-id"></div> ``` ### a11y_aria_attributes ``` `<%name%>` should not have aria-* attributes ``` Certain reserved DOM elements do not support ARIA roles, states and properties. This is often because they are not visible, for example `meta`, `html`, `script`, `style`. This rule enforces that these DOM elements do not contain the `aria-*` props. ```svelte <!-- A11y: <meta> should not have aria-* attributes --> <meta aria-hidden="false" /> ``` ### a11y_autocomplete_valid ``` '%value%' is an invalid value for 'autocomplete' on `<input type="%type%">` ``` ### a11y_autofocus ``` Avoid using autofocus ``` Enforce that `autofocus` is not used on elements. Autofocusing elements can cause usability issues for sighted and non-sighted users alike. ```svelte <!-- A11y: Avoid using autofocus --> <input autofocus /> ``` ### a11y_click_events_have_key_events ``` Visible, non-interactive elements with a click event must be accompanied by a keyboard event handler. 
Consider whether an interactive element such as `<button type="button">` or `<a>` might be more appropriate ``` Enforce that visible, non-interactive elements with an `onclick` event are accompanied by a keyboard event handler. Users should first consider whether an interactive element might be more appropriate such as a `<button type="button">` element for actions or `<a>` element for navigations. These elements are more semantically meaningful and will have built-in key handling. E.g. `Space` and `Enter` will trigger a `<button>` and `Enter` will trigger an `<a>` element. If a non-interactive element is required then `onclick` should be accompanied by an `onkeyup` or `onkeydown` handler that enables the user to perform equivalent actions via the keyboard. In order for the user to be able to trigger a key press, the element will also need to be focusable by adding a [`tabindex`](https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/tabindex). While an `onkeypress` handler will also silence this warning, it should be noted that the `keypress` event is deprecated. ```svelte <!-- A11y: visible, non-interactive elements with an onclick event must be accompanied by a keyboard event handler. --> <div onclick={() => {}}></div> ``` Coding for the keyboard is important for users with physical disabilities who cannot use a mouse, AT compatibility, and screenreader users. ### a11y_consider_explicit_label ``` Buttons and links should either contain text or have an `aria-label`, `aria-labelledby` or `title` attribute ``` ### a11y_distracting_elements ``` Avoid `<%name%>` elements ``` Enforces that no distracting elements are used. Elements that can be visually distracting can cause accessibility issues with visually impaired users. Such elements are most likely deprecated, and should be avoided. The following elements are visually distracting: `<marquee>` and `<blink>`. 
```svelte <!-- A11y: Avoid <marquee> elements --> <marquee></marquee> ``` ### a11y_figcaption_index ``` `<figcaption>` must be first or last child of `<figure>` ``` ### a11y_figcaption_parent ``` `<figcaption>` must be an immediate child of `<figure>` ``` Enforce that certain DOM elements have the correct structure. ```svelte <!-- A11y: <figcaption> must be an immediate child of <figure> --> <div> <figcaption>Image caption</figcaption> </div> ``` ### a11y_hidden ``` `<%name%>` element should not be hidden ``` Certain DOM elements are useful for screen reader navigation and should not be hidden. <!-- prettier-ignore --> ```svelte <!-- A11y: <h2> element should not be hidden --> <h2 aria-hidden="true">invisible header</h2> ``` ### a11y_img_redundant_alt ``` Screenreaders already announce `<img>` elements as an image ``` Enforce img alt attribute does not contain the word image, picture, or photo. Screen readers already announce `img` elements as an image. There is no need to use words such as _image_, _photo_, and/or _picture_. ```svelte <img src="foo" alt="Foo eating a sandwich." /> <!-- aria-hidden, won't be announced by screen reader --> <img src="bar" aria-hidden="true" alt="Picture of me taking a photo of an image" /> <!-- A11y: Screen readers already announce <img> elements as an image. --> <img src="foo" alt="Photo of foo being weird." /> <!-- A11y: Screen readers already announce <img> elements as an image. --> <img src="bar" alt="Image of me at a bar!" /> <!-- A11y: Screen readers already announce <img> elements as an image. --> <img src="foo" alt="Picture of baz fixing a bug." /> ``` ### a11y_incorrect_aria_attribute_type ``` The value of '%attribute%' must be a %type% ``` Enforce that only the correct type of value is used for aria attributes. For example, `aria-hidden` should only receive a boolean. 
```svelte <!-- A11y: The value of 'aria-hidden' must be exactly one of true or false --> <div aria-hidden="yes"></div> ``` ### a11y_incorrect_aria_attribute_type_boolean ``` The value of '%attribute%' must be either 'true' or 'false'. It cannot be empty ``` ### a11y_incorrect_aria_attribute_type_id ``` The value of '%attribute%' must be a string that represents a DOM element ID ``` ### a11y_incorrect_aria_attribute_type_idlist ``` The value of '%attribute%' must be a space-separated list of strings that represent DOM element IDs ``` ### a11y_incorrect_aria_attribute_type_integer ``` The value of '%attribute%' must be an integer ``` ### a11y_incorrect_aria_attribute_type_token ``` The value of '%attribute%' must be exactly one of %values% ``` ### a11y_incorrect_aria_attribute_type_tokenlist ``` The value of '%attribute%' must be a space-separated list of one or more of %values% ``` ### a11y_incorrect_aria_attribute_type_tristate ``` The value of '%attribute%' must be exactly one of true, false, or mixed ``` ### a11y_interactive_supports_focus ``` Elements with the '%role%' interactive role must have a tabindex value ``` Enforce that elements with an interactive role and interactive handlers (mouse or key press) must be focusable or tabbable. ```svelte <!-- A11y: Elements with the 'button' interactive role must have a tabindex value. --> <div role="button" onkeypress={() => {}} /> ``` ### a11y_invalid_attribute ``` '%href_value%' is not a valid %href_attribute% attribute ``` Enforce that attributes important for accessibility have a valid value. For example, `href` should not be empty, `'#'`, or `javascript:`. ```svelte <!-- A11y: '' is not a valid href attribute --> <a href="">invalid</a> ``` ### a11y_label_has_associated_control ``` A form label must be associated with a control ``` Enforce that a label tag has a text label and an associated control. There are two supported ways to associate a label with a control: - Wrapping a control in a label tag. 
- Adding `for` to a label and assigning it the ID of an input on the page. ```svelte <label for="id">B</label> <label>C <input type="text" /></label> <!-- A11y: A form label must be associated with a control. --> <label>A</label> ``` ### a11y_media_has_caption ``` `<video>` elements must have a `<track kind="captions">` ``` Providing captions for media is essential for deaf users to follow along. Captions should be a transcription or translation of the dialogue, sound effects, relevant musical cues, and other relevant audio information. Not only is this important for accessibility, but can also be useful for all users in the case that the media is unavailable (similar to `alt` text on an image when an image is unable to load). The captions should contain all important and relevant information to understand the corresponding media. This may mean that the captions are not a 1:1 mapping of the dialogue in the media content. However, captions are not necessary for video components with the `muted` attribute. ```svelte <video><track kind="captions" /></video> <audio muted></audio> <!-- A11y: Media elements must have a <track kind=\"captions\"> --> <video></video> <!-- A11y: Media elements must have a <track kind=\"captions\"> --> <video><track /></video> ``` ### a11y_misplaced_role ``` `<%name%>` should not have role attribute ``` Certain reserved DOM elements do not support ARIA roles, states and properties. This is often because they are not visible, for example `meta`, `html`, `script`, `style`. This rule enforces that these DOM elements do not contain the `role` props. ```svelte <!-- A11y: <meta> should not have role attribute --> <meta role="tooltip" /> ``` ### a11y_misplaced_scope ``` The scope attribute should only be used with `<th>` elements ``` The scope attribute should only be used on `<th>` elements. 
<!-- prettier-ignore --> ```svelte <!-- A11y: The scope attribute should only be used with <th> elements --> <div scope="row" /> ``` ### a11y_missing_attribute ``` `<%name%>` element should have %article% %sequence% attribute ``` Enforce that attributes required for accessibility are present on an element. This includes the following checks: - `<a>` should have an href (unless it's a [fragment-defining tag](https://github.com/sveltejs/svelte/issues/4697)) - `<area>` should have alt, aria-label, or aria-labelledby - `<html>` should have lang - `<iframe>` should have title - `<img>` should have alt - `<object>` should have title, aria-label, or aria-labelledby - `<input type="image">` should have alt, aria-label, or aria-labelledby ```svelte <!-- A11y: <input type=\"image\"> element should have an alt, aria-label or aria-labelledby attribute --> <input type="image" /> <!-- A11y: <html> element should have a lang attribute --> <html></html> <!-- A11y: <a> element should have an href attribute --> <a>text</a> ``` ### a11y_missing_content ``` `<%name%>` element should contain text ``` Enforce that heading elements (`h1`, `h2`, etc.) and anchors have content and that the content is accessible to screen readers ```svelte <!-- A11y: <a> element should have child content --> <a href="/foo"></a> <!-- A11y: <h1> element should have child content --> <h1></h1> ``` ### a11y_mouse_events_have_key_events ``` '%event%' event must be accompanied by '%accompanied_by%' event ``` Enforce that `onmouseover` and `onmouseout` are accompanied by `onfocus` and `onblur`, respectively. This helps to ensure that any functionality triggered by these mouse events is also accessible to keyboard users. 
```svelte <!-- A11y: onmouseover must be accompanied by onfocus --> <div onmouseover={handleMouseover} /> <!-- A11y: onmouseout must be accompanied by onblur --> <div onmouseout={handleMouseout} /> ``` ### a11y_no_abstract_role ``` Abstract role '%role%' is forbidden ``` ### a11y_no_interactive_element_to_noninteractive_role ``` `<%element%>` cannot have role '%role%' ``` [WAI-ARIA](https://www.w3.org/TR/wai-aria-1.1/#usage_intro) roles should not be used to convert an interactive element to a non-interactive element. Non-interactive ARIA roles include `article`, `banner`, `complementary`, `img`, `listitem`, `main`, `region` and `tooltip`. ```svelte <!-- A11y: <textarea> cannot have role 'listitem' --> <textarea role="listitem"></textarea> ``` ### a11y_no_noninteractive_element_interactions ``` Non-interactive element `<%element%>` should not be assigned mouse or keyboard event listeners ``` A non-interactive element does not support event handlers (mouse and key handlers). Non-interactive elements include `<main>`, `<area>`, `<h1>` (,`<h2>`, etc), `<p>`, `<img>`, `<li>`, `<ul>` and `<ol>`. Non-interactive [WAI-ARIA roles](https://www.w3.org/TR/wai-aria-1.1/#usage_intro) include `article`, `banner`, `complementary`, `img`, `listitem`, `main`, `region` and `tooltip`. ```sv <!-- `A11y: Non-interactive element <li> should not be assigned mouse or keyboard event listeners.` --> <li onclick={() => {}}></li> <!-- `A11y: Non-interactive element <div> should not be assigned mouse or keyboard event listeners.` --> <div role="listitem" onclick={() => {}}></div> ``` ### a11y_no_noninteractive_element_to_interactive_role ``` Non-interactive element `<%element%>` cannot have interactive role '%role%' ``` [WAI-ARIA](https://www.w3.org/TR/wai-aria-1.1/#usage_intro) roles should not be used to convert a non-interactive element to an interactive element. 
Interactive ARIA roles include `button`, `link`, `checkbox`, `menuitem`, `menuitemcheckbox`, `menuitemradio`, `option`, `radio`, `searchbox`, `switch` and `textbox`. ```svelte <!-- A11y: Non-interactive element <h3> cannot have interactive role 'searchbox' --> <h3 role="searchbox">Button</h3> ``` ### a11y_no_noninteractive_tabindex ``` noninteractive element cannot have nonnegative tabIndex value ``` Tab key navigation should be limited to elements on the page that can be interacted with. <!-- prettier-ignore --> ```svelte <!-- A11y: noninteractive element cannot have nonnegative tabIndex value --> <div tabindex="0"></div> ``` ### a11y_no_redundant_roles ``` Redundant role '%role%' ``` Some HTML elements have default ARIA roles. Giving these elements an ARIA role that is already set by the browser [has no effect](https://www.w3.org/TR/using-aria/#aria-does-nothing) and is redundant. ```svelte <!-- A11y: Redundant role 'button' --> <button role="button">...</button> <!-- A11y: Redundant role 'img' --> <img role="img" src="foo.jpg" /> ``` ### a11y_no_static_element_interactions ``` `<%element%>` with a %handler% handler must have an ARIA role ``` Elements like `<div>` with interactive handlers like `click` must have an ARIA role. <!-- prettier-ignore --> ```svelte <!-- A11y: <div> with click handler must have an ARIA role --> <div onclick={() => ''}></div> ``` ### a11y_positive_tabindex ``` Avoid tabindex values above zero ``` Avoid positive `tabindex` property values. This will move elements out of the expected tab order, creating a confusing experience for keyboard users. <!-- prettier-ignore --> ```svelte <!-- A11y: avoid tabindex values above zero --> <div tabindex="1"></div> ``` ### a11y_role_has_required_aria_props ``` Elements with the ARIA role "%role%" must have the following attributes defined: %props% ``` Elements with ARIA roles must have all required attributes for that role. 
```svelte
<!-- A11y: Elements with the ARIA role "checkbox" must have the following attributes defined: "aria-checked" -->
<span role="checkbox" aria-labelledby="foo" tabindex="0"></span>
```

### a11y_role_supports_aria_props

```
The attribute '%attribute%' is not supported by the role '%role%'
```

Elements with explicit or implicit roles defined contain only `aria-*` properties supported by that role.

```svelte
<!-- A11y: The attribute 'aria-multiline' is not supported by the role 'link'. -->
<div role="link" aria-multiline></div>

<!-- A11y: The attribute 'aria-required' is not supported by the role 'listitem'. This role is implicit on the element <li>. -->
<li aria-required></li>
```

### a11y_role_supports_aria_props_implicit

```
The attribute '%attribute%' is not supported by the role '%role%'. This role is implicit on the element `<%name%>`
```

Elements with explicit or implicit roles defined contain only `aria-*` properties supported by that role.

```svelte
<!-- A11y: The attribute 'aria-multiline' is not supported by the role 'link'. -->
<div role="link" aria-multiline></div>

<!-- A11y: The attribute 'aria-required' is not supported by the role 'listitem'. This role is implicit on the element <li>. -->
<li aria-required></li>
```

### a11y_unknown_aria_attribute

```
Unknown aria attribute 'aria-%attribute%'
```

```
Unknown aria attribute 'aria-%attribute%'. Did you mean '%suggestion%'?
```

Enforce that only known ARIA attributes are used. This is based on the [WAI-ARIA States and Properties spec](https://www.w3.org/WAI/PF/aria-1.1/states_and_properties).

```svelte
<!-- A11y: Unknown aria attribute 'aria-labeledby' (did you mean 'labelledby'?) -->
<input type="image" aria-labeledby="foo" />
```

### a11y_unknown_role

```
Unknown role '%role%'
```

```
Unknown role '%role%'. Did you mean '%suggestion%'?
```

Elements with ARIA roles must use a valid, non-abstract ARIA role.
A reference to role definitions can be found at [WAI-ARIA](https://www.w3.org/TR/wai-aria/#role_definitions) site. <!-- prettier-ignore --> ```svelte <!-- A11y: Unknown role 'toooltip' (did you mean 'tooltip'?) --> <div role="toooltip"></div> ``` ### attribute_avoid_is ``` The "is" attribute is not supported cross-browser and should be avoided ``` ### attribute_global_event_reference ``` You are referencing `globalThis.%name%`. Did you forget to declare a variable with that name? ``` ### attribute_illegal_colon ``` Attributes should not contain ':' characters to prevent ambiguity with Svelte directives ``` ### attribute_invalid_property_name ``` '%wrong%' is not a valid HTML attribute. Did you mean '%right%'? ``` ### attribute_quoted ``` Quoted attributes on components and custom elements will be stringified in a future version of Svelte. If this isn't what you want, remove the quotes ``` ### bidirectional_control_characters ``` A bidirectional control character was detected in your code. These characters can be used to alter the visual direction of your code and could have unintended consequences ``` Bidirectional control characters can alter the direction in which text appears to be in. For example, via control characters, you can make `defabc` look like `abcdef`. As a result, if you were to unknowingly copy and paste some code that has these control characters, they may alter the behavior of your code in ways you did not intend. See [trojansource.codes](https://trojansource.codes/) for more information. ### bind_invalid_each_rest ``` The rest operator (...) 
will create a new object and binding '%name%' with the original object will not work ``` ### block_empty ``` Empty block ``` ### component_name_lowercase ``` `<%name%>` will be treated as an HTML element unless it begins with a capital letter ``` ### css_unused_selector ``` Unused CSS selector "%name%" ``` Svelte traverses both the template and the `<style>` tag to find out which of the CSS selectors are not used within the template, so it can remove them. In some situations a selector may target an element that is not 'visible' to the compiler, for example because it is part of an `{@html ...}` tag or you're overriding styles in a child component. In these cases, use [`:global`](/docs/svelte/global-styles) to preserve the selector as-is: ```svelte <div class="post">{@html content}</div> <style> .post :global { p {...} } </style> ``` ### custom_element_props_identifier ``` Using a rest element or a non-destructured declaration with `$props()` means that Svelte can't infer what properties to expose when creating a custom element. Consider destructuring all the props or explicitly specifying the `customElement.props` option. ``` ### element_implicitly_closed ``` This element is implicitly closed by the following `%tag%`, which can cause an unexpected DOM structure. Add an explicit `%closing%` to avoid surprises. ``` In HTML, some elements are implicitly closed by another element. For example, you cannot nest a `<p>` inside another `<p>`: ```html <!-- this HTML... --> <p><p>hello</p> <!-- results in this DOM structure --> <p></p> <p>hello</p> ``` Similarly, a parent element's closing tag will implicitly close all child elements, even if the `</` was a typo and you meant to create a _new_ element. To avoid ambiguity, it's always a good idea to have an explicit closing tag. ### element_invalid_self_closing_tag ``` Self-closing HTML tags for non-void elements are ambiguous — use `<%name% ...></%name%>` rather than `<%name% ... 
/>` ``` In HTML, there's [no such thing as a self-closing tag](https://jakearchibald.com/2023/against-self-closing-tags-in-html/). While this _looks_ like a self-contained element with some text next to it... ```html <div> <span class="icon" /> some text! </div> ``` ...a spec-compliant HTML parser (such as a browser) will in fact parse it like this, with the text _inside_ the icon: ```html <div> <span class="icon"> some text! </span> </div> ``` Some templating languages (including Svelte) will 'fix' HTML by turning `<span />` into `<span></span>`. Others adhere to the spec. Both result in ambiguity and confusion when copy-pasting code between different contexts, so Svelte prompts you to resolve the ambiguity directly by having an explicit closing tag. To automate this, run the dedicated migration: ```sh npx sv migrate self-closing-tags ``` In a future version of Svelte, self-closing tags may be upgraded from a warning to an error. ### event_directive_deprecated ``` Using `on:%name%` to listen to the %name% event is deprecated. Use the event attribute `on%name%` instead ``` See [the migration guide](v5-migration-guide#Event-changes) for more info. ### export_let_unused ``` Component has unused export property '%name%'. If it is for external reference only, please consider using `export const %name%` ``` ### legacy_code ``` `%code%` is no longer valid — please use `%suggestion%` instead ``` ### legacy_component_creation ``` Svelte 5 components are no longer classes. Instantiate them using `mount` or `hydrate` (imported from 'svelte') instead. ``` See the [migration guide](v5-migration-guide#Components-are-no-longer-classes) for more info. ### node_invalid_placement_ssr ``` %message%. When rendering this component on the server, the resulting HTML will be modified by the browser (by moving, removing, or inserting elements), likely resulting in a `hydration_mismatch` warning ``` HTML restricts where certain elements can appear. 
In case of a violation the browser will 'repair' the HTML in a way that breaks Svelte's assumptions about the structure of your components. Some examples: - `<p>hello <div>world</div></p>` will result in `<p>hello </p><div>world</div><p></p>` (the `<div>` autoclosed the `<p>` because `<p>` cannot contain block-level elements) - `<option><div>option a</div></option>` will result in `<option>option a</option>` (the `<div>` is removed) - `<table><tr><td>cell</td></tr></table>` will result in `<table><tbody><tr><td>cell</td></tr></tbody></table>` (a `<tbody>` is auto-inserted) This code will work when the component is rendered on the client (which is why this is a warning rather than an error), but if you use server rendering it will cause hydration to fail. ### non_reactive_update ``` `%name%` is updated, but is not declared with `$state(...)`. Changing its value will not correctly trigger updates ``` This warning is thrown when the compiler detects the following: - a variable was declared without `$state` or `$state.raw` - the variable is reassigned - the variable is read in a reactive context In this case, changing the value will not correctly trigger updates. Example: ```svelte <script> let reactive = $state('reactive'); let stale = 'stale'; </script> <p>This value updates: {reactive}</p> <p>This value does not update: {stale}</p> <button onclick={() => { stale = 'updated'; reactive = 'updated'; }}>update</button> ``` To fix this, wrap your variable declaration with `$state`. ### options_deprecated_accessors ``` The `accessors` option has been deprecated. It will have no effect in runes mode ``` ### options_deprecated_immutable ``` The `immutable` option has been deprecated. It will have no effect in runes mode ``` ### options_missing_custom_element ``` The `customElement` option is used when generating a custom element. Did you forget the `customElement: true` compile option? 
``` ### options_removed_enable_sourcemap ``` The `enableSourcemap` option has been removed. Source maps are always generated now, and tooling can choose to ignore them ``` ### options_removed_hydratable ``` The `hydratable` option has been removed. Svelte components are always hydratable now ``` ### options_removed_loop_guard_timeout ``` The `loopGuardTimeout` option has been removed ``` ### options_renamed_ssr_dom ``` `generate: "dom"` and `generate: "ssr"` options have been renamed to "client" and "server" respectively ``` ### perf_avoid_inline_class ``` Avoid 'new class' — instead, declare the class at the top level scope ``` ### perf_avoid_nested_class ``` Avoid declaring classes below the top level scope ``` ### reactive_declaration_invalid_placement ``` Reactive declarations only exist at the top level of the instance script ``` ### reactive_declaration_module_script_dependency ``` Reassignments of module-level declarations will not cause reactive statements to update ``` ### script_context_deprecated ``` `context="module"` is deprecated, use the `module` attribute instead ``` ```svelte <script ---context="module"--- +++module+++> let foo = 'bar'; </script> ``` ### script_unknown_attribute ``` Unrecognized attribute — should be one of `generics`, `lang` or `module`. If this exists for a preprocessor, ensure that the preprocessor removes it ``` ### slot_element_deprecated ``` Using `<slot>` to render parent content is deprecated. Use `{@render ...}` tags instead ``` See [the migration guide](v5-migration-guide#Snippets-instead-of-slots) for more info. ### state_referenced_locally ``` This reference only captures the initial value of `%name%`. Did you mean to reference it inside a %type% instead? ``` This warning is thrown when the compiler detects the following: - A reactive variable is declared - ...and later reassigned... - ...and referenced in the same scope This 'breaks the link' to the original state declaration. 
For example, if you pass the state to a function, the function loses access to the state once it is reassigned: ```svelte <!--- file: Parent.svelte ---> <script> import { setContext } from 'svelte'; let count = $state(0); // warning: state_referenced_locally setContext('count', count); </script> <button onclick={() => count++}> increment </button> ``` ```svelte <!--- file: Child.svelte ---> <script> import { getContext } from 'svelte'; const count = getContext('count'); </script> <!-- This will never update --> <p>The count is {count}</p> ``` To fix this, reference the variable such that it is lazily evaluated. For the above example, this can be achieved by wrapping `count` in a function: ```svelte <!--- file: Parent.svelte ---> <script> import { setContext } from 'svelte'; let count = $state(0); setContext('count', +++() => count+++); </script> <button onclick={() => count++}> increment </button> ``` ```svelte <!--- file: Child.svelte ---> <script> import { getContext } from 'svelte'; const count = getContext('count'); </script> <!-- This will update --> <p>The count is {+++count()+++}</p> ``` For more info, see [Passing state into functions]($state#Passing-state-into-functions). ### store_rune_conflict ``` It looks like you're using the `$%name%` rune, but there is a local binding called `%name%`. Referencing a local variable with a `$` prefix will create a store subscription. Please rename `%name%` to avoid the ambiguity ``` ### svelte_component_deprecated ``` `<svelte:component>` is deprecated in runes mode — components are dynamic by default ``` In previous versions of Svelte, the component constructor was fixed when the component was rendered. In other words, if you wanted `<X>` to re-render when `X` changed, you would either have to use `<svelte:component this={X}>` or put the component inside a `{#key X}...{/key}` block. In Svelte 5 this is no longer true — if `X` changes, `<X>` re-renders. 
In some cases `<object.property>` syntax can be used as a replacement; a lowercased variable with property access is recognized as a component in Svelte 5. For complex component resolution logic, an intermediary, capitalized variable may be necessary. E.g. in places where `@const` can be used: <!-- prettier-ignore --> ```svelte {#each items as item} ---<svelte:component this={item.condition ? Y : Z} />--- +++{@const Component = item.condition ? Y : Z}+++ +++<Component />+++ {/each} ``` A derived value may be used in other contexts: <!-- prettier-ignore --> ```svelte <script> // ... let condition = $state(false); +++const Component = $derived(condition ? Y : Z);+++ </script> ---<svelte:component this={condition ? Y : Z} />--- +++<Component />+++ ``` ### svelte_element_invalid_this ``` `this` should be an `{expression}`. Using a string attribute value will cause an error in future versions of Svelte ``` ### svelte_self_deprecated ``` `<svelte:self>` is deprecated — use self-imports (e.g. `import %name% from './%basename%'`) instead ``` See [the note in the docs](legacy-svelte-self) for more info. ### unknown_code ``` `%code%` is not a recognised code ``` ``` `%code%` is not a recognised code (did you mean `%suggestion%`?) ```
unknown
github
https://github.com/sveltejs/svelte
documentation/docs/98-reference/.generated/compile-warnings.md
# SBOM linter: validates an SBOM file against the CycloneDX 1.5 schema and
# additional project rules (purl/cpe, evidence, team, version, license checks).
import argparse
import json
import os
import sys
from typing import List

import jsonschema
from license_expression import get_spdx_licensing
from referencing import Registry, Resource

# Local copies of the JSON schemas used for validation.
BOM_SCHEMA_LOCATION = os.path.join("buildscripts", "tests", "sbom_linter", "bom-1.5.schema.json")
SPDX_SCHEMA_LOCATION = os.path.join("buildscripts", "tests", "sbom_linter", "spdx.schema.json")
# Name under which the SPDX schema is registered for local `$ref` resolution.
SPDX_SCHEMA_REF = "spdx.schema.json"

# directory to scan for third party libraries
THIRD_PARTY_DIR = os.path.join("src", "third_party")
# platform independent prefix of third party libraries
THIRD_PARTY_LOCATION_PREFIX = "src/third_party/"

# This should only be set to True in tests, so that the tests do not rely on the
# current state of the third party library dir.
SKIP_FILE_CHECKING = False

# Error messages used for matching in testing
UNDEFINED_THIRD_PARTY_ERROR = (
    "The following files in src/third_party do not have components defined in the sbom:"
)
FORMATTING_ERROR = "File has incorrect formatting, re-run `buildscripts/sbom_linter.py` with the `--format` option to fix this."
MISSING_PURL_CPE_ERROR = "Component must include a 'purl' or 'cpe' field."
MISSING_EVIDENCE_ERROR = (
    "Component must include an 'evidence.occurrences' field when the scope is required."
)
MISSING_TEAM_ERROR = "Component must include a 'internal:team_responsible' property."
SCHEMA_MATCH_FAILURE = "File did not match the CycloneDX schema"
MISSING_VERSION_IN_SBOM_COMPONENT_ERROR = "Component must include a version."
MISSING_VERSION_IN_IMPORT_FILE_ERROR = "Missing version in the import file: "
MISSING_LICENSE_IN_SBOM_COMPONENT_ERROR = "Component must include a license."
# Error messages used for matching in testing (continued).
COULD_NOT_FIND_OR_READ_SCRIPT_FILE_ERROR = "Could not find or read the import script file"
VERSION_MISMATCH_ERROR = "Version mismatch (may simply be an artifact of SBOM automation): "


class ErrorManager:
    """Collects error messages for one SBOM input file.

    Messages appended via append_full_error_message are prefixed with the input
    file and, when one has been set, the component currently being validated.
    """

    def __init__(self, input_file: str):
        self.input_file: str = input_file
        self.component_name: str = ""
        self.errors: List[str] = []

    def update_component_attribute(self, component_name: str) -> None:
        """Record the component name that subsequent errors refer to."""
        self.component_name = component_name

    def append(self, message: str) -> None:
        """Append a raw error message without any file/component prefix."""
        self.errors.append(message)

    def append_full_error_message(self, message: str) -> None:
        """Append an error prefixed with the input file and current component."""
        if self.component_name:
            prefix = f"Input-file:{self.input_file} Component:{self.component_name}"
        else:
            prefix = f"Input-file:{self.input_file}"
        self.errors.append(f"{prefix} Error: {message}")

    def print_errors(self) -> None:
        """Write all collected errors to stderr, one per line."""
        if self.errors:
            print("\n".join(self.errors), file=sys.stderr)

    def zero_error(self) -> bool:
        """Return True when no errors have been collected."""
        return not self.errors

    def find_message_in_errors(self, message: str) -> bool:
        """Return True if any collected error contains `message` as a substring."""
        return any(message in error for error in self.errors)


def get_schema() -> dict:
    """Load and return the CycloneDX BOM schema as a dict."""
    with open(BOM_SCHEMA_LOCATION, "r") as schema_file:
        return json.load(schema_file)


def local_schema_registry():
    """Create a local registry used to resolve references to the external SPDX schema."""
    with open(SPDX_SCHEMA_LOCATION, "r") as spdx_file:
        spdx_schema = Resource.from_contents(json.load(spdx_file))
    return Registry().with_resources([(SPDX_SCHEMA_REF, spdx_schema)])


# get_script_version scans script_path for a line where the two tokens
# script_version_key and the version string are separated by a "=" separator and
# optional spaces. The "end of line" delimiter on each line must be stripped.
def get_script_version(
    script_path: str, script_version_key: str, error_manager: ErrorManager
) -> str:
    """Read a version string out of an import script file.

    Scans ``script_path`` for a line of the form ``<key> = "<value>"``
    (double quotes and spaces are ignored) where ``<key>`` equals
    ``script_version_key``, and returns ``<value>``.  Returns "" when the
    file cannot be opened (an error is recorded on ``error_manager``) or the
    key is not present.
    """
    result = ""
    try:
        file = open(script_path, "r")
    except OSError:
        error_manager.append_full_error_message(COULD_NOT_FIND_OR_READ_SCRIPT_FILE_ERROR)
        return result
    with file:
        for line in file:
            # Remove possible spaces, string delimiters and an "end of line" delimiter.
            tokens = line.rstrip().replace('"', "").replace(" ", "").split("=")
            if (len(tokens) > 1) and (tokens[0] == script_version_key):
                result = tokens[1]
                break
    return result


# A version string sometimes contains an extra prefix like "v1.2" instead of "1.2"
# This function strips that extra prefix.
def strip_extra_prefixes(string_with_prefix: str) -> str:
    """Drop a leading "mongo/" and/or "v" so version strings compare consistently."""
    return string_with_prefix.removeprefix("mongo/").removeprefix("v")


def validate_license(component: dict, error_manager: ErrorManager) -> None:
    """Check that a component declares a usable license.

    A component is acceptable when it carries a license ``name`` (for
    licenses SPDX does not define), or when its SPDX ``expression``/``id``
    passes license-expression validation.  Records an error on
    ``error_manager`` otherwise.
    """
    if "licenses" not in component:
        error_manager.append_full_error_message(MISSING_LICENSE_IN_SBOM_COMPONENT_ERROR)
        return
    valid_license = False
    expression = None
    for component_license in component["licenses"]:
        if "expression" in component_license:
            expression = component_license.get("expression")
        elif "license" in component_license:
            if "id" in component_license["license"]:
                # Should be a valid SPDX license ID
                expression = component_license["license"].get("id")
            elif "name" in component_license["license"]:
                # If SPDX does not define the license used, the name field may be
                # used to provide the license name
                valid_license = True
    if not valid_license:
        if expression is None:
            # No expression, id, or name was present in any licenses entry.
            # Report a missing license instead of handing None to the SPDX
            # validator (which would blow up on a non-string input).
            error_manager.append_full_error_message(MISSING_LICENSE_IN_SBOM_COMPONENT_ERROR)
            return
        licensing_validate = get_spdx_licensing().validate(expression, validate=True)
        # ExpressionInfo(
        #     original_expression='',
        #     normalized_expression='',
        #     errors=[],
        #     invalid_symbols=[]
        # )
        # Fix: the expression is only valid when there are no errors AND no
        # invalid symbols.  The previous `or` accepted any expression that
        # failed just one of the two checks.
        valid_license = not licensing_validate.errors and not licensing_validate.invalid_symbols
        if not valid_license:
            error_manager.append_full_error_message(licensing_validate)
    return


def validate_evidence(component: dict, third_party_libs: set, error_manager: ErrorManager) -> None:
    """For "required" components, demand evidence.occurrences and check each location."""
    if component["scope"] == "required":
        if "evidence" not in component or "occurrences" not in component["evidence"]:
            error_manager.append_full_error_message(MISSING_EVIDENCE_ERROR)
            return
        validate_location(component, third_party_libs, error_manager)


def validate_properties(component: dict, error_manager: ErrorManager) -> None:
    """Validate a component's properties and version.

    Requires an ``internal:team_responsible`` property (waived for
    "excluded" components) and a ``version``.  When an
    ``import_script_path`` property is present, cross-checks the component
    (or pedigree descendant) version against the version recorded in that
    script; a mismatch is only a warning, not a lint failure.
    """
    # Excluded components do not need a responsible team.
    has_team_responsible_property = component["scope"] == "excluded"
    script_path = ""
    if "properties" in component:
        for prop in component["properties"]:
            if prop["name"] == "internal:team_responsible":
                has_team_responsible_property = True
            elif prop["name"] == "import_script_path":
                script_path = prop["value"]
    if not has_team_responsible_property:
        error_manager.append_full_error_message(MISSING_TEAM_ERROR)

    if not component.get("version"):
        error_manager.append_full_error_message(MISSING_VERSION_IN_SBOM_COMPONENT_ERROR)
        return
    comp_version = component["version"]

    # If the version is unknown or the script path property is absent, the version
    # check is not possible (these are valid options and no error is generated).
    if comp_version == "Unknown" or script_path == "":
        return

    # Include the .pedigree.descendants[0] version for version matching
    if (
        "pedigree" in component
        and "descendants" in component["pedigree"]
        and "version" in component["pedigree"]["descendants"][0]
    ):
        comp_pedigree_version = component["pedigree"]["descendants"][0]["version"]
    else:
        comp_pedigree_version = ""

    # At this point a version is attempted to be read from the import script file
    script_version = get_script_version(script_path, "VERSION", error_manager)
    if script_version == "":
        error_manager.append_full_error_message(MISSING_VERSION_IN_IMPORT_FILE_ERROR + script_path)
    elif strip_extra_prefixes(script_version) != strip_extra_prefixes(
        comp_version
    ) and strip_extra_prefixes(script_version) != strip_extra_prefixes(comp_pedigree_version):
        # Mismatches are warnings only — SBOM automation can lag the script.
        print(
            f"WARNING: {VERSION_MISMATCH_ERROR}\n script version:{script_version}\n sbom component version:{comp_version}\n sbom component pedigree version:{comp_pedigree_version}"
        )


def validate_component(component: dict, third_party_libs: set, error_manager: ErrorManager) -> None:
    """Run all per-component checks (scope, evidence, properties, license, purl/cpe)."""
    error_manager.update_component_attribute(component["name"])
    if "scope" not in component:
        error_manager.append_full_error_message("component must include a scope.")
    else:
        validate_evidence(component, third_party_libs, error_manager)
        validate_properties(component, error_manager)
    validate_license(component, error_manager)
    if "purl" not in component and "cpe" not in component:
        error_manager.append_full_error_message(MISSING_PURL_CPE_ERROR)
    # Reset the prefix so subsequent file-level errors are not misattributed.
    error_manager.update_component_attribute("")


def validate_location(component: dict, third_party_libs: set, error_manager: ErrorManager) -> None:
    """Check every evidence occurrence location exists and mark covered third-party dirs.

    Removes each covered library from ``third_party_libs`` so the caller can
    report any leftovers as undocumented.
    """
    if "evidence" in component:
        if "occurrences" not in component["evidence"]:
            error_manager.append_full_error_message(
                "'evidence.occurrences' field must include at least one location."
            )
        occurrences = component["evidence"]["occurrences"]
        for occurrence in occurrences:
            if "location" in occurrence:
                location = occurrence["location"]
                if not os.path.exists(location) and not SKIP_FILE_CHECKING:
                    error_manager.append_full_error_message("location does not exist in repo.")
                if location.startswith(THIRD_PARTY_LOCATION_PREFIX):
                    lib = location.removeprefix(THIRD_PARTY_LOCATION_PREFIX)
                    if lib in third_party_libs:
                        third_party_libs.remove(lib)


def lint_sbom(
    input_file: str, output_file: str, third_party_libs: set, should_format: bool
) -> ErrorManager:
    """Lint (and optionally reformat) one SBOM file.

    Parses ``input_file``, validates it against the CycloneDX schema,
    validates each component, reports third-party libraries with no
    component, and checks canonical JSON formatting (rewriting
    ``output_file`` when ``should_format`` is set).  Returns the populated
    ErrorManager.
    """
    with open(input_file, "r", encoding="utf-8") as sbom_file:
        sbom_text = sbom_file.read()
    error_manager = ErrorManager(input_file)
    try:
        sbom = json.loads(sbom_text)
    except Exception as ex:
        error_manager.append(f"Failed to parse {input_file}: {str(ex)}")
        return error_manager

    try:
        schema = get_schema()
        jsonschema.validators.validator_for(schema)(
            schema, registry=local_schema_registry()
        ).validate(sbom)
    except jsonschema.ValidationError as error:
        error_manager.append(f"{SCHEMA_MATCH_FAILURE} {input_file}")
        error_manager.append(error.message)
        return error_manager

    # Schema validation above guarantees "components" is present.
    components = sbom["components"]
    for component in components:
        validate_component(component, third_party_libs, error_manager)

    if third_party_libs:
        error_manager.append(UNDEFINED_THIRD_PARTY_ERROR)
        for lib in third_party_libs:
            error_manager.append(f"  {lib}")

    formatted_sbom = json.dumps(sbom, indent=2) + "\n"
    if formatted_sbom != sbom_text:
        error_manager.append(f"{input_file} {FORMATTING_ERROR}")
        if should_format:
            with open(output_file, "w", encoding="utf-8") as sbom_file:
                sbom_file.write(formatted_sbom)
    return error_manager


def main() -> int:
    """CLI entry point: lint sbom.json and return 0 on success, 1 on lint errors."""
    # Run from the bazel workspace root when invoked under bazel.
    os.chdir(os.environ.get("BUILD_WORKSPACE_DIRECTORY", "."))
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--format",
        action="store_true",
        default=False,
        help="Whether to apply formatting to the output file.",
    )
    parser.add_argument(
        "--input-file", default="sbom.json", help="The input CycloneDX file to format and lint."
    )
    parser.add_argument(
        "--output-file",
        default="sbom.json",
        help="The file to output to when formatting is specified.",
    )
    args = parser.parse_args()
    should_format = args.format
    input_file = args.input_file
    output_file = args.output_file

    third_party_libs = set(
        [
            path
            for path in os.listdir(THIRD_PARTY_DIR)
            if not os.path.isfile(os.path.join(THIRD_PARTY_DIR, path))
        ]
    )
    # the only files in this dir that are not third party libs
    third_party_libs.remove("scripts")
    # the only files in the sasl dir are BUILD files to setup the sasl library in Windows
    third_party_libs.remove("sasl")
    # This is not a real third party, its just the local ssl pretending to be boringssl
    third_party_libs.remove("boringssl_replacement")
    # This is just a build file that gets inserted into a third party
    third_party_libs.remove("wasmtime")
    # Nothing in this directory is included in Community/EA
    third_party_libs.remove("private")

    error_manager = lint_sbom(input_file, output_file, third_party_libs, should_format)
    error_manager.print_errors()
    return 0 if error_manager.zero_error() else 1


if __name__ == "__main__":
    sys.exit(main())
python
github
https://github.com/mongodb/mongo
buildscripts/sbom_linter.py
def Dijkstra(G, start, end=None):
    """
    Find shortest paths from the start vertex to all
    vertices nearer than or equal to the end.

    The input graph G is assumed to have the following
    representation: A vertex can be any object that can
    be used as an index into a dictionary.  G is a
    dictionary, indexed by vertices.  For any vertex v,
    G[v] is itself a dictionary, indexed by the neighbors
    of v.  For any edge v->w, G[v][w] is the length of
    the edge.  This is related to the representation in
    <http://www.python.org/doc/essays/graphs.html>
    where Guido van Rossum suggests representing graphs
    as dictionaries mapping vertices to lists of neighbors,
    however dictionaries of edges have many advantages
    over lists: they can store extra information (here,
    the lengths), they support fast existence tests,
    and they allow easy modification of the graph by edge
    insertion and removal.  Such modifications are not
    needed here but are important in other graph algorithms.
    Since dictionaries obey iterator protocol, a graph
    represented as described here could be handed without
    modification to an algorithm using Guido's representation.

    Of course, G and G[v] need not be Python dict objects;
    they can be any other object that obeys dict protocol,
    for instance a wrapper in which vertices are URLs
    and a call to G[v] loads the web page and finds its links.

    The output is a pair (D,P) where D[v] is the distance
    from start to v and P[v] is the predecessor of v along
    the shortest path from s to v.

    Dijkstra's algorithm is only guaranteed to work correctly
    when all edge lengths are positive. This code does not
    verify this property for all edges (only the edges seen
    before the end vertex is reached), but will correctly
    compute shortest paths even for some graphs with negative
    edges, and will raise an exception if it discovers that
    a negative edge has caused it to make a mistake.
    """
    D = {}  # dictionary of final distances
    P = {}  # dictionary of predecessors
    # NOTE(review): priorityDictionary is defined elsewhere in this module's
    # original source; the `for v in Q` loop below relies on its custom
    # iterator yielding (and removing) the vertex with the smallest
    # estimated distance on each step — TODO confirm against its definition.
    Q = priorityDictionary()  # estimated distances of non-final vertices
    Q[start] = 0

    for v in Q:
        # v now has its final (smallest remaining) distance; record it.
        D[v] = Q[v]
        if v == end:
            break

        # Relax every outgoing edge of v.
        for w in G[v]:
            vwLength = D[v] + G[v][w]
            if w in D:
                # w was already finalized; a shorter path can only appear if
                # a negative edge broke Dijkstra's invariant.
                if vwLength < D[w]:
                    raise ValueError("Dijkstra: found better path to already-final vertex")
            elif w not in Q or vwLength < Q[w]:
                # First time seeing w, or found a shorter tentative path.
                Q[w] = vwLength
                P[w] = v

    return (D, P)


def shortestPath(G, start, end):
    """
    Find a single shortest path from the given start vertex
    to the given end vertex.
    The input has the same conventions as Dijkstra().
    The output is a list of the vertices in order along
    the shortest path.
    """
    D, P = Dijkstra(G, start, end)
    Path = []
    # Walk the predecessor chain backwards from end to start, then reverse.
    while 1:
        Path.append(end)
        if end == start:
            break
        end = P[end]
    Path.reverse()
    return Path


# example, CLR p.528
G = {'s': {'u':10, 'x':5},
     'u': {'v':1, 'x':2},
     'v': {'y':4},
     'x':{'u':3,'v':9,'y':2},
     'y':{'s':7,'v':6}}

print(Dijkstra(G,'s'))
print(shortestPath(G,'s','v'))
unknown
codeparrot/codeparrot-clean
# NOTE(review): Python 2 code (`basestring`, u'' literals) using legacy Django
# APIs (render_to_response) — keep in mind if this file is ever ported.
from django import forms
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
from django.db.models import Q

from airmozilla.main.models import Picture
from airmozilla.main.helpers import thumbnail


class _BaseForm(object):
    # Mixin that normalizes all string fields after standard form cleaning.
    def clean(self):
        cleaned_data = super(_BaseForm, self).clean()
        for field in cleaned_data:
            if isinstance(cleaned_data[field], basestring):
                # Normalize Windows newlines, replace curly single quotes
                # (U+2018/U+2019) with ASCII apostrophes, and trim whitespace.
                cleaned_data[field] = (
                    cleaned_data[field].replace('\r\n', '\n')
                    .replace(u'\u2018', "'").replace(u'\u2019', "'").strip())
        return cleaned_data


class BaseModelForm(_BaseForm, forms.ModelForm):
    # ModelForm variant with the string normalization from _BaseForm.
    pass


class BaseForm(_BaseForm, forms.Form):
    # Plain Form variant with the string normalization from _BaseForm.
    pass


class GallerySelect(forms.widgets.Widget):
    """
    Produces a gallery of all Pictures for the user
    to select from.
    """
    def __init__(self, *args, **kwargs):
        # Optional event: restricts which pictures appear in the gallery.
        self.event = kwargs.pop('event', None)
        super(GallerySelect, self).__init__(*args, **kwargs)

    def render(self, name, value, attrs):
        # Builds the gallery context (thumbnail, notes, selection flag per
        # picture) and renders it through the gallery.html template.
        pictures = []

        qs = Picture.objects.all()
        if self.event:
            # Show event-agnostic pictures plus ones tied to this event.
            qs = qs.filter(
                Q(event__isnull=True) | Q(event=self.event)
            )
            # If the current event does use an inactive picture,
            # let it still be a choice.
            if self.event.picture_id:
                qs = qs.filter(
                    Q(is_active=True) | Q(id=self.event.picture_id)
                )
            else:
                qs = qs.filter(is_active=True)
        else:
            qs = qs.filter(
                event__isnull=True,
                is_active=True,
            )
        for pic in qs.order_by('event', '-created'):
            thumb = thumbnail(pic.file, '160x90', crop='center')
            pictures.append({
                'thumb': {
                    'url': thumb.url,
                    'width': thumb.width,
                    'height': thumb.height
                },
                'notes': pic.notes,
                # NOTE(review): `value` may arrive as a string from form data
                # while pic.id is an int — confirm callers coerce it, else
                # this equality never matches.
                'selected': value == pic.id,
                'id': pic.id,
            })

        context = {
            'pictures': pictures,
            'current_id': value,
            'name': name
        }
        return mark_safe(render_to_response('gallery.html', context).content)

    class Media:
        # NOTE! At the moment, these are replicated manually wherever
        # this form widget is used. That's because jinja offline compression
        # with {{ form.media.js }} doesn't work.
        css = {'all': ('css/gallery_select.css',)}
        js = ('js/gallery_select.js',)
unknown
codeparrot/codeparrot-clean
import Link from "next/link";
import { draftMode } from "next/headers";

import MoreStories from "../../more-stories";
import Avatar from "../../avatar";
import Date from "../../date";
import CoverImage from "../../cover-image";

import { Markdown } from "@/lib/markdown";
import { getAllPosts, getPostAndMorePosts } from "@/lib/api";

// Pre-render a static page for every post slug returned by the CMS.
// Passing `false` excludes draft content from the static build.
export async function generateStaticParams() {
  const allPosts = await getAllPosts(false);

  return allPosts.map((post) => ({
    slug: post.slug,
  }));
}

// Server component for a single blog post page (/posts/[slug]).
// Fetches the post plus related posts; when Next.js draft mode is enabled,
// the CMS query includes unpublished content.
export default async function PostPage({
  params,
}: {
  params: { slug: string };
}) {
  const { isEnabled } = draftMode();
  const { post, morePosts } = await getPostAndMorePosts(params.slug, isEnabled);

  return (
    <div className="container mx-auto px-5">
      <h2 className="mb-20 mt-8 text-2xl font-bold leading-tight tracking-tight md:text-4xl md:tracking-tighter">
        <Link href="/" className="hover:underline">
          Blog
        </Link>
        .
      </h2>
      <article>
        <h1 className="mb-12 text-center text-6xl font-bold leading-tight tracking-tighter md:text-left md:text-7xl md:leading-none lg:text-8xl">
          {post.title}
        </h1>
        <div className="hidden md:mb-12 md:block">
          {post.author && (
            <Avatar name={post.author.name} picture={post.author.picture} />
          )}
        </div>
        <div className="mb-8 sm:mx-0 md:mb-16">
          <CoverImage title={post.title} url={post.coverImage.url} />
        </div>
        <div className="mx-auto max-w-2xl">
          <div className="mb-6 block md:hidden">
            {post.author && (
              <Avatar name={post.author.name} picture={post.author.picture} />
            )}
          </div>
          <div className="mb-6 text-lg">
            <Date dateString={post.date} />
          </div>
        </div>
        <div className="mx-auto max-w-2xl">
          <div className="prose">
            <Markdown content={post.content} />
          </div>
        </div>
      </article>
      <hr className="border-accent-2 mt-28 mb-24" />
      <MoreStories morePosts={morePosts} />
    </div>
  );
}
typescript
github
https://github.com/vercel/next.js
examples/cms-contentful/app/posts/[slug]/page.tsx
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    name: auto
    plugin_type: inventory
    author:
      - Matt Davis <@nitzmahone>
    short_description: Loads and executes an inventory plugin specified in a YAML config
    description:
        - By whitelisting C(auto) as the final inventory plugin, any YAML inventory config file with a
          C(plugin) key at its root will automatically cause the named plugin to be loaded and executed with that
          config. This effectively provides automatic whitelisting of all installed/accessible inventory plugins.
        - To disable this behavior, remove C(auto) from the C(INVENTORY_ENABLED) config element.
'''

EXAMPLES = '''
# This plugin is not intended for direct use; it is a fallback mechanism for automatic whitelisting of
# all installed inventory plugins.
'''

from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.plugins.loader import inventory_loader


class InventoryModule(BaseInventoryPlugin):

    NAME = 'auto'

    def verify_file(self, path):
        """Accept only YAML files; defer further checks to the base class."""
        # Idiom: str.endswith accepts a tuple of suffixes, replacing the
        # previous pair of chained endswith calls.
        if not path.endswith(('.yml', '.yaml')):
            return False
        return super(InventoryModule, self).verify_file(path)

    def parse(self, inventory, loader, path, cache=True):
        """Load the YAML config, resolve its root 'plugin' key, and delegate.

        Raises AnsibleParserError when the config has no 'plugin' key, names
        an unknown plugin, or the named plugin refuses the file.
        """
        config_data = loader.load_from_file(path, cache=False)

        plugin_name = config_data.get('plugin')

        if not plugin_name:
            raise AnsibleParserError("no root 'plugin' key found, '{0}' is not a valid YAML inventory plugin config file".format(path))

        plugin = inventory_loader.get(plugin_name)

        if not plugin:
            raise AnsibleParserError("inventory config '{0}' specifies unknown plugin '{1}'".format(path, plugin_name))

        if not plugin.verify_file(path):
            raise AnsibleParserError("inventory config '{0}' could not be verified by plugin '{1}'".format(path, plugin_name))

        # Delegate the actual inventory population to the resolved plugin.
        plugin.parse(inventory, loader, path, cache=cache)
unknown
codeparrot/codeparrot-clean
/*
 * Copyright (C) 2008 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.common.primitives;

import static com.google.common.primitives.UnsignedBytes.max;
import static com.google.common.primitives.UnsignedBytes.min;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static java.lang.Byte.toUnsignedInt;
import static java.lang.Math.signum;
import static org.junit.Assert.assertThrows;

import com.google.common.collect.testing.Helpers;
import com.google.common.primitives.UnsignedBytes.LexicographicalComparatorHolder.UnsafeComparator;
import com.google.common.testing.NullPointerTester;
import com.google.common.testing.SerializableTester;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Random;
import junit.framework.TestCase;
import org.jspecify.annotations.NullUnmarked;

/**
 * Unit test for {@link UnsignedBytes}.
 *
 * @author Kevin Bourrillion
 * @author Louis Wasserman
 */
@NullUnmarked
public class UnsignedBytesTest extends TestCase {
  // Unsigned extremes: 0 and 255 (which is -1 as a signed byte).
  private static final byte LEAST = 0;
  private static final byte GREATEST = (byte) 255;

  // Only in this class, VALUES must be strictly ascending
  private static final byte[] VALUES = {LEAST, 127, (byte) 128, (byte) 129, GREATEST};

  @SuppressWarnings("InlineMeInliner") // We need to test our method.
  public void testToInt() {
    assertThat(UnsignedBytes.toInt((byte) 0)).isEqualTo(0);
    assertThat(UnsignedBytes.toInt((byte) 1)).isEqualTo(1);
    assertThat(UnsignedBytes.toInt((byte) 127)).isEqualTo(127);
    // Negative signed bytes map to 128..255 unsigned.
    assertThat(UnsignedBytes.toInt((byte) -128)).isEqualTo(128);
    assertThat(UnsignedBytes.toInt((byte) -127)).isEqualTo(129);
    assertThat(UnsignedBytes.toInt((byte) -1)).isEqualTo(255);
  }

  public void testCheckedCast() {
    // checkedCast must round-trip every representative unsigned value...
    for (byte value : VALUES) {
      assertThat(UnsignedBytes.checkedCast(toUnsignedInt(value))).isEqualTo(value);
    }
    // ...and reject anything outside [0, 255].
    assertCastFails(256L);
    assertCastFails(-1L);
    assertCastFails(Long.MAX_VALUE);
    assertCastFails(Long.MIN_VALUE);
  }

  public void testSaturatedCast() {
    for (byte value : VALUES) {
      assertThat(UnsignedBytes.saturatedCast(toUnsignedInt(value))).isEqualTo(value);
    }
    // Out-of-range values clamp to the nearest bound instead of throwing.
    assertThat(UnsignedBytes.saturatedCast(256L)).isEqualTo(GREATEST);
    assertThat(UnsignedBytes.saturatedCast(-1L)).isEqualTo(LEAST);
    assertThat(UnsignedBytes.saturatedCast(Long.MAX_VALUE)).isEqualTo(GREATEST);
    assertThat(UnsignedBytes.saturatedCast(Long.MIN_VALUE)).isEqualTo(LEAST);
  }

  // Asserts checkedCast throws and that the offending value appears in the message.
  private static void assertCastFails(long value) {
    try {
      UnsignedBytes.checkedCast(value);
      fail("Cast to byte should have failed: " + value);
    } catch (IllegalArgumentException ex) {
      assertWithMessage("%s not found in exception text: %s", value, ex.getMessage())
          .that(ex.getMessage().contains(String.valueOf(value)))
          .isTrue();
    }
  }

  public void testCompare() {
    // This is the only ordering for primitives that does not have a
    // corresponding Comparable wrapper in java.lang.
    for (int i = 0; i < VALUES.length; i++) {
      for (int j = 0; j < VALUES.length; j++) {
        byte x = VALUES[i];
        byte y = VALUES[j];
        // note: spec requires only that the sign is the same
        assertWithMessage("%s, %s", x, y)
            .that(signum(UnsignedBytes.compare(x, y)))
            .isEqualTo(signum(Integer.compare(i, j)));
      }
    }
  }

  public void testMax_noArgs() {
    assertThrows(IllegalArgumentException.class, () -> max());
  }

  public void testMax() {
    assertThat(max(LEAST)).isEqualTo(LEAST);
    assertThat(max(GREATEST)).isEqualTo(GREATEST);
    assertThat(max((byte) 0, (byte) -128, (byte) -1, (byte) 127, (byte) 1)).isEqualTo((byte) 255);
  }

  public void testMin_noArgs() {
    assertThrows(IllegalArgumentException.class, () -> min());
  }

  public void testMin() {
    assertThat(min(LEAST)).isEqualTo(LEAST);
    assertThat(min(GREATEST)).isEqualTo(GREATEST);
    assertThat(min((byte) 0, (byte) -128, (byte) -1, (byte) 127, (byte) 1)).isEqualTo((byte) 0);
    assertThat(min((byte) -1, (byte) 127, (byte) 1, (byte) -128, (byte) 0)).isEqualTo((byte) 0);
  }

  private static void assertParseFails(String value) {
    try {
      UnsignedBytes.parseUnsignedByte(value);
      fail();
    } catch (NumberFormatException expected) {
    }
  }

  private static void assertParseFails(String value, int radix) {
    try {
      UnsignedBytes.parseUnsignedByte(value, radix);
      fail();
    } catch (NumberFormatException expected) {
    }
  }

  public void testParseUnsignedByte() {
    // We can easily afford to test this exhaustively.
    for (int i = 0; i <= 0xff; i++) {
      assertThat(UnsignedBytes.parseUnsignedByte(Integer.toString(i))).isEqualTo((byte) i);
    }
    assertParseFails("1000");
    assertParseFails("-1");
    assertParseFails("-128");
    assertParseFails("256");
  }

  public void testMaxValue() {
    // MAX_VALUE + 1 wraps to 0, which must compare below MAX_VALUE.
    assertThat(UnsignedBytes.compare(UnsignedBytes.MAX_VALUE, (byte) (UnsignedBytes.MAX_VALUE + 1)))
        .isGreaterThan(0);
  }

  public void testParseUnsignedByteWithRadix() throws NumberFormatException {
    // We can easily afford to test this exhaustively.
    for (int radix = Character.MIN_RADIX; radix <= Character.MAX_RADIX; radix++) {
      for (int i = 0; i <= 0xff; i++) {
        assertThat(UnsignedBytes.parseUnsignedByte(Integer.toString(i, radix), radix))
            .isEqualTo((byte) i);
      }
      assertParseFails(Integer.toString(1000, radix), radix);
      assertParseFails(Integer.toString(-1, radix), radix);
      assertParseFails(Integer.toString(-128, radix), radix);
      assertParseFails(Integer.toString(256, radix), radix);
    }
  }

  public void testParseUnsignedByteThrowsExceptionForInvalidRadix() {
    // Valid radix values are Character.MIN_RADIX to Character.MAX_RADIX,
    // inclusive.
    assertThrows(
        NumberFormatException.class,
        () -> UnsignedBytes.parseUnsignedByte("0", Character.MIN_RADIX - 1));

    assertThrows(
        NumberFormatException.class,
        () -> UnsignedBytes.parseUnsignedByte("0", Character.MAX_RADIX + 1));

    // The radix is used as an array index, so try a negative value.
    assertThrows(NumberFormatException.class, () -> UnsignedBytes.parseUnsignedByte("0", -1));
  }

  public void testToString() {
    // We can easily afford to test this exhaustively.
    for (int i = 0; i <= 0xff; i++) {
      assertThat(UnsignedBytes.toString((byte) i)).isEqualTo(Integer.toString(i));
    }
  }

  public void testToStringWithRadix() {
    // We can easily afford to test this exhaustively.
    for (int radix = Character.MIN_RADIX; radix <= Character.MAX_RADIX; radix++) {
      for (int i = 0; i <= 0xff; i++) {
        assertThat(UnsignedBytes.toString((byte) i, radix)).isEqualTo(Integer.toString(i, radix));
      }
    }
  }

  public void testJoin() {
    assertThat(UnsignedBytes.join(",", new byte[] {})).isEmpty();
    assertThat(UnsignedBytes.join(",", new byte[] {(byte) 1})).isEqualTo("1");
    assertThat(UnsignedBytes.join(",", (byte) 1, (byte) 2)).isEqualTo("1,2");
    assertThat(UnsignedBytes.join("", (byte) 1, (byte) 2, (byte) 3)).isEqualTo("123");
    // Bytes are rendered as unsigned values (128, 255 — not -128, -1).
    assertThat(UnsignedBytes.join(",", (byte) 128, (byte) -1)).isEqualTo("128,255");
  }

  private static boolean unsafeComparatorAvailable() {
    return UnsafeComparator.INSTANCE.isFunctional();
  }

  public void testLexicographicalComparatorChoice() {
    // The comparator is a singleton; which implementation backs it depends on
    // whether the Unsafe-based comparator is functional on this platform.
    Comparator<byte[]> defaultComparator = UnsignedBytes.lexicographicalComparator();
    assertThat(defaultComparator).isNotNull();
    assertThat(UnsignedBytes.lexicographicalComparator()).isSameInstanceAs(defaultComparator);
    if (unsafeComparatorAvailable()) {
      assertThat(defaultComparator).isInstanceOf(UnsafeComparator.class);
    } else {
      assertThat(defaultComparator).isEqualTo(UnsignedBytes.lexicographicalComparatorJavaImpl());
    }
  }

  public void testLexicographicalComparator() {
    List<byte[]> ordered =
        Arrays.asList(
            new byte[] {},
            new byte[] {LEAST},
            new byte[] {LEAST, LEAST},
            new byte[] {LEAST, (byte) 1},
            new byte[] {(byte) 1},
            new byte[] {(byte) 1, LEAST},
            new byte[] {GREATEST, GREATEST - (byte) 1},
            new byte[] {GREATEST, GREATEST},
            new byte[] {GREATEST, GREATEST, GREATEST});

    // The VarHandle, Unsafe, or Java implementation.
    Comparator<byte[]> comparator = UnsignedBytes.lexicographicalComparator();
    Helpers.testComparator(comparator, ordered);
    assertThat(SerializableTester.reserialize(comparator)).isSameInstanceAs(comparator);

    // The Java implementation.
    Comparator<byte[]> javaImpl = UnsignedBytes.lexicographicalComparatorJavaImpl();
    Helpers.testComparator(javaImpl, ordered);
    assertThat(SerializableTester.reserialize(javaImpl)).isSameInstanceAs(javaImpl);
  }

  public void testLexicographicalComparatorLongPseudorandomInputs() {
    Comparator<byte[]> comparator1 = UnsignedBytes.lexicographicalComparator();
    Comparator<byte[]> comparator2 = UnsignedBytes.lexicographicalComparatorJavaImpl();
    Random rnd = new Random(714958103);
    for (int trial = 0; trial < 100; trial++) {
      byte[] left = new byte[1 + rnd.nextInt(32)];
      rnd.nextBytes(left);
      byte[] right = left.clone();
      assertThat(comparator1.compare(left, right)).isEqualTo(0);
      assertThat(comparator2.compare(left, right)).isEqualTo(0);
      // Flip one byte; the array comparison must then agree in sign with the
      // unsigned comparison of that differing byte.
      int i = rnd.nextInt(left.length);
      left[i] ^= (byte) (1 + rnd.nextInt(255));
      assertThat(signum(comparator1.compare(left, right)))
          .isEqualTo(signum(UnsignedBytes.compare(left[i], right[i])));
      assertThat(signum(comparator2.compare(left, right)))
          .isEqualTo(signum(UnsignedBytes.compare(left[i], right[i])));
    }
  }

  public void testLexicographicalComparatorLongHandwrittenInputs() {
    Comparator<byte[]> comparator1 = UnsignedBytes.lexicographicalComparator();
    Comparator<byte[]> comparator2 = UnsignedBytes.lexicographicalComparatorJavaImpl();

    /*
     * These arrays are set up to test that the comparator compares bytes within a word in the
     * correct order—in order words, that it doesn't mix up big-endian and little-endian. The first
     * array has a smaller element at one index, and then the second array has a smaller element at
     * the next.
     */
    byte[] a0 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 99, 15, 16, 17};
    byte[] b0 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 99, 14, 15, 16, 17};
    assertThat(comparator1.compare(a0, b0)).isLessThan(0);
    assertThat(comparator2.compare(a0, b0)).isLessThan(0);
  }

  public void testSort() {
    testSort(new byte[] {}, new byte[] {});
    testSort(new byte[] {2}, new byte[] {2});
    testSort(new byte[] {2, 1, 0}, new byte[] {0, 1, 2});
    testSort(new byte[] {2, GREATEST, 1, LEAST}, new byte[] {LEAST, 1, 2, GREATEST});
  }

  static void testSort(byte[] input, byte[] expected) {
    // Copy so callers' literals are never mutated.
    input = Arrays.copyOf(input, input.length);
    UnsignedBytes.sort(input);
    assertThat(input).isEqualTo(expected);
  }

  static void testSort(byte[] input, int from, int to, byte[] expected) {
    input = Arrays.copyOf(input, input.length);
    UnsignedBytes.sort(input, from, to);
    assertThat(input).isEqualTo(expected);
  }

  public void testSortIndexed() {
    testSort(new byte[] {}, 0, 0, new byte[] {});
    testSort(new byte[] {2}, 0, 1, new byte[] {2});
    // Only [from, to) is sorted; elements outside the range stay put.
    testSort(new byte[] {2, 1, 0}, 0, 2, new byte[] {1, 2, 0});
    testSort(new byte[] {2, GREATEST, 1, LEAST}, 1, 4, new byte[] {2, LEAST, 1, GREATEST});
  }

  public void testSortDescending() {
    testSortDescending(new byte[] {}, new byte[] {});
    testSortDescending(new byte[] {1}, new byte[] {1});
    testSortDescending(new byte[] {1, 2}, new byte[] {2, 1});
    testSortDescending(new byte[] {1, 3, 1}, new byte[] {3, 1, 1});
    testSortDescending(
        new byte[] {GREATEST - 1, 1, GREATEST - 2, 2},
        new byte[] {GREATEST - 1, GREATEST - 2, 2, 1});
  }

  private static void testSortDescending(byte[] input, byte[] expectedOutput) {
    input = Arrays.copyOf(input, input.length);
    UnsignedBytes.sortDescending(input);
    assertThat(input).isEqualTo(expectedOutput);
  }

  private static void testSortDescending(
      byte[] input, int fromIndex, int toIndex, byte[] expectedOutput) {
    input = Arrays.copyOf(input, input.length);
    UnsignedBytes.sortDescending(input, fromIndex, toIndex);
    assertThat(input).isEqualTo(expectedOutput);
  }

  public void testSortDescendingIndexed() {
    testSortDescending(new byte[] {}, 0, 0, new byte[] {});
    testSortDescending(new byte[] {1}, 0, 1, new byte[] {1});
    testSortDescending(new byte[] {1, 2}, 0, 2, new byte[] {2, 1});
    testSortDescending(new byte[] {1, 3, 1}, 0, 2, new byte[] {3, 1, 1});
    testSortDescending(new byte[] {1, 3, 1}, 0, 1, new byte[] {1, 3, 1});
    testSortDescending(
        new byte[] {GREATEST - 1, 1, GREATEST - 2, 2},
        1,
        3,
        new byte[] {GREATEST - 1, GREATEST - 2, 1, 2});
  }

  public void testNulls() {
    new NullPointerTester().testAllPublicStaticMethods(UnsignedBytes.class);
  }
}
java
github
https://github.com/google/guava
android/guava-tests/test/com/google/common/primitives/UnsignedBytesTest.java
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for # high-level documentation on how this system works. # # This module is closely integrated with zerver/lib/event_schema.py # and zerver/lib/data_types.py systems for validating the schemas of # events; it also uses the OpenAPI tools to validate our documentation. import copy import time from io import StringIO from typing import Any, Callable, Dict, List, Optional, Set from unittest import mock import orjson from django.utils.timezone import now as timezone_now from zerver.lib.actions import ( bulk_add_members_to_user_group, bulk_add_subscriptions, bulk_remove_subscriptions, check_add_realm_emoji, check_add_user_group, check_delete_user_group, check_send_typing_notification, do_add_alert_words, do_add_default_stream, do_add_linkifier, do_add_reaction, do_add_realm_domain, do_add_realm_playground, do_add_streams_to_default_stream_group, do_add_submessage, do_change_avatar_fields, do_change_bot_owner, do_change_default_all_public_streams, do_change_default_events_register_stream, do_change_default_sending_stream, do_change_default_stream_group_description, do_change_default_stream_group_name, do_change_full_name, do_change_icon_source, do_change_logo_source, do_change_notification_settings, do_change_plan_type, do_change_realm_domain, do_change_stream_description, do_change_stream_invite_only, do_change_stream_message_retention_days, do_change_stream_post_policy, do_change_subscription_property, do_change_user_delivery_email, do_change_user_role, do_create_default_stream_group, do_create_multiuse_invite_link, do_create_user, do_deactivate_realm, do_deactivate_stream, do_deactivate_user, do_delete_messages, do_invite_users, do_make_user_billing_admin, do_mark_hotspot_as_read, do_mute_topic, do_mute_user, do_reactivate_user, do_regenerate_api_key, do_remove_alert_words, do_remove_default_stream, do_remove_default_stream_group, do_remove_linkifier, do_remove_reaction, 
do_remove_realm_custom_profile_field, do_remove_realm_domain, do_remove_realm_emoji, do_remove_realm_playground, do_remove_streams_from_default_stream_group, do_rename_stream, do_revoke_multi_use_invite, do_revoke_user_invite, do_send_stream_typing_notification, do_set_realm_authentication_methods, do_set_realm_message_editing, do_set_realm_notifications_stream, do_set_realm_property, do_set_realm_signup_notifications_stream, do_set_user_display_setting, do_set_zoom_token, do_unmute_topic, do_unmute_user, do_update_embedded_data, do_update_linkifier, do_update_message, do_update_message_flags, do_update_outgoing_webhook_service, do_update_user_custom_profile_data_if_changed, do_update_user_group_description, do_update_user_group_name, do_update_user_presence, do_update_user_status, lookup_default_stream_groups, remove_members_from_user_group, try_add_realm_custom_profile_field, try_update_realm_custom_profile_field, ) from zerver.lib.event_schema import ( check_alert_words, check_attachment_add, check_attachment_remove, check_attachment_update, check_custom_profile_fields, check_default_stream_groups, check_default_streams, check_delete_message, check_has_zoom_token, check_heartbeat, check_hotspots, check_invites_changed, check_message, check_muted_topics, check_muted_users, check_presence, check_reaction_add, check_reaction_remove, check_realm_bot_add, check_realm_bot_delete, check_realm_bot_remove, check_realm_bot_update, check_realm_deactivated, check_realm_domains_add, check_realm_domains_change, check_realm_domains_remove, check_realm_emoji_update, check_realm_export, check_realm_filters, check_realm_linkifiers, check_realm_playgrounds, check_realm_update, check_realm_update_dict, check_realm_user_add, check_realm_user_remove, check_realm_user_update, check_stream_create, check_stream_delete, check_stream_update, check_submessage, check_subscription_add, check_subscription_peer_add, check_subscription_peer_remove, check_subscription_remove, 
check_subscription_update, check_typing_start, check_typing_stop, check_update_display_settings, check_update_global_notifications, check_update_message, check_update_message_embedded, check_update_message_flags_add, check_update_message_flags_remove, check_user_group_add, check_user_group_add_members, check_user_group_remove, check_user_group_remove_members, check_user_group_update, check_user_status, ) from zerver.lib.events import ( RestartEventException, apply_events, fetch_initial_state_data, post_process_state, ) from zerver.lib.mention import MentionData from zerver.lib.message import render_markdown from zerver.lib.test_classes import ZulipTestCase from zerver.lib.test_helpers import ( create_dummy_file, get_subscription, get_test_image_file, reset_emails_in_zulip_realm, stdout_suppressed, ) from zerver.lib.topic import TOPIC_NAME from zerver.lib.user_mutes import get_mute_object from zerver.models import ( Attachment, CustomProfileField, Message, MultiuseInvite, PreregistrationUser, Realm, RealmAuditLog, RealmDomain, RealmPlayground, Service, Stream, UserGroup, UserMessage, UserPresence, UserProfile, get_client, get_stream, get_user_by_delivery_email, ) from zerver.openapi.openapi import validate_against_openapi_schema from zerver.tornado.django_api import send_event from zerver.tornado.event_queue import ( allocate_client_descriptor, clear_client_event_queues_for_testing, create_heartbeat_event, send_restart_events, ) from zerver.views.realm_playgrounds import access_playground_by_id class BaseAction(ZulipTestCase): """Core class for verifying the apply_event race handling logic as well as the event formatting logic of any function using send_event. See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html#testing for extensive design details for this testing system. 
""" def setUp(self) -> None: super().setUp() self.user_profile = self.example_user("hamlet") def verify_action( self, action: Callable[[], object], *, event_types: Optional[List[str]] = None, include_subscribers: bool = True, state_change_expected: bool = True, notification_settings_null: bool = False, client_gravatar: bool = True, user_avatar_url_field_optional: bool = False, slim_presence: bool = False, include_streams: bool = True, num_events: int = 1, bulk_message_deletion: bool = True, stream_typing_notifications: bool = True, ) -> List[Dict[str, Any]]: """ Make sure we have a clean slate of client descriptors for these tests. If we don't do this, then certain failures will only manifest when you run multiple tests within a single test function. See also https://zulip.readthedocs.io/en/latest/subsystems/events-system.html#testing for details on the design of this test system. """ clear_client_event_queues_for_testing() client = allocate_client_descriptor( dict( user_profile_id=self.user_profile.id, realm_id=self.user_profile.realm_id, event_types=event_types, client_type_name="website", apply_markdown=True, client_gravatar=client_gravatar, slim_presence=slim_presence, all_public_streams=False, queue_timeout=600, last_connection_time=time.time(), narrow=[], bulk_message_deletion=bulk_message_deletion, stream_typing_notifications=stream_typing_notifications, ) ) # hybrid_state = initial fetch state + re-applying events triggered by our action # normal_state = do action then fetch at the end (the "normal" code path) hybrid_state = fetch_initial_state_data( self.user_profile, event_types=event_types, client_gravatar=client_gravatar, user_avatar_url_field_optional=user_avatar_url_field_optional, slim_presence=slim_presence, include_subscribers=include_subscribers, include_streams=include_streams, ) # We want even those `send_event` calls which have been hooked to # `transaction.on_commit` to execute in tests. 
# See the comment in `ZulipTestCase.tornado_redirected_to_list`. with self.captureOnCommitCallbacks(execute=True): action() events = client.event_queue.contents() content = { "queue_id": "123.12", # The JSON wrapper helps in converting tuples to lists # as tuples aren't valid JSON structure. "events": orjson.loads(orjson.dumps(events)), "msg": "", "result": "success", } validate_against_openapi_schema(content, "/events", "get", "200", display_brief_error=True) self.assert_length(events, num_events) initial_state = copy.deepcopy(hybrid_state) post_process_state(self.user_profile, initial_state, notification_settings_null) before = orjson.dumps(initial_state) apply_events( self.user_profile, state=hybrid_state, events=events, fetch_event_types=None, client_gravatar=client_gravatar, slim_presence=slim_presence, include_subscribers=include_subscribers, ) post_process_state(self.user_profile, hybrid_state, notification_settings_null) after = orjson.dumps(hybrid_state) if state_change_expected: if before == after: # nocoverage print(orjson.dumps(initial_state, option=orjson.OPT_INDENT_2).decode()) print(events) raise AssertionError( "Test does not exercise enough code -- events do not change state." 
) else: try: self.match_states(initial_state, copy.deepcopy(hybrid_state), events) except AssertionError: # nocoverage raise AssertionError("Test is invalid--state actually does change here.") normal_state = fetch_initial_state_data( self.user_profile, event_types=event_types, client_gravatar=client_gravatar, user_avatar_url_field_optional=user_avatar_url_field_optional, slim_presence=slim_presence, include_subscribers=include_subscribers, include_streams=include_streams, ) post_process_state(self.user_profile, normal_state, notification_settings_null) self.match_states(hybrid_state, normal_state, events) return events def match_states( self, state1: Dict[str, Any], state2: Dict[str, Any], events: List[Dict[str, Any]] ) -> None: def normalize(state: Dict[str, Any]) -> None: for u in state["never_subscribed"]: if "subscribers" in u: u["subscribers"].sort() for u in state["subscriptions"]: if "subscribers" in u: u["subscribers"].sort() state["subscriptions"] = {u["name"]: u for u in state["subscriptions"]} state["unsubscribed"] = {u["name"]: u for u in state["unsubscribed"]} if "realm_bots" in state: state["realm_bots"] = {u["email"]: u for u in state["realm_bots"]} # Since time is different for every call, just fix the value state["server_timestamp"] = 0 normalize(state1) normalize(state2) # If this assertions fails, we have unusual problems. self.assertEqual(state1.keys(), state2.keys()) # The far more likely scenario is that some section of # our enormous payload does not get updated properly. We # want the diff here to be developer-friendly, hence # the somewhat tedious code to provide useful output. if state1 != state2: # nocoverage print("\n---States DO NOT MATCH---") print("\nEVENTS:\n") # Printing out the events is a big help to # developers. 
            import json

            for event in events:
                print(json.dumps(event, indent=4))

            print("\nMISMATCHES:\n")
            for k in state1:
                if state1[k] != state2[k]:
                    print("\nkey = " + k)
                    try:
                        self.assertEqual({k: state1[k]}, {k: state2[k]})
                    except AssertionError as e:
                        print(e)
            print(
                """
NOTE:

    This is an advanced test that verifies how
    we apply events after fetching data. If you
    do not know how to debug it, you can ask for
    help on chat.
                """,
                flush=True,
            )
            raise AssertionError("Mismatching states")


class NormalActionsTest(BaseAction):
    def create_bot(self, email: str, **extras: Any) -> UserProfile:
        # Helper: create a test bot owned by the acting user.
        return self.create_test_bot(email, self.user_profile, **extras)

    def test_mentioned_send_message_events(self) -> None:
        user = self.example_user("hamlet")

        # Each mentioning message should produce exactly one event
        # (num_events defaults to 1 in verify_action).
        for i in range(3):
            content = "mentioning... @**" + user.full_name + "** hello " + str(i)
            self.verify_action(
                lambda: self.send_stream_message(self.example_user("cordelia"), "Verona", content),
            )

    def test_wildcard_mentioned_send_message_events(self) -> None:
        for i in range(3):
            content = "mentioning... @**all** hello " + str(i)
            self.verify_action(
                lambda: self.send_stream_message(self.example_user("cordelia"), "Verona", content),
            )

    def test_pm_send_message_events(self) -> None:
        self.verify_action(
            lambda: self.send_personal_message(
                self.example_user("cordelia"), self.example_user("hamlet"), "hola"
            ),
        )

    def test_huddle_send_message_events(self) -> None:
        huddle = [
            self.example_user("hamlet"),
            self.example_user("othello"),
        ]
        self.verify_action(
            lambda: self.send_huddle_message(self.example_user("cordelia"), huddle, "hola"),
        )

    def test_stream_send_message_events(self) -> None:
        # With client_gravatar=False, the message event carries a string
        # avatar URL; with client_gravatar=True it is None instead.
        events = self.verify_action(
            lambda: self.send_stream_message(self.example_user("hamlet"), "Verona", "hello"),
            client_gravatar=False,
        )
        check_message("events[0]", events[0])
        assert isinstance(events[0]["message"]["avatar_url"], str)

        events = self.verify_action(
            lambda: self.send_stream_message(self.example_user("hamlet"), "Verona", "hello"),
            client_gravatar=True,
        )
        check_message("events[0]", events[0])
        assert events[0]["message"]["avatar_url"] is None

        # Verify message editing
        message = Message.objects.order_by("-id")[0]
        topic = "new_topic"
        propagate_mode = "change_all"
        content = "new content"
        rendering_result = render_markdown(message, content)
        prior_mention_user_ids: Set[int] = set()
        mention_data = MentionData(
            realm_id=self.user_profile.realm_id,
            content=content,
        )

        events = self.verify_action(
            lambda: do_update_message(
                self.user_profile,
                message,
                None,
                topic,
                propagate_mode,
                False,
                False,
                content,
                rendering_result,
                prior_mention_user_ids,
                mention_data,
            ),
            state_change_expected=True,
        )
        check_update_message(
            "events[0]",
            events[0],
            has_content=True,
            has_topic=True,
            has_new_stream_id=False,
        )

        content = "embed_content"
        rendering_result = render_markdown(message, content)
        events = self.verify_action(
            lambda: do_update_embedded_data(self.user_profile, message, content, rendering_result),
            state_change_expected=False,
        )
        check_update_message_embedded("events[0]", events[0])

        # Verify move topic to different stream.

        # Send 2 messages in "test" topic.
        self.send_stream_message(self.user_profile, "Verona")
        message_id = self.send_stream_message(self.user_profile, "Verona")
        message = Message.objects.get(id=message_id)
        topic = "new_topic"
        stream = get_stream("Denmark", self.user_profile.realm)
        propagate_mode = "change_all"
        prior_mention_user_ids = set()

        events = self.verify_action(
            lambda: do_update_message(
                self.user_profile,
                message,
                stream,
                topic,
                propagate_mode,
                True,
                True,
                None,
                None,
                set(),
                None,
            ),
            state_change_expected=True,
            # There are 3 events generated for this action
            # * update_message: For updating existing messages
            # * 2 new message events: Breadcrumb messages in the new and old topics.
            num_events=3,
        )
        check_update_message(
            "events[0]",
            events[0],
            has_content=False,
            has_topic=True,
            has_new_stream_id=True,
        )

    def test_update_message_flags(self) -> None:
        # Test message flag update events
        message = self.send_personal_message(
            self.example_user("cordelia"),
            self.example_user("hamlet"),
            "hello",
        )
        user_profile = self.example_user("hamlet")
        events = self.verify_action(
            lambda: do_update_message_flags(
                user_profile, get_client("website"), "add", "starred", [message]
            ),
            state_change_expected=True,
        )
        check_update_message_flags_add("events[0]", events[0])

        events = self.verify_action(
            lambda: do_update_message_flags(
                user_profile, get_client("website"), "remove", "starred", [message]
            ),
            state_change_expected=True,
        )
        check_update_message_flags_remove("events[0]", events[0])

    def test_update_read_flag_removes_unread_msg_ids(self) -> None:
        user_profile = self.example_user("hamlet")
        mention = "@**" + user_profile.full_name + "**"

        # Exercise the read-flag path both for a plain message and for one
        # that mentions the user.
        for content in ["hello", mention]:
            message = self.send_stream_message(
                self.example_user("cordelia"),
                "Verona",
                content,
            )

            self.verify_action(
                lambda: do_update_message_flags(
                    user_profile, get_client("website"), "add", "read", [message]
                ),
                state_change_expected=True,
            )

    def test_send_message_to_existing_recipient(self) -> None:
        sender = self.example_user("cordelia")
        self.send_stream_message(
            sender,
            "Verona",
            "hello 1",
        )
        self.verify_action(
            lambda: self.send_stream_message(sender, "Verona", "hello 2"),
            state_change_expected=True,
        )

    def test_add_reaction(self) -> None:
        message_id = self.send_stream_message(self.example_user("hamlet"), "Verona", "hello")
        message = Message.objects.get(id=message_id)

        events = self.verify_action(
            lambda: do_add_reaction(self.user_profile, message, "tada", "1f389", "unicode_emoji"),
            state_change_expected=False,
        )
        check_reaction_add("events[0]", events[0])

    def test_heartbeat_event(self) -> None:
        events = self.verify_action(
            lambda: send_event(
                self.user_profile.realm,
                create_heartbeat_event(),
                [self.user_profile.id],
            ),
            state_change_expected=False,
        )
        check_heartbeat("events[0]", events[0])

    def test_add_submessage(self) -> None:
        cordelia = self.example_user("cordelia")
        stream_name = "Verona"
        message_id = self.send_stream_message(
            sender=cordelia,
            stream_name=stream_name,
        )

        events = self.verify_action(
            lambda: do_add_submessage(
                realm=cordelia.realm,
                sender_id=cordelia.id,
                message_id=message_id,
                msg_type="whatever",
                content='"stuff"',
            ),
            state_change_expected=False,
        )
        check_submessage("events[0]", events[0])

    def test_remove_reaction(self) -> None:
        message_id = self.send_stream_message(self.example_user("hamlet"), "Verona", "hello")
        message = Message.objects.get(id=message_id)
        do_add_reaction(self.user_profile, message, "tada", "1f389", "unicode_emoji")

        events = self.verify_action(
            lambda: do_remove_reaction(self.user_profile, message, "1f389", "unicode_emoji"),
            state_change_expected=False,
        )
        check_reaction_remove("events[0]", events[0])

    def test_invite_user_event(self) -> None:
        # Invitations require elevated permissions, so act as Iago here.
        self.user_profile = self.example_user("iago")
        streams = []
        for stream_name in ["Denmark", "Scotland"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        events = self.verify_action(
            lambda: do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False),
            state_change_expected=False,
        )
        check_invites_changed("events[0]", events[0])

    def test_create_multiuse_invite_event(self) -> None:
        self.user_profile = self.example_user("iago")
        streams = []
        for stream_name in ["Denmark", "Verona"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        events = self.verify_action(
            lambda: do_create_multiuse_invite_link(
                self.user_profile, PreregistrationUser.INVITE_AS["MEMBER"], streams
            ),
            state_change_expected=False,
        )
        check_invites_changed("events[0]", events[0])

    def test_revoke_user_invite_event(self) -> None:
        self.user_profile = self.example_user("iago")
        streams = []
        for stream_name in ["Denmark", "Verona"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
        prereg_users = PreregistrationUser.objects.filter(
            referred_by__realm=self.user_profile.realm
        )
        events = self.verify_action(
            lambda: do_revoke_user_invite(prereg_users[0]),
            state_change_expected=False,
        )
        check_invites_changed("events[0]", events[0])

    def test_revoke_multiuse_invite_event(self) -> None:
        self.user_profile = self.example_user("iago")
        streams = []
        for stream_name in ["Denmark", "Verona"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        do_create_multiuse_invite_link(
            self.user_profile, PreregistrationUser.INVITE_AS["MEMBER"], streams
        )
        multiuse_object = MultiuseInvite.objects.get()
        events = self.verify_action(
            lambda: do_revoke_multi_use_invite(multiuse_object),
            state_change_expected=False,
        )
        check_invites_changed("events[0]", events[0])

    def test_invitation_accept_invite_event(self) -> None:
        reset_emails_in_zulip_realm()

        self.user_profile = self.example_user("iago")
        streams = []
        for stream_name in ["Denmark", "Scotland"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        do_invite_users(self.user_profile, ["foo@zulip.com"], streams, False)
        prereg_user = PreregistrationUser.objects.get(email="foo@zulip.com")

        # Creating the invited user generates 5 events; the
        # invites_changed event is the fourth one.
        events = self.verify_action(
            lambda: do_create_user(
                "foo@zulip.com",
                "password",
                self.user_profile.realm,
                "full name",
                prereg_user=prereg_user,
                acting_user=None,
            ),
            state_change_expected=True,
            num_events=5,
        )

        check_invites_changed("events[3]", events[3])

    def test_typing_events(self) -> None:
        events = self.verify_action(
            lambda: check_send_typing_notification(
                self.user_profile, [self.example_user("cordelia").id], "start"
            ),
            state_change_expected=False,
        )
        check_typing_start("events[0]", events[0])

        events = self.verify_action(
            lambda: check_send_typing_notification(
                self.user_profile, [self.example_user("cordelia").id], "stop"
            ),
            state_change_expected=False,
        )
        check_typing_stop("events[0]", events[0])

    def test_stream_typing_events(self) -> None:
        stream = get_stream("Denmark", self.user_profile.realm)
        topic = "streams typing"

        events = self.verify_action(
            lambda: do_send_stream_typing_notification(
                self.user_profile,
                "start",
                stream,
                topic,
            ),
            state_change_expected=False,
        )
        check_typing_start("events[0]", events[0])

        events = self.verify_action(
            lambda: do_send_stream_typing_notification(
                self.user_profile,
                "stop",
                stream,
                topic,
            ),
            state_change_expected=False,
        )
        check_typing_stop("events[0]", events[0])

        # Having client_capability `stream_typing_notification=False`
        # shouldn't produce any events.
        events = self.verify_action(
            lambda: do_send_stream_typing_notification(
                self.user_profile,
                "start",
                stream,
                topic,
            ),
            state_change_expected=False,
            stream_typing_notifications=False,
            num_events=0,
        )
        self.assertEqual(events, [])

        events = self.verify_action(
            lambda: do_send_stream_typing_notification(
                self.user_profile,
                "stop",
                stream,
                topic,
            ),
            state_change_expected=False,
            stream_typing_notifications=False,
            num_events=0,
        )
        self.assertEqual(events, [])

    def test_custom_profile_fields_events(self) -> None:
        realm = self.user_profile.realm

        # Exercise add, update, and remove of a realm custom profile field.
        events = self.verify_action(
            lambda: try_add_realm_custom_profile_field(
                realm=realm, name="Expertise", field_type=CustomProfileField.LONG_TEXT
            )
        )
        check_custom_profile_fields("events[0]", events[0])

        field = realm.customprofilefield_set.get(realm=realm, name="Biography")
        name = field.name
        hint = "Biography of the user"

        events = self.verify_action(
            lambda: try_update_realm_custom_profile_field(realm, field, name, hint=hint)
        )
        check_custom_profile_fields("events[0]", events[0])

        events = self.verify_action(lambda: do_remove_realm_custom_profile_field(realm, field))
        check_custom_profile_fields("events[0]", events[0])

    def test_custom_profile_field_data_events(self) -> None:
        field_id = self.user_profile.realm.customprofilefield_set.get(
            realm=self.user_profile.realm, name="Biography"
        ).id
        field = {
            "id": field_id,
            "value": "New value",
        }
        events = self.verify_action(
            lambda: do_update_user_custom_profile_data_if_changed(self.user_profile, [field])
        )
        check_realm_user_update("events[0]", events[0], "custom_profile_field")
        self.assertEqual(
            events[0]["person"]["custom_profile_field"].keys(), {"id", "value", "rendered_value"}
        )

        # Test we pass correct stringify value in custom-user-field data event
        field_id = self.user_profile.realm.customprofilefield_set.get(
            realm=self.user_profile.realm, name="Mentor"
        ).id
        field = {
            "id": field_id,
            "value": [self.example_user("ZOE").id],
        }
        events = self.verify_action(
            lambda: do_update_user_custom_profile_data_if_changed(self.user_profile, [field])
        )
        check_realm_user_update("events[0]", events[0], "custom_profile_field")
        self.assertEqual(events[0]["person"]["custom_profile_field"].keys(), {"id", "value"})

    def test_presence_events(self) -> None:
        # With slim_presence=False the presence event includes the user's
        # email; with slim_presence=True it does not.
        events = self.verify_action(
            lambda: do_update_user_presence(
                self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE
            ),
            slim_presence=False,
        )

        check_presence(
            "events[0]",
            events[0],
            has_email=True,
            presence_key="website",
            status="active",
        )

        events = self.verify_action(
            lambda: do_update_user_presence(
                self.example_user("cordelia"),
                get_client("website"),
                timezone_now(),
                UserPresence.ACTIVE,
            ),
            slim_presence=True,
        )

        check_presence(
            "events[0]",
            events[0],
            has_email=False,
            presence_key="website",
            status="active",
        )

    def test_presence_events_multiple_clients(self) -> None:
        self.api_post(
            self.user_profile,
            "/api/v1/users/me/presence",
            {"status": "idle"},
            HTTP_USER_AGENT="ZulipAndroid/1.0",
        )
        self.verify_action(
            lambda: do_update_user_presence(
                self.user_profile, get_client("website"), timezone_now(), UserPresence.ACTIVE
            )
        )
        events = self.verify_action(
            lambda: do_update_user_presence(
                self.user_profile, get_client("ZulipAndroid/1.0"), timezone_now(), UserPresence.IDLE
            )
        )

        check_presence(
            "events[0]",
            events[0],
            has_email=True,
            presence_key="ZulipAndroid/1.0",
            status="idle",
        )

    def test_register_events(self) -> None:
        # Registering a new user generates realm_user/add,
        # subscription peer_add, and a signup notification message event.
        events = self.verify_action(lambda: self.register("test1@zulip.com", "test1"), num_events=3)
        self.assert_length(events, 3)

        check_realm_user_add("events[0]", events[0])
        new_user_profile = get_user_by_delivery_email("test1@zulip.com", self.user_profile.realm)
        self.assertEqual(new_user_profile.delivery_email, "test1@zulip.com")

        check_subscription_peer_add("events[1]", events[1])

        check_message("events[2]", events[2])
        self.assertIn(
            f'data-user-id="{new_user_profile.id}">test1_zulip.com</span> just signed up for Zulip',
            events[2]["message"]["content"],
        )

    def test_register_events_email_address_visibility(self) -> None:
        do_set_realm_property(
            self.user_profile.realm,
            "email_address_visibility",
            Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
            acting_user=None,
        )

        events = self.verify_action(lambda: self.register("test1@zulip.com", "test1"), num_events=3)
        self.assert_length(events, 3)
        check_realm_user_add("events[0]", events[0])
        # With admins-only visibility, the public .email is a dummy address.
        new_user_profile = get_user_by_delivery_email("test1@zulip.com", self.user_profile.realm)
        self.assertEqual(new_user_profile.email, f"user{new_user_profile.id}@zulip.testserver")

        check_subscription_peer_add("events[1]", events[1])

        check_message("events[2]", events[2])
        self.assertIn(
            f'data-user-id="{new_user_profile.id}">test1_zulip.com</span> just signed up for Zulip',
            events[2]["message"]["content"],
        )

    def test_alert_words_events(self) -> None:
        events = self.verify_action(lambda: do_add_alert_words(self.user_profile, ["alert_word"]))
        check_alert_words("events[0]", events[0])

        events = self.verify_action(
            lambda: do_remove_alert_words(self.user_profile, ["alert_word"])
        )
        check_alert_words("events[0]", events[0])

    def test_away_events(self) -> None:
        client = get_client("website")
        # The user_status event should only include the fields that changed.
        events = self.verify_action(
            lambda: do_update_user_status(
                user_profile=self.user_profile,
                away=True,
                status_text="out to lunch",
                client_id=client.id,
            )
        )
        check_user_status("events[0]", events[0], {"away", "status_text"})

        events = self.verify_action(
            lambda: do_update_user_status(
                user_profile=self.user_profile, away=False, status_text="", client_id=client.id
            )
        )
        check_user_status("events[0]", events[0], {"away", "status_text"})

        events = self.verify_action(
            lambda: do_update_user_status(
                user_profile=self.user_profile, away=True, status_text=None, client_id=client.id
            )
        )
        check_user_status("events[0]", events[0], {"away"})

        events = self.verify_action(
            lambda: do_update_user_status(
                user_profile=self.user_profile,
                away=None,
                status_text="at the beach",
                client_id=client.id,
            )
        )
        check_user_status("events[0]", events[0], {"status_text"})
    def test_user_group_events(self) -> None:
        othello = self.example_user("othello")
        events = self.verify_action(
            lambda: check_add_user_group(
                self.user_profile.realm, "backend", [othello], "Backend team"
            )
        )
        check_user_group_add("events[0]", events[0])

        # Test name update
        backend = UserGroup.objects.get(name="backend")
        events = self.verify_action(lambda: do_update_user_group_name(backend, "backendteam"))
        check_user_group_update("events[0]", events[0], "name")

        # Test description update
        description = "Backend team to deal with backend code."
        events = self.verify_action(lambda: do_update_user_group_description(backend, description))
        check_user_group_update("events[0]", events[0], "description")

        # Test add members
        hamlet = self.example_user("hamlet")
        events = self.verify_action(lambda: bulk_add_members_to_user_group(backend, [hamlet]))
        check_user_group_add_members("events[0]", events[0])

        # Test remove members
        hamlet = self.example_user("hamlet")
        events = self.verify_action(lambda: remove_members_from_user_group(backend, [hamlet]))
        check_user_group_remove_members("events[0]", events[0])

        # Test remove event
        events = self.verify_action(lambda: check_delete_user_group(backend.id, othello))
        check_user_group_remove("events[0]", events[0])

    def test_default_stream_groups_events(self) -> None:
        # Exercise the full lifecycle of a default stream group:
        # create, add/remove streams, rename/redescribe, and remove.
        streams = []
        for stream_name in ["Scotland", "Rome", "Denmark"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        events = self.verify_action(
            lambda: do_create_default_stream_group(
                self.user_profile.realm, "group1", "This is group1", streams
            )
        )
        check_default_stream_groups("events[0]", events[0])

        group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]
        venice_stream = get_stream("Venice", self.user_profile.realm)
        events = self.verify_action(
            lambda: do_add_streams_to_default_stream_group(
                self.user_profile.realm, group, [venice_stream]
            )
        )
        check_default_stream_groups("events[0]", events[0])

        events = self.verify_action(
            lambda: do_remove_streams_from_default_stream_group(
                self.user_profile.realm, group, [venice_stream]
            )
        )
        check_default_stream_groups("events[0]", events[0])

        events = self.verify_action(
            lambda: do_change_default_stream_group_description(
                self.user_profile.realm, group, "New description"
            )
        )
        check_default_stream_groups("events[0]", events[0])

        events = self.verify_action(
            lambda: do_change_default_stream_group_name(
                self.user_profile.realm, group, "New group name"
            )
        )
        check_default_stream_groups("events[0]", events[0])

        events = self.verify_action(
            lambda: do_remove_default_stream_group(self.user_profile.realm, group)
        )
        check_default_stream_groups("events[0]", events[0])

    def test_default_stream_group_events_guest(self) -> None:
        streams = []
        for stream_name in ["Scotland", "Rome", "Denmark"]:
            streams.append(get_stream(stream_name, self.user_profile.realm))

        do_create_default_stream_group(self.user_profile.realm, "group1", "This is group1", streams)
        group = lookup_default_stream_groups(["group1"], self.user_profile.realm)[0]

        # Guests receive no default-stream-group events.
        do_change_user_role(self.user_profile, UserProfile.ROLE_GUEST, acting_user=None)
        venice_stream = get_stream("Venice", self.user_profile.realm)
        self.verify_action(
            lambda: do_add_streams_to_default_stream_group(
                self.user_profile.realm, group, [venice_stream]
            ),
            state_change_expected=False,
            num_events=0,
        )

    def test_default_streams_events(self) -> None:
        stream = get_stream("Scotland", self.user_profile.realm)
        events = self.verify_action(lambda: do_add_default_stream(stream))
        check_default_streams("events[0]", events[0])
        events = self.verify_action(lambda: do_remove_default_stream(stream))
        check_default_streams("events[0]", events[0])

    def test_default_streams_events_guest(self) -> None:
        # Guests receive no default-stream events.
        do_change_user_role(self.user_profile, UserProfile.ROLE_GUEST, acting_user=None)
        stream = get_stream("Scotland", self.user_profile.realm)
        self.verify_action(
            lambda: do_add_default_stream(stream), state_change_expected=False, num_events=0
        )
        self.verify_action(
            lambda: do_remove_default_stream(stream),
            state_change_expected=False,
            num_events=0,
        )

    def test_muted_topics_events(self) -> None:
        stream = get_stream("Denmark", self.user_profile.realm)
        events = self.verify_action(lambda: do_mute_topic(self.user_profile, stream, "topic"))
        check_muted_topics("events[0]", events[0])

        events = self.verify_action(lambda: do_unmute_topic(self.user_profile, stream, "topic"))
        check_muted_topics("events[0]", events[0])

    def test_muted_users_events(self) -> None:
        muted_user = self.example_user("othello")
        # Muting a user generates two events: message flags update plus
        # the muted_users event itself.
        events = self.verify_action(
            lambda: do_mute_user(self.user_profile, muted_user), num_events=2
        )
        check_update_message_flags_add("events[0]", events[0])
        check_muted_users("events[1]", events[1])

        mute_object = get_mute_object(self.user_profile, muted_user)
        assert mute_object is not None
        # This is a hack to silence mypy errors which result from it not taking
        # into account type restrictions for nested functions (here, `lambda`).
        # https://github.com/python/mypy/commit/8780d45507ab1efba33568744967674cce7184d1
        mute_object2 = mute_object

        events = self.verify_action(lambda: do_unmute_user(mute_object2))
        check_muted_users("events[0]", events[0])

    def test_change_avatar_fields(self) -> None:
        # Switching to an uploaded avatar yields string URLs; switching
        # back to gravatar yields None for both URL fields.
        events = self.verify_action(
            lambda: do_change_avatar_fields(
                self.user_profile, UserProfile.AVATAR_FROM_USER, acting_user=self.user_profile
            ),
        )
        check_realm_user_update("events[0]", events[0], "avatar_fields")
        assert isinstance(events[0]["person"]["avatar_url"], str)
        assert isinstance(events[0]["person"]["avatar_url_medium"], str)

        events = self.verify_action(
            lambda: do_change_avatar_fields(
                self.user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=self.user_profile
            ),
        )
        check_realm_user_update("events[0]", events[0], "avatar_fields")
        self.assertEqual(events[0]["person"]["avatar_url"], None)
        self.assertEqual(events[0]["person"]["avatar_url_medium"], None)

    def test_change_full_name(self) -> None:
        events = self.verify_action(
            lambda: do_change_full_name(self.user_profile, "Sir Hamlet", self.user_profile)
        )
        check_realm_user_update("events[0]", events[0], "full_name")

    def test_change_user_delivery_email_email_address_visibilty_admins(self) -> None:
        do_set_realm_property(
            self.user_profile.realm,
            "email_address_visibility",
            Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS,
            acting_user=None,
        )
        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
        self.user_profile.refresh_from_db()
        action = lambda: do_change_user_delivery_email(self.user_profile, "newhamlet@zulip.com")

        # Changing the delivery email also regenerates the
        # (email-derived) avatar fields, hence two events.
        events = self.verify_action(action, num_events=2, client_gravatar=False)
        check_realm_user_update("events[0]", events[0], "delivery_email")
        check_realm_user_update("events[1]", events[1], "avatar_fields")
        assert isinstance(events[1]["person"]["avatar_url"], str)
        assert isinstance(events[1]["person"]["avatar_url_medium"], str)

    def test_change_realm_authentication_methods(self) -> None:
        def fake_backends() -> Any:
            # Override settings so every backend named in the transition
            # table below is actually available.
            backends = (
                "zproject.backends.DevAuthBackend",
                "zproject.backends.EmailAuthBackend",
                "zproject.backends.GitHubAuthBackend",
                "zproject.backends.GoogleAuthBackend",
                "zproject.backends.ZulipLDAPAuthBackend",
            )
            return self.settings(AUTHENTICATION_BACKENDS=backends)

        # Test transitions; any new backends should be tested with T/T/T/F/T
        for auth_method_dict in (
            {"Google": True, "Email": True, "GitHub": True, "LDAP": False, "Dev": False},
            {"Google": True, "Email": True, "GitHub": False, "LDAP": False, "Dev": False},
            {"Google": True, "Email": False, "GitHub": False, "LDAP": False, "Dev": False},
            {"Google": True, "Email": False, "GitHub": True, "LDAP": False, "Dev": False},
            {"Google": False, "Email": False, "GitHub": False, "LDAP": False, "Dev": True},
            {"Google": False, "Email": False, "GitHub": True, "LDAP": False, "Dev": True},
            {"Google": False, "Email": True, "GitHub": True, "LDAP": True, "Dev": False},
        ):
            with fake_backends():
                events = self.verify_action(
                    lambda: do_set_realm_authentication_methods(
                        self.user_profile.realm, auth_method_dict, acting_user=None
                    )
                )

            check_realm_update_dict("events[0]", events[0])

    def test_change_pin_stream(self) -> None:
        stream = get_stream("Denmark", self.user_profile.realm)
        sub = get_subscription(stream.name, self.user_profile)
        do_change_subscription_property(
            self.user_profile, sub, stream, "pin_to_top", False, acting_user=None
        )
        for pinned in (True, False):
            events = self.verify_action(
                lambda: do_change_subscription_property(
                    self.user_profile, sub, stream, "pin_to_top", pinned, acting_user=None
                )
            )
            check_subscription_update(
                "events[0]",
                events[0],
                property="pin_to_top",
                value=pinned,
            )

    def test_change_stream_notification_settings(self) -> None:
        for setting_name in ["email_notifications"]:
            stream = get_stream("Denmark", self.user_profile.realm)
            sub = get_subscription(stream.name, self.user_profile)

            # First test with notification_settings_null enabled
            for value in (True, False):
                events = self.verify_action(
                    lambda: do_change_subscription_property(
                        self.user_profile, sub, stream, setting_name, value, acting_user=None
                    ),
                    notification_settings_null=True,
                )
                check_subscription_update(
                    "events[0]",
                    events[0],
                    property=setting_name,
                    value=value,
                )

            for value in (True, False):
                events = self.verify_action(
                    lambda: do_change_subscription_property(
                        self.user_profile, sub, stream, setting_name, value, acting_user=None
                    )
                )
                check_subscription_update(
                    "events[0]",
                    events[0],
                    property=setting_name,
                    value=value,
                )

    def test_change_realm_message_edit_settings(self) -> None:
        # Test every transition among the four possibilities {T,F} x {0, non-0}
        for (allow_message_editing, message_content_edit_limit_seconds) in (
            (True, 0),
            (False, 0),
            (False, 1234),
            (True, 600),
            (False, 0),
            (True, 1234),
        ):
            events = self.verify_action(
                lambda: do_set_realm_message_editing(
                    self.user_profile.realm,
                    allow_message_editing,
                    message_content_edit_limit_seconds,
                    Realm.POLICY_ADMINS_ONLY,
                    acting_user=None,
                )
            )
            check_realm_update_dict("events[0]", events[0])

    def test_change_realm_notifications_stream(self) -> None:
        stream = get_stream("Rome", self.user_profile.realm)

        # -1 is the sentinel for "no notifications stream".
        for notifications_stream, notifications_stream_id in ((stream, stream.id), (None, -1)):
            events = self.verify_action(
                lambda: do_set_realm_notifications_stream(
                    self.user_profile.realm,
                    notifications_stream,
                    notifications_stream_id,
                    acting_user=None,
                )
            )
            check_realm_update("events[0]", events[0], "notifications_stream_id")

    def test_change_realm_signup_notifications_stream(self) -> None:
        stream = get_stream("Rome", self.user_profile.realm)

        for signup_notifications_stream, signup_notifications_stream_id in (
            (stream, stream.id),
            (None, -1),
        ):
            events = self.verify_action(
                lambda: do_set_realm_signup_notifications_stream(
                    self.user_profile.realm,
                    signup_notifications_stream,
                    signup_notifications_stream_id,
                    acting_user=None,
                )
            )
            check_realm_update("events[0]", events[0], "signup_notifications_stream_id")

    def test_change_is_admin(self) -> None:
        reset_emails_in_zulip_realm()

        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
        self.user_profile.refresh_from_db()

        do_change_user_role(self.user_profile, UserProfile.ROLE_MEMBER, acting_user=None)
        for role in [UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_MEMBER]:
            events = self.verify_action(
                lambda: do_change_user_role(self.user_profile, role, acting_user=None)
            )
            check_realm_user_update("events[0]", events[0], "role")
            self.assertEqual(events[0]["person"]["role"], role)

    def test_change_is_billing_admin(self) -> None:
        reset_emails_in_zulip_realm()

        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
        self.user_profile.refresh_from_db()

        events = self.verify_action(lambda: do_make_user_billing_admin(self.user_profile))
        check_realm_user_update("events[0]", events[0], "is_billing_admin")
        self.assertEqual(events[0]["person"]["is_billing_admin"], True)

    def test_change_is_owner(self) -> None:
        reset_emails_in_zulip_realm()

        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
        self.user_profile.refresh_from_db()

        do_change_user_role(self.user_profile, UserProfile.ROLE_MEMBER, acting_user=None)
        for role in [UserProfile.ROLE_REALM_OWNER, UserProfile.ROLE_MEMBER]:
            events = self.verify_action(
                lambda: do_change_user_role(self.user_profile, role, acting_user=None)
            )
            check_realm_user_update("events[0]", events[0], "role")
            self.assertEqual(events[0]["person"]["role"], role)

    def test_change_is_moderator(self) -> None:
        reset_emails_in_zulip_realm()

        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
        self.user_profile.refresh_from_db()

        do_change_user_role(self.user_profile, UserProfile.ROLE_MEMBER, acting_user=None)
        for role in [UserProfile.ROLE_MODERATOR, UserProfile.ROLE_MEMBER]:
            events = self.verify_action(
                lambda: do_change_user_role(self.user_profile, role, acting_user=None)
            )
            check_realm_user_update("events[0]", events[0], "role")
            self.assertEqual(events[0]["person"]["role"], role)

    def test_change_is_guest(self) -> None:
        stream = Stream.objects.get(name="Denmark")
        do_add_default_stream(stream)

        reset_emails_in_zulip_realm()

        # Important: We need to refresh from the database here so that
        # we don't have a stale UserProfile object with an old value
        # for email being passed into this next function.
self.user_profile.refresh_from_db() do_change_user_role(self.user_profile, UserProfile.ROLE_MEMBER, acting_user=None) for role in [UserProfile.ROLE_GUEST, UserProfile.ROLE_MEMBER]: events = self.verify_action( lambda: do_change_user_role(self.user_profile, role, acting_user=None) ) check_realm_user_update("events[0]", events[0], "role") self.assertEqual(events[0]["person"]["role"], role) def test_change_notification_settings(self) -> None: for notification_setting, v in self.user_profile.notification_setting_types.items(): if notification_setting in ["notification_sound", "desktop_icon_count_display"]: # These settings are tested in their own tests. continue do_change_notification_settings( self.user_profile, notification_setting, False, acting_user=self.user_profile ) for setting_value in [True, False]: events = self.verify_action( lambda: do_change_notification_settings( self.user_profile, notification_setting, setting_value, acting_user=self.user_profile, ) ) check_update_global_notifications("events[0]", events[0], setting_value) # Also test with notification_settings_null=True events = self.verify_action( lambda: do_change_notification_settings( self.user_profile, notification_setting, setting_value, acting_user=self.user_profile, ), notification_settings_null=True, state_change_expected=False, ) check_update_global_notifications("events[0]", events[0], setting_value) def test_change_notification_sound(self) -> None: notification_setting = "notification_sound" events = self.verify_action( lambda: do_change_notification_settings( self.user_profile, notification_setting, "ding", acting_user=self.user_profile ) ) check_update_global_notifications("events[0]", events[0], "ding") def test_change_desktop_icon_count_display(self) -> None: notification_setting = "desktop_icon_count_display" events = self.verify_action( lambda: do_change_notification_settings( self.user_profile, notification_setting, 2, acting_user=self.user_profile ) ) 
check_update_global_notifications("events[0]", events[0], 2) events = self.verify_action( lambda: do_change_notification_settings( self.user_profile, notification_setting, 1, acting_user=self.user_profile ) ) check_update_global_notifications("events[0]", events[0], 1) def test_realm_update_plan_type(self) -> None: realm = self.user_profile.realm state_data = fetch_initial_state_data(self.user_profile) self.assertEqual(state_data["realm_plan_type"], Realm.SELF_HOSTED) self.assertEqual(state_data["zulip_plan_is_not_limited"], True) events = self.verify_action( lambda: do_change_plan_type(realm, Realm.LIMITED, acting_user=self.user_profile) ) check_realm_update("events[0]", events[0], "plan_type") state_data = fetch_initial_state_data(self.user_profile) self.assertEqual(state_data["realm_plan_type"], Realm.LIMITED) self.assertEqual(state_data["zulip_plan_is_not_limited"], False) def test_realm_emoji_events(self) -> None: author = self.example_user("iago") with get_test_image_file("img.png") as img_file: events = self.verify_action( lambda: check_add_realm_emoji(self.user_profile.realm, "my_emoji", author, img_file) ) check_realm_emoji_update("events[0]", events[0]) events = self.verify_action( lambda: do_remove_realm_emoji(self.user_profile.realm, "my_emoji") ) check_realm_emoji_update("events[0]", events[0]) def test_realm_filter_events(self) -> None: regex = "#(?P<id>[123])" url = "https://realm.com/my_realm_filter/%(id)s" events = self.verify_action( lambda: do_add_linkifier(self.user_profile.realm, regex, url), num_events=2 ) check_realm_linkifiers("events[0]", events[0]) check_realm_filters("events[1]", events[1]) regex = "#(?P<id>[0-9]+)" linkifier_id = events[0]["realm_linkifiers"][0]["id"] events = self.verify_action( lambda: do_update_linkifier(self.user_profile.realm, linkifier_id, regex, url), num_events=2, ) check_realm_linkifiers("events[0]", events[0]) check_realm_filters("events[1]", events[1]) events = self.verify_action( lambda: 
do_remove_linkifier(self.user_profile.realm, regex), num_events=2 ) check_realm_linkifiers("events[0]", events[0]) check_realm_filters("events[1]", events[1]) def test_realm_domain_events(self) -> None: events = self.verify_action( lambda: do_add_realm_domain(self.user_profile.realm, "zulip.org", False) ) check_realm_domains_add("events[0]", events[0]) self.assertEqual(events[0]["realm_domain"]["domain"], "zulip.org") self.assertEqual(events[0]["realm_domain"]["allow_subdomains"], False) test_domain = RealmDomain.objects.get(realm=self.user_profile.realm, domain="zulip.org") events = self.verify_action(lambda: do_change_realm_domain(test_domain, True)) check_realm_domains_change("events[0]", events[0]) self.assertEqual(events[0]["realm_domain"]["domain"], "zulip.org") self.assertEqual(events[0]["realm_domain"]["allow_subdomains"], True) events = self.verify_action(lambda: do_remove_realm_domain(test_domain, acting_user=None)) check_realm_domains_remove("events[0]", events[0]) self.assertEqual(events[0]["domain"], "zulip.org") def test_realm_playground_events(self) -> None: playground_info = dict( name="Python playground", pygments_language="Python", url_prefix="https://python.example.com", ) events = self.verify_action( lambda: do_add_realm_playground(self.user_profile.realm, **playground_info) ) check_realm_playgrounds("events[0]", events[0]) last_id = RealmPlayground.objects.last().id realm_playground = access_playground_by_id(self.user_profile.realm, last_id) events = self.verify_action( lambda: do_remove_realm_playground(self.user_profile.realm, realm_playground) ) check_realm_playgrounds("events[0]", events[0]) def test_create_bot(self) -> None: action = lambda: self.create_bot("test") events = self.verify_action(action, num_events=2) check_realm_bot_add("events[1]", events[1]) action = lambda: self.create_bot( "test_outgoing_webhook", full_name="Outgoing Webhook Bot", payload_url=orjson.dumps("https://foo.bar.com").decode(), interface_type=Service.GENERIC, 
bot_type=UserProfile.OUTGOING_WEBHOOK_BOT, ) events = self.verify_action(action, num_events=2) # The third event is the second call of notify_created_bot, which contains additional # data for services (in contrast to the first call). check_realm_bot_add("events[1]", events[1]) action = lambda: self.create_bot( "test_embedded", full_name="Embedded Bot", service_name="helloworld", config_data=orjson.dumps({"foo": "bar"}).decode(), bot_type=UserProfile.EMBEDDED_BOT, ) events = self.verify_action(action, num_events=2) check_realm_bot_add("events[1]", events[1]) def test_change_bot_full_name(self) -> None: bot = self.create_bot("test") action = lambda: do_change_full_name(bot, "New Bot Name", self.user_profile) events = self.verify_action(action, num_events=2) check_realm_bot_update("events[1]", events[1], "full_name") def test_regenerate_bot_api_key(self) -> None: bot = self.create_bot("test") action = lambda: do_regenerate_api_key(bot, self.user_profile) events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "api_key") def test_change_bot_avatar_source(self) -> None: bot = self.create_bot("test") action = lambda: do_change_avatar_fields( bot, bot.AVATAR_FROM_USER, acting_user=self.user_profile ) events = self.verify_action(action, num_events=2) check_realm_bot_update("events[0]", events[0], "avatar_url") self.assertEqual(events[1]["type"], "realm_user") def test_change_realm_icon_source(self) -> None: action = lambda: do_change_icon_source( self.user_profile.realm, Realm.ICON_UPLOADED, acting_user=None ) events = self.verify_action(action, state_change_expected=True) check_realm_update_dict("events[0]", events[0]) def test_change_realm_day_mode_logo_source(self) -> None: action = lambda: do_change_logo_source( self.user_profile.realm, Realm.LOGO_UPLOADED, False, acting_user=self.user_profile ) events = self.verify_action(action, state_change_expected=True) check_realm_update_dict("events[0]", events[0]) def 
test_change_realm_night_mode_logo_source(self) -> None: action = lambda: do_change_logo_source( self.user_profile.realm, Realm.LOGO_UPLOADED, True, acting_user=self.user_profile ) events = self.verify_action(action, state_change_expected=True) check_realm_update_dict("events[0]", events[0]) def test_change_bot_default_all_public_streams(self) -> None: bot = self.create_bot("test") action = lambda: do_change_default_all_public_streams(bot, True, acting_user=None) events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "default_all_public_streams") def test_change_bot_default_sending_stream(self) -> None: bot = self.create_bot("test") stream = get_stream("Rome", bot.realm) action = lambda: do_change_default_sending_stream(bot, stream, acting_user=None) events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "default_sending_stream") action = lambda: do_change_default_sending_stream(bot, None, acting_user=None) events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "default_sending_stream") def test_change_bot_default_events_register_stream(self) -> None: bot = self.create_bot("test") stream = get_stream("Rome", bot.realm) action = lambda: do_change_default_events_register_stream(bot, stream, acting_user=None) events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "default_events_register_stream") action = lambda: do_change_default_events_register_stream(bot, None, acting_user=None) events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "default_events_register_stream") def test_change_bot_owner(self) -> None: self.user_profile = self.example_user("iago") owner = self.example_user("hamlet") bot = self.create_bot("test") action = lambda: do_change_bot_owner(bot, owner, self.user_profile) events = self.verify_action(action, num_events=2) check_realm_bot_update("events[0]", events[0], "owner_id") check_realm_user_update("events[1]", 
events[1], "bot_owner_id") self.user_profile = self.example_user("aaron") owner = self.example_user("hamlet") bot = self.create_bot("test1", full_name="Test1 Testerson") action = lambda: do_change_bot_owner(bot, owner, self.user_profile) events = self.verify_action(action, num_events=2) check_realm_bot_delete("events[0]", events[0]) check_realm_user_update("events[1]", events[1], "bot_owner_id") previous_owner = self.example_user("aaron") self.user_profile = self.example_user("hamlet") bot = self.create_test_bot("test2", previous_owner, full_name="Test2 Testerson") action = lambda: do_change_bot_owner(bot, self.user_profile, previous_owner) events = self.verify_action(action, num_events=2) check_realm_bot_add("events[0]", events[0]) check_realm_user_update("events[1]", events[1], "bot_owner_id") def test_do_update_outgoing_webhook_service(self) -> None: self.user_profile = self.example_user("iago") bot = self.create_test_bot( "test", self.user_profile, full_name="Test Bot", bot_type=UserProfile.OUTGOING_WEBHOOK_BOT, payload_url=orjson.dumps("http://hostname.domain2.com").decode(), interface_type=Service.GENERIC, ) action = lambda: do_update_outgoing_webhook_service(bot, 2, "http://hostname.domain2.com") events = self.verify_action(action) check_realm_bot_update("events[0]", events[0], "services") def test_do_deactivate_user(self) -> None: bot = self.create_bot("test") action = lambda: do_deactivate_user(bot, acting_user=None) events = self.verify_action(action, num_events=2) check_realm_user_remove("events[0]", events[0]) check_realm_bot_remove("events[1]", events[1]) def test_do_reactivate_user(self) -> None: bot = self.create_bot("test") do_deactivate_user(bot, acting_user=None) action = lambda: do_reactivate_user(bot, acting_user=None) events = self.verify_action(action, num_events=2) check_realm_bot_add("events[1]", events[1]) def test_do_deactivate_realm(self) -> None: realm = self.user_profile.realm action = lambda: do_deactivate_realm(realm, 
acting_user=None) # We delete sessions of all active users when a realm is # deactivated, and redirect them to a deactivated page in # order to inform that realm/organization has been # deactivated. state_change_expected is False is kinda # correct because were one to somehow compute page_params (as # this test does), but that's not actually possible. events = self.verify_action(action, state_change_expected=False) check_realm_deactivated("events[0]", events[0]) def test_do_mark_hotspot_as_read(self) -> None: self.user_profile.tutorial_status = UserProfile.TUTORIAL_WAITING self.user_profile.save(update_fields=["tutorial_status"]) events = self.verify_action( lambda: do_mark_hotspot_as_read(self.user_profile, "intro_reply") ) check_hotspots("events[0]", events[0]) def test_rename_stream(self) -> None: for i, include_streams in enumerate([True, False]): old_name = f"old name{i}" new_name = f"new name{i}" stream = self.make_stream(old_name) self.subscribe(self.user_profile, stream.name) action = lambda: do_rename_stream(stream, new_name, self.user_profile) events = self.verify_action(action, num_events=3, include_streams=include_streams) check_stream_update("events[0]", events[0]) self.assertEqual(events[0]["name"], old_name) check_stream_update("events[1]", events[1]) self.assertEqual(events[1]["name"], old_name) check_message("events[2]", events[2]) fields = dict( sender_email="notification-bot@zulip.com", display_recipient=new_name, sender_full_name="Notification Bot", is_me_message=False, type="stream", client="Internal", ) fields[TOPIC_NAME] = "stream events" msg = events[2]["message"] for k, v in fields.items(): self.assertEqual(msg[k], v) def test_deactivate_stream_neversubscribed(self) -> None: for i, include_streams in enumerate([True, False]): stream = self.make_stream(f"stream{i}") action = lambda: do_deactivate_stream(stream, acting_user=None) events = self.verify_action(action, include_streams=include_streams) check_stream_delete("events[0]", events[0]) 
def test_subscribe_other_user_never_subscribed(self) -> None: for i, include_streams in enumerate([True, False]): action = lambda: self.subscribe(self.example_user("othello"), f"test_stream{i}") events = self.verify_action(action, num_events=2, include_streams=True) check_subscription_peer_add("events[1]", events[1]) def test_remove_other_user_never_subscribed(self) -> None: self.subscribe(self.example_user("othello"), "test_stream") stream = get_stream("test_stream", self.user_profile.realm) action = lambda: bulk_remove_subscriptions( [self.example_user("othello")], [stream], get_client("website"), acting_user=None ) events = self.verify_action(action) check_subscription_peer_remove("events[0]", events[0]) def test_do_delete_message_stream(self) -> None: hamlet = self.example_user("hamlet") msg_id = self.send_stream_message(hamlet, "Verona") msg_id_2 = self.send_stream_message(hamlet, "Verona") messages = [Message.objects.get(id=msg_id), Message.objects.get(id=msg_id_2)] events = self.verify_action( lambda: do_delete_messages(self.user_profile.realm, messages), state_change_expected=True, ) check_delete_message( "events[0]", events[0], message_type="stream", num_message_ids=2, is_legacy=False, ) def test_do_delete_message_stream_legacy(self) -> None: """ Test for legacy method of deleting messages which sends an event per message to delete to the client. 
""" hamlet = self.example_user("hamlet") msg_id = self.send_stream_message(hamlet, "Verona") msg_id_2 = self.send_stream_message(hamlet, "Verona") messages = [Message.objects.get(id=msg_id), Message.objects.get(id=msg_id_2)] events = self.verify_action( lambda: do_delete_messages(self.user_profile.realm, messages), state_change_expected=True, bulk_message_deletion=False, num_events=2, ) check_delete_message( "events[0]", events[0], message_type="stream", num_message_ids=1, is_legacy=True, ) def test_do_delete_message_personal(self) -> None: msg_id = self.send_personal_message( self.example_user("cordelia"), self.user_profile, "hello", ) message = Message.objects.get(id=msg_id) events = self.verify_action( lambda: do_delete_messages(self.user_profile.realm, [message]), state_change_expected=True, ) check_delete_message( "events[0]", events[0], message_type="private", num_message_ids=1, is_legacy=False, ) def test_do_delete_message_personal_legacy(self) -> None: msg_id = self.send_personal_message( self.example_user("cordelia"), self.user_profile, "hello", ) message = Message.objects.get(id=msg_id) events = self.verify_action( lambda: do_delete_messages(self.user_profile.realm, [message]), state_change_expected=True, bulk_message_deletion=False, ) check_delete_message( "events[0]", events[0], message_type="private", num_message_ids=1, is_legacy=True, ) def test_do_delete_message_no_max_id(self) -> None: user_profile = self.example_user("aaron") # Delete all historical messages for this user user_profile = self.example_user("hamlet") UserMessage.objects.filter(user_profile=user_profile).delete() msg_id = self.send_stream_message(user_profile, "Verona") message = Message.objects.get(id=msg_id) self.verify_action( lambda: do_delete_messages(self.user_profile.realm, [message]), state_change_expected=True, ) result = fetch_initial_state_data(user_profile) self.assertEqual(result["max_message_id"], -1) def test_add_attachment(self) -> None: self.login("hamlet") fp = 
StringIO("zulip!") fp.name = "zulip.txt" uri = None def do_upload() -> None: nonlocal uri result = self.client_post("/json/user_uploads", {"file": fp}) self.assert_json_success(result) self.assertIn("uri", result.json()) uri = result.json()["uri"] base = "/user_uploads/" self.assertEqual(base, uri[: len(base)]) events = self.verify_action(lambda: do_upload(), num_events=1, state_change_expected=False) check_attachment_add("events[0]", events[0]) self.assertEqual(events[0]["upload_space_used"], 6) # Verify that the DB has the attachment marked as unclaimed entry = Attachment.objects.get(file_name="zulip.txt") self.assertEqual(entry.is_claimed(), False) hamlet = self.example_user("hamlet") self.subscribe(hamlet, "Denmark") assert uri is not None body = f"First message ...[zulip.txt](http://{hamlet.realm.host}" + uri + ")" events = self.verify_action( lambda: self.send_stream_message(self.example_user("hamlet"), "Denmark", body, "test"), num_events=2, ) check_attachment_update("events[0]", events[0]) self.assertEqual(events[0]["upload_space_used"], 6) # Now remove the attachment events = self.verify_action( lambda: self.client_delete(f"/json/attachments/{entry.id}"), num_events=1, state_change_expected=False, ) check_attachment_remove("events[0]", events[0]) self.assertEqual(events[0]["upload_space_used"], 0) def test_notify_realm_export(self) -> None: do_change_user_role( self.user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None ) self.login_user(self.user_profile) with mock.patch( "zerver.lib.export.do_export_realm", return_value=create_dummy_file("test-export.tar.gz"), ): with stdout_suppressed(), self.assertLogs(level="INFO") as info_logs: events = self.verify_action( lambda: self.client_post("/json/export/realm"), state_change_expected=True, num_events=3, ) self.assertTrue("INFO:root:Completed data export for zulip in" in info_logs.output[0]) # We get two realm_export events for this action, where the first # is missing the export_url (because 
it's pending). check_realm_export( "events[0]", events[0], has_export_url=False, has_deleted_timestamp=False, has_failed_timestamp=False, ) check_realm_export( "events[2]", events[2], has_export_url=True, has_deleted_timestamp=False, has_failed_timestamp=False, ) # Now we check the deletion of the export. audit_log_entry = RealmAuditLog.objects.filter( event_type=RealmAuditLog.REALM_EXPORTED ).first() events = self.verify_action( lambda: self.client_delete(f"/json/export/realm/{audit_log_entry.id}"), state_change_expected=False, num_events=1, ) check_realm_export( "events[0]", events[0], has_export_url=False, has_deleted_timestamp=True, has_failed_timestamp=False, ) def test_notify_realm_export_on_failure(self) -> None: do_change_user_role( self.user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None ) self.login_user(self.user_profile) with mock.patch( "zerver.lib.export.do_export_realm", side_effect=Exception("test") ), self.assertLogs(level="ERROR") as error_log: with stdout_suppressed(): events = self.verify_action( lambda: self.client_post("/json/export/realm"), state_change_expected=False, num_events=2, ) # Log is of following format: "ERROR:root:Data export for zulip failed after 0.004499673843383789" # Where last floating number is time and will vary in each test hence the following assertion is # independent of time bit by not matching exact log but only part of it. self.assertTrue("ERROR:root:Data export for zulip failed after" in error_log.output[0]) # We get two events for the export. 
check_realm_export( "events[0]", events[0], has_export_url=False, has_deleted_timestamp=False, has_failed_timestamp=False, ) check_realm_export( "events[1]", events[1], has_export_url=False, has_deleted_timestamp=False, has_failed_timestamp=True, ) def test_has_zoom_token(self) -> None: events = self.verify_action( lambda: do_set_zoom_token(self.user_profile, {"access_token": "token"}), ) check_has_zoom_token("events[0]", events[0], value=True) events = self.verify_action(lambda: do_set_zoom_token(self.user_profile, None)) check_has_zoom_token("events[0]", events[0], value=False) def test_restart_event(self) -> None: with self.assertRaises(RestartEventException): self.verify_action(lambda: send_restart_events(immediate=True)) class RealmPropertyActionTest(BaseAction): def do_set_realm_property_test(self, name: str) -> None: bool_tests: List[bool] = [True, False, True] test_values: Dict[str, Any] = dict( default_language=["es", "de", "en"], description=["Realm description", "New description"], digest_weekday=[0, 1, 2], message_retention_days=[10, 20], name=["Zulip", "New Name"], waiting_period_threshold=[10, 20], create_stream_policy=[4, 3, 2, 1], invite_to_stream_policy=[4, 3, 2, 1], private_message_policy=[2, 1], user_group_edit_policy=[1, 2], wildcard_mention_policy=[7, 6, 5, 4, 3, 2, 1], email_address_visibility=[Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS], bot_creation_policy=[Realm.BOT_CREATION_EVERYONE], video_chat_provider=[ Realm.VIDEO_CHAT_PROVIDERS["jitsi_meet"]["id"], ], giphy_rating=[ Realm.GIPHY_RATING_OPTIONS["disabled"]["id"], ], default_code_block_language=["python", "javascript"], message_content_delete_limit_seconds=[1000, 1100, 1200], invite_to_realm_policy=[4, 3, 2, 1], move_messages_between_streams_policy=[4, 3, 2, 1], ) vals = test_values.get(name) property_type = Realm.property_types[name] if property_type is bool: vals = bool_tests if vals is None: raise AssertionError(f"No test created for {name}") now = timezone_now() 
do_set_realm_property(self.user_profile.realm, name, vals[0], acting_user=self.user_profile) self.assertEqual( RealmAuditLog.objects.filter( realm=self.user_profile.realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time__gte=now, acting_user=self.user_profile, ).count(), 1, ) for count, val in enumerate(vals[1:]): now = timezone_now() state_change_expected = True events = self.verify_action( lambda: do_set_realm_property( self.user_profile.realm, name, val, acting_user=self.user_profile ), state_change_expected=state_change_expected, ) old_value = vals[count] self.assertEqual( RealmAuditLog.objects.filter( realm=self.user_profile.realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time__gte=now, acting_user=self.user_profile, extra_data=orjson.dumps( { RealmAuditLog.OLD_VALUE: old_value, RealmAuditLog.NEW_VALUE: val, "property": name, } ).decode(), ).count(), 1, ) check_realm_update("events[0]", events[0], name) def test_change_realm_property(self) -> None: for prop in Realm.property_types: with self.settings(SEND_DIGEST_EMAILS=True): self.do_set_realm_property_test(prop) class UserDisplayActionTest(BaseAction): def do_set_user_display_settings_test(self, setting_name: str) -> None: """Test updating each setting in UserProfile.property_types dict.""" test_changes: Dict[str, Any] = dict( emojiset=["twitter"], default_language=["es", "de", "en"], default_view=["all_messages", "recent_topics"], timezone=["America/Denver", "Pacific/Pago_Pago", "Pacific/Galapagos", ""], demote_inactive_streams=[2, 3, 1], color_scheme=[2, 3, 1], ) num_events = 1 if setting_name == "timezone": num_events = 2 values = test_changes.get(setting_name) property_type = UserProfile.property_types[setting_name] if property_type is bool: if getattr(self.user_profile, setting_name) is False: values = [True, False, True] else: values = [False, True, False] if values is None: raise AssertionError(f"No test created for {setting_name}") for value in values: events = 
self.verify_action( lambda: do_set_user_display_setting(self.user_profile, setting_name, value), num_events=num_events, ) check_update_display_settings("events[0]", events[0]) if setting_name == "timezone": check_realm_user_update("events[1]", events[1], "timezone") def test_set_user_display_settings(self) -> None: for prop in UserProfile.property_types: self.do_set_user_display_settings_test(prop) class SubscribeActionTest(BaseAction): def test_subscribe_events(self) -> None: self.do_test_subscribe_events(include_subscribers=True) def test_subscribe_events_no_include_subscribers(self) -> None: self.do_test_subscribe_events(include_subscribers=False) def do_test_subscribe_events(self, include_subscribers: bool) -> None: # Subscribe to a totally new stream, so it's just Hamlet on it action: Callable[[], object] = lambda: self.subscribe( self.example_user("hamlet"), "test_stream" ) events = self.verify_action( action, event_types=["subscription"], include_subscribers=include_subscribers ) check_subscription_add("events[0]", events[0]) # Add another user to that totally new stream action = lambda: self.subscribe(self.example_user("othello"), "test_stream") events = self.verify_action( action, include_subscribers=include_subscribers, state_change_expected=include_subscribers, ) check_subscription_peer_add("events[0]", events[0]) stream = get_stream("test_stream", self.user_profile.realm) # Now remove the first user, to test the normal unsubscribe flow and # 'peer_remove' event for subscribed streams. 
action = lambda: bulk_remove_subscriptions( [self.example_user("othello")], [stream], get_client("website"), acting_user=None ) events = self.verify_action( action, include_subscribers=include_subscribers, state_change_expected=include_subscribers, ) check_subscription_peer_remove("events[0]", events[0]) # Now remove the user himself, to test the 'remove' event flow action = lambda: bulk_remove_subscriptions( [self.example_user("hamlet")], [stream], get_client("website"), acting_user=None ) events = self.verify_action( action, include_subscribers=include_subscribers, include_streams=False, num_events=2 ) check_subscription_remove("events[0]", events[0]) self.assert_length(events[0]["subscriptions"], 1) self.assertEqual( events[0]["subscriptions"][0]["name"], "test_stream", ) # Subscribe other user to test 'peer_add' event flow for unsubscribed stream. action = lambda: self.subscribe(self.example_user("iago"), "test_stream") events = self.verify_action( action, event_types=["subscription"], include_subscribers=include_subscribers, state_change_expected=include_subscribers, ) check_subscription_peer_add("events[0]", events[0]) # Remove the user to test 'peer_remove' event flow for unsubscribed stream. 
action = lambda: bulk_remove_subscriptions( [self.example_user("iago")], [stream], get_client("website"), acting_user=None ) events = self.verify_action( action, include_subscribers=include_subscribers, state_change_expected=include_subscribers, ) check_subscription_peer_remove("events[0]", events[0]) # Now resubscribe a user, to make sure that works on a vacated stream action = lambda: self.subscribe(self.example_user("hamlet"), "test_stream") events = self.verify_action( action, include_subscribers=include_subscribers, include_streams=False, num_events=1 ) check_subscription_add("events[0]", events[0]) action = lambda: do_change_stream_description(stream, "new description") events = self.verify_action(action, include_subscribers=include_subscribers) check_stream_update("events[0]", events[0]) # Update stream privacy action = lambda: do_change_stream_invite_only( stream, True, history_public_to_subscribers=True ) events = self.verify_action(action, include_subscribers=include_subscribers) check_stream_update("events[0]", events[0]) # Update stream stream_post_policy property action = lambda: do_change_stream_post_policy(stream, Stream.STREAM_POST_POLICY_ADMINS) events = self.verify_action(action, include_subscribers=include_subscribers, num_events=2) check_stream_update("events[0]", events[0]) action = lambda: do_change_stream_message_retention_days(stream, -1) events = self.verify_action(action, include_subscribers=include_subscribers, num_events=1) check_stream_update("events[0]", events[0]) # Subscribe to a totally new invite-only stream, so it's just Hamlet on it stream = self.make_stream("private", self.user_profile.realm, invite_only=True) stream.message_retention_days = 10 stream.save() user_profile = self.example_user("hamlet") action = lambda: bulk_add_subscriptions( user_profile.realm, [stream], [user_profile], acting_user=None ) events = self.verify_action(action, include_subscribers=include_subscribers, num_events=2) check_stream_create("events[0]", 
events[0]) check_subscription_add("events[1]", events[1]) self.assertEqual( events[0]["streams"][0]["message_retention_days"], 10, )
unknown
codeparrot/codeparrot-clean
//// [tests/cases/conformance/async/es5/asyncImportedPromise_es5.ts] //// //// [task.ts] export class Task<T> extends Promise<T> { } //// [test.ts] import { Task } from "./task"; class Test { async example<T>(): Task<T> { return; } } //// [task.js] "use strict"; var __extends = (this && this.__extends) || (function () { var extendStatics = function (d, b) { extendStatics = Object.setPrototypeOf || ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; return extendStatics(d, b); }; return function (d, b) { if (typeof b !== "function" && b !== null) throw new TypeError("Class extends value " + String(b) + " is not a constructor or null"); extendStatics(d, b); function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; })(); Object.defineProperty(exports, "__esModule", { value: true }); exports.Task = void 0; var Task = /** @class */ (function (_super) { __extends(Task, _super); function Task() { return _super !== null && _super.apply(this, arguments) || this; } return Task; }(Promise)); exports.Task = Task; //// [test.js] "use strict"; var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } return new (P || (P = Promise))(function (resolve, reject) { function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } step((generator = generator.apply(thisArg, _arguments || [])).next()); }); }; var __generator = (this && this.__generator) || function (thisArg, body) { var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype); return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; function verb(n) { return function (v) { return step([n, v]); }; } function step(op) { if (f) throw new TypeError("Generator is already executing."); while (g && (g = 0, op[0] && (_ = 0)), _) try { if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; if (y = 0, t) op = [op[0] & 2, t.value]; switch (op[0]) { case 0: case 1: t = op; break; case 4: _.label++; return { value: op[1], done: false }; case 5: _.label++; y = op[1]; op = [0]; continue; case 7: op = _.ops.pop(); _.trys.pop(); continue; default: if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } if (t[2]) _.ops.pop(); _.trys.pop(); continue; } op = body.call(thisArg, _); } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } if (op[0] & 5) throw op[1]; return { value: op[0] ? 
op[1] : void 0, done: true }; } }; Object.defineProperty(exports, "__esModule", { value: true }); var task_1 = require("./task"); var Test = /** @class */ (function () { function Test() { } Test.prototype.example = function () { return __awaiter(this, void 0, task_1.Task, function () { return __generator(this, function (_a) { return [2 /*return*/]; }); }); }; return Test; }());
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/asyncImportedPromise_es5(target=es5).js
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # This script can either source a file and dump the enironment changes done by # it, or just simply dump the current environment as JSON into a file. import json import optparse import os import pipes import subprocess import sys def main(): parser = optparse.OptionParser() parser.add_option('-f', '--output-json', help='File to dump the environment as JSON into.') parser.add_option( '-d', '--dump-mode', action='store_true', help='Dump the environment to sys.stdout and exit immediately.') parser.disable_interspersed_args() options, args = parser.parse_args() if options.dump_mode: if args or options.output_json: parser.error('Cannot specify args or --output-json with --dump-mode.') json.dump(dict(os.environ), sys.stdout) else: if not options.output_json: parser.error('Requires --output-json option.') envsetup_cmd = ' '.join(map(pipes.quote, args)) full_cmd = [ 'bash', '-c', '. %s > /dev/null; %s -d' % (envsetup_cmd, os.path.abspath(__file__)) ] try: output = subprocess.check_output(full_cmd) except Exception as e: sys.exit('Error running %s and dumping environment.' % envsetup_cmd) env_diff = {} new_env = json.loads(output) for k, val in new_env.items(): if k == '_' or (k in os.environ and os.environ[k] == val): continue env_diff[k] = val with open(options.output_json, 'w') as f: json.dump(env_diff, f) if __name__ == '__main__': sys.exit(main())
unknown
codeparrot/codeparrot-clean
from .hash import bin_double_sha256, bin_to_hex_reversed, hex_to_bin_reversed def hex_to_bin_reversed_hashes(hex_hashes): return [hex_to_bin_reversed(h) for h in hex_hashes] def calculate_merkle_pairs(bin_hashes, hash_function=bin_double_sha256): """ takes in a list of binary hashes, returns a binary hash """ hashes = list(bin_hashes) # if there are an odd number of hashes, double up the last one if len(hashes) % 2 == 1: hashes.append(hashes[-1]) # build the new list of hashes new_hashes = [] for i in range(0, len(hashes), 2): new_hashes.append(hash_function(hashes[i] + hashes[i+1])) # return the new list of hashes return new_hashes def calculate_merkle_root(hashes, hash_function=bin_double_sha256, hex_format=True): """ takes in a list of binary hashes, returns a binary hash """ if hex_format: hashes = hex_to_bin_reversed_hashes(hashes) # keep moving up the merkle tree, constructing one row at a time while len(hashes) > 1: hashes = calculate_merkle_pairs(hashes, hash_function) # get the merkle root merkle_root = hashes[0] # if the user wants the merkle root in hex format, convert it if hex_format: return bin_to_hex_reversed(merkle_root) # return the binary merkle root return merkle_root class MerkleTree(): def __init__(self, hashes, hex_format=True, hash_function=bin_double_sha256): if not len(hashes) > 0: raise ValueError("At least one hash is required.") self.rows = [] # if the hashes are in hex format, first convert them to binary if hex_format: hashes = hex_to_bin_reversed_hashes(hashes) # build the rows of the merkle tree self.rows.append(hashes) while len(hashes) > 1: hashes = calculate_merkle_pairs(hashes, hash_function) self.rows.append(hashes) def get(self, row_index, column_index): # check to make sure there are enough rows if row_index + 1 > len(self.rows): raise ValueError("There aren't that many rows.") row = self.rows(row_index) # check to make sure there are enough items in the row if column_index + 1 > len(row): raise ValueError("There aren't that 
many items in that row.") # return the requested item return row[column_index] def root(self, hex_format=True): # return the merkle root bin_merkle_root = self.rows[-1][0] if hex_format: return bin_to_hex_reversed(bin_merkle_root)
unknown
codeparrot/codeparrot-clean
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Union, Optional from typing_extensions import Literal, TypeAlias from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText from ..responses.response_input_audio import ResponseInputAudio __all__ = ["GraderInputs", "GraderInputItem", "GraderInputItemOutputText", "GraderInputItemInputImage"] class GraderInputItemOutputText(BaseModel): """A text output from the model.""" text: str """The text output from the model.""" type: Literal["output_text"] """The type of the output text. Always `output_text`.""" class GraderInputItemInputImage(BaseModel): """An image input block used within EvalItem content arrays.""" image_url: str """The URL of the image input.""" type: Literal["input_image"] """The type of the image input. Always `input_image`.""" detail: Optional[str] = None """The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`. """ GraderInputItem: TypeAlias = Union[ str, ResponseInputText, GraderInputItemOutputText, GraderInputItemInputImage, ResponseInputAudio ] GraderInputs: TypeAlias = List[GraderInputItem]
python
github
https://github.com/openai/openai-python
src/openai/types/graders/grader_inputs.py
import NIOConcurrencyHelpers extension Application { public var servers: Servers { .init(application: self) } public var server: Server { guard let makeServer = self.servers.storage.makeServer.withLockedValue({ $0.factory }) else { fatalError("No server configured. Configure with app.servers.use(...)") } return makeServer(self) } public struct Servers: Sendable { public struct Provider { let run: @Sendable (Application) -> () @preconcurrency public init(_ run: @Sendable @escaping (Application) -> ()) { self.run = run } } struct CommandKey: StorageKey { typealias Value = ServeCommand } final class Storage: Sendable { struct ServerFactory { let factory: (@Sendable (Application) -> Server)? } let makeServer: NIOLockedValueBox<ServerFactory> init() { self.makeServer = .init(.init(factory: nil)) } } struct Key: StorageKey { typealias Value = Storage } func initialize() { self.application.storage[Key.self] = .init() } public func use(_ provider: Provider) { provider.run(self.application) } @preconcurrency public func use(_ makeServer: @Sendable @escaping (Application) -> (Server)) { self.storage.makeServer.withLockedValue { $0 = .init(factory: makeServer) } } @available(*, noasync, renamed: "asyncCommand", message: "Use the async property instead.") public var command: ServeCommand { if let existing = self.application.storage.get(CommandKey.self) { return existing } else { let new = ServeCommand() self.application.storage.set(CommandKey.self, to: new) { $0.shutdown() } return new } } public var asyncCommand: ServeCommand { get async { if let existing = self.application.storage.get(CommandKey.self) { return existing } else { let new = ServeCommand() await self.application.storage.setWithAsyncShutdown(CommandKey.self, to: new) { await $0.asyncShutdown() } return new } } } let application: Application var storage: Storage { guard let storage = self.application.storage[Key.self] else { fatalError("Servers not initialized. 
Configure with app.servers.initialize()") } return storage } } }
swift
github
https://github.com/vapor/vapor
Sources/Vapor/Server/Application+Servers.swift
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numbers from itertools import chain from math import ceil import numpy as np from scipy import sparse from scipy.stats.mstats import mquantiles from sklearn.base import is_regressor from sklearn.inspection import partial_dependence from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index from sklearn.utils import Bunch, _safe_indexing, check_array, check_random_state from sklearn.utils._encode import _unique from sklearn.utils._optional_dependencies import check_matplotlib_support from sklearn.utils._plotting import _validate_style_kwargs from sklearn.utils.parallel import Parallel, delayed class PartialDependenceDisplay: """Partial Dependence Plot (PDP) and Individual Conditional Expectation (ICE). It is recommended to use :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to create a :class:`~sklearn.inspection.PartialDependenceDisplay`. All parameters are stored as attributes. For general information regarding `scikit-learn` visualization tools, see the :ref:`Visualization Guide <visualizations>`. For guidance on interpreting these plots, refer to the :ref:`Inspection Guide <partial_dependence>`. For an example on how to use this class, see the following example: :ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py`. .. versionadded:: 0.22 Parameters ---------- pd_results : list of Bunch Results of :func:`~sklearn.inspection.partial_dependence` for ``features``. features : list of (int,) or list of (int, int) Indices of features for a given plot. A tuple of one integer will plot a partial dependence curve of one feature. A tuple of two integers will plot a two-way partial dependence curve as a contour plot. feature_names : list of str Feature names corresponding to the indices in ``features``. target_idx : int - In a multiclass setting, specifies the class for which the PDPs should be computed. 
Note that for binary classification, the positive class (index 1) is always used. - In a multioutput setting, specifies the task for which the PDPs should be computed. Ignored in binary classification or classical regression settings. deciles : dict Deciles for feature indices in ``features``. kind : {'average', 'individual', 'both'} or list of such str, \ default='average' Whether to plot the partial dependence averaged across all the samples in the dataset or one line per sample or both. - ``kind='average'`` results in the traditional PD plot; - ``kind='individual'`` results in the ICE plot; - ``kind='both'`` results in plotting both the ICE and PD on the same plot. A list of such strings can be provided to specify `kind` on a per-plot basis. The length of the list should be the same as the number of interaction requested in `features`. .. note:: ICE ('individual' or 'both') is not a valid option for 2-ways interactions plot. As a result, an error will be raised. 2-ways interaction plots should always be configured to use the 'average' kind instead. .. note:: The fast ``method='recursion'`` option is only available for `kind='average'` and `sample_weights=None`. Computing individual dependencies and doing weighted averages requires using the slower `method='brute'`. .. versionadded:: 0.24 Add `kind` parameter with `'average'`, `'individual'`, and `'both'` options. .. versionadded:: 1.1 Add the possibility to pass a list of string specifying `kind` for each plot. subsample : float, int or None, default=1000 Sampling for ICE curves when `kind` is 'individual' or 'both'. If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to be used to plot ICE curves. If int, represents the maximum absolute number of samples to use. Note that the full dataset is still used to calculate partial dependence when `kind='both'`. .. 
versionadded:: 0.24 random_state : int, RandomState instance or None, default=None Controls the randomness of the selected samples when subsamples is not `None`. See :term:`Glossary <random_state>` for details. .. versionadded:: 0.24 is_categorical : list of (bool,) or list of (bool, bool), default=None Whether each target feature in `features` is categorical or not. The list should be same size as `features`. If `None`, all features are assumed to be continuous. .. versionadded:: 1.2 Attributes ---------- bounding_ax_ : matplotlib Axes or None If `ax` is an axes or None, the `bounding_ax_` is the axes where the grid of partial dependence plots are drawn. If `ax` is a list of axes or a numpy array of axes, `bounding_ax_` is None. axes_ : ndarray of matplotlib Axes If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item in `ax`. Elements that are None correspond to a nonexisting axes in that position. lines_ : ndarray of matplotlib Artists If `ax` is an axes or None, `lines_[i, j]` is the partial dependence curve on the i-th row and j-th column. If `ax` is a list of axes, `lines_[i]` is the partial dependence curve corresponding to the i-th item in `ax`. Elements that are None correspond to a nonexisting axes or an axes that does not include a line plot. deciles_vlines_ : ndarray of matplotlib LineCollection If `ax` is an axes or None, `vlines_[i, j]` is the line collection representing the x axis deciles of the i-th row and j-th column. If `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in `ax`. Elements that are None correspond to a nonexisting axes or an axes that does not include a PDP plot. .. versionadded:: 0.23 deciles_hlines_ : ndarray of matplotlib LineCollection If `ax` is an axes or None, `vlines_[i, j]` is the line collection representing the y axis deciles of the i-th row and j-th column. 
If `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in `ax`. Elements that are None correspond to a nonexisting axes or an axes that does not include a 2-way plot. .. versionadded:: 0.23 contours_ : ndarray of matplotlib Artists If `ax` is an axes or None, `contours_[i, j]` is the partial dependence plot on the i-th row and j-th column. If `ax` is a list of axes, `contours_[i]` is the partial dependence plot corresponding to the i-th item in `ax`. Elements that are None correspond to a nonexisting axes or an axes that does not include a contour plot. bars_ : ndarray of matplotlib Artists If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar plot on the i-th row and j-th column (for a categorical feature). If `ax` is a list of axes, `bars_[i]` is the partial dependence bar plot corresponding to the i-th item in `ax`. Elements that are None correspond to a nonexisting axes or an axes that does not include a bar plot. .. versionadded:: 1.2 heatmaps_ : ndarray of matplotlib Artists If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence heatmap on the i-th row and j-th column (for a pair of categorical features) . If `ax` is a list of axes, `heatmaps_[i]` is the partial dependence heatmap corresponding to the i-th item in `ax`. Elements that are None correspond to a nonexisting axes or an axes that does not include a heatmap. .. versionadded:: 1.2 figure_ : matplotlib Figure Figure containing partial dependence plots. See Also -------- partial_dependence : Compute Partial Dependence values. PartialDependenceDisplay.from_estimator : Plot Partial Dependence. 
Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> from sklearn.inspection import PartialDependenceDisplay >>> from sklearn.inspection import partial_dependence >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])] >>> deciles = {0: np.linspace(0, 1, num=5)} >>> pd_results = partial_dependence( ... clf, X, features=0, kind="average", grid_resolution=5) >>> display = PartialDependenceDisplay( ... [pd_results], features=features, feature_names=feature_names, ... target_idx=0, deciles=deciles ... ) >>> display.plot(pdp_lim={1: (-1.38, 0.66)}) <...> >>> plt.show() """ def __init__( self, pd_results, *, features, feature_names, target_idx, deciles, kind="average", subsample=1000, random_state=None, is_categorical=None, ): self.pd_results = pd_results self.features = features self.feature_names = feature_names self.target_idx = target_idx self.deciles = deciles self.kind = kind self.subsample = subsample self.random_state = random_state self.is_categorical = is_categorical @classmethod def from_estimator( cls, estimator, X, features, *, sample_weight=None, categorical_features=None, feature_names=None, target=None, response_method="auto", n_cols=3, grid_resolution=100, percentiles=(0.05, 0.95), custom_values=None, method="auto", n_jobs=None, verbose=0, line_kw=None, ice_lines_kw=None, pd_line_kw=None, contour_kw=None, ax=None, kind="average", centered=False, subsample=1000, random_state=None, ): """Partial dependence (PD) and individual conditional expectation (ICE) plots. Partial dependence plots, individual conditional expectation plots, or an overlay of both can be plotted by setting the `kind` parameter. This method generates one plot for each entry in `features`. 
The plots are arranged in a grid with `n_cols` columns. For one-way partial dependence plots, the deciles of the feature values are shown on the x-axis. For two-way plots, the deciles are shown on both axes and PDPs are contour plots. For general information regarding `scikit-learn` visualization tools, see the :ref:`Visualization Guide <visualizations>`. For guidance on interpreting these plots, refer to the :ref:`Inspection Guide <partial_dependence>`. For an example on how to use this class method, see :ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`. .. note:: :func:`PartialDependenceDisplay.from_estimator` does not support using the same axes with multiple calls. To plot the partial dependence for multiple estimators, please pass the axes created by the first call to the second call:: >>> from sklearn.inspection import PartialDependenceDisplay >>> from sklearn.datasets import make_friedman1 >>> from sklearn.linear_model import LinearRegression >>> from sklearn.ensemble import RandomForestRegressor >>> X, y = make_friedman1() >>> est1 = LinearRegression().fit(X, y) >>> est2 = RandomForestRegressor().fit(X, y) >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X, ... [1, 2]) >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2], ... ax=disp1.axes_) .. warning:: For :class:`~sklearn.ensemble.GradientBoostingClassifier` and :class:`~sklearn.ensemble.GradientBoostingRegressor`, the `'recursion'` method (used by default) will not account for the `init` predictor of the boosting process. In practice, this will produce the same values as `'brute'` up to a constant offset in the target response, provided that `init` is a constant estimator (which is the default). However, if `init` is not a constant estimator, the partial dependence values are incorrect for `'recursion'` because the offset will be sample-dependent. It is preferable to use the `'brute'` method. 
Note that this only applies to :class:`~sklearn.ensemble.GradientBoostingClassifier` and :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. .. versionadded:: 1.0 Parameters ---------- estimator : BaseEstimator A fitted estimator object implementing :term:`predict`, :term:`predict_proba`, or :term:`decision_function`. Multioutput-multiclass classifiers are not supported. X : {array-like, dataframe} of shape (n_samples, n_features) ``X`` is used to generate a grid of values for the target ``features`` (where the partial dependence will be evaluated), and also to generate values for the complement features when the `method` is `'brute'`. features : list of {int, str, pair of int, pair of str} The target features for which to create the PDPs. If `features[i]` is an integer or a string, a one-way PDP is created; if `features[i]` is a tuple, a two-way PDP is created (only supported with `kind='average'`). Each tuple must be of size 2. If any entry is a string, then it must be in ``feature_names``. sample_weight : array-like of shape (n_samples,), default=None Sample weights are used to calculate weighted means when averaging the model output. If `None`, then samples are equally weighted. If `sample_weight` is not `None`, then `method` will be set to `'brute'`. Note that `sample_weight` is ignored for `kind='individual'`. .. versionadded:: 1.3 categorical_features : array-like of shape (n_features,) or shape \ (n_categorical_features,), dtype={bool, int, str}, default=None Indicates the categorical features. - `None`: no feature will be considered categorical; - boolean array-like: boolean mask of shape `(n_features,)` indicating which features are categorical. Thus, this array has the same shape has `X.shape[1]`; - integer or string array-like: integer indices or strings indicating categorical features. .. 
versionadded:: 1.2 feature_names : array-like of shape (n_features,), dtype=str, default=None Name of each feature; `feature_names[i]` holds the name of the feature with index `i`. By default, the name of the feature corresponds to their numerical index for NumPy array and their column name for pandas dataframe. target : int, default=None - In a multiclass setting, specifies the class for which the PDPs should be computed. Note that for binary classification, the positive class (index 1) is always used. - In a multioutput setting, specifies the task for which the PDPs should be computed. Ignored in binary classification or classical regression settings. response_method : {'auto', 'predict_proba', 'decision_function'}, \ default='auto' Specifies whether to use :term:`predict_proba` or :term:`decision_function` as the target response. For regressors this parameter is ignored and the response is always the output of :term:`predict`. By default, :term:`predict_proba` is tried first and we revert to :term:`decision_function` if it doesn't exist. If ``method`` is `'recursion'`, the response is always the output of :term:`decision_function`. n_cols : int, default=3 The maximum number of columns in the grid plot. Only active when `ax` is a single axis or `None`. grid_resolution : int, default=100 The number of equally spaced points on the axes of the plots, for each target feature. This parameter is overridden by `custom_values` if that parameter is set. percentiles : tuple of float, default=(0.05, 0.95) The lower and upper percentile used to create the extreme values for the PDP axes. Must be in [0, 1]. This parameter is overridden by `custom_values` if that parameter is set. custom_values : dict A dictionary mapping the index of an element of `features` to an array of values where the partial dependence should be calculated for that feature. Setting a range of values for a feature overrides `grid_resolution` and `percentiles`. .. 
versionadded:: 1.7 method : str, default='auto' The method used to calculate the averaged predictions: - `'recursion'` is only supported for some tree-based estimators (namely :class:`~sklearn.ensemble.GradientBoostingClassifier`, :class:`~sklearn.ensemble.GradientBoostingRegressor`, :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, :class:`~sklearn.tree.DecisionTreeRegressor`, :class:`~sklearn.ensemble.RandomForestRegressor` but is more efficient in terms of speed. With this method, the target response of a classifier is always the decision function, not the predicted probabilities. Since the `'recursion'` method implicitly computes the average of the ICEs by design, it is not compatible with ICE and thus `kind` must be `'average'`. - `'brute'` is supported for any estimator, but is more computationally intensive. - `'auto'`: the `'recursion'` is used for estimators that support it, and `'brute'` is used otherwise. If `sample_weight` is not `None`, then `'brute'` is used regardless of the estimator. Please see :ref:`this note <pdp_method_differences>` for differences between the `'brute'` and `'recursion'` method. n_jobs : int, default=None The number of CPUs to use to compute the partial dependences. Computation is parallelized over features specified by the `features` parameter. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. verbose : int, default=0 Verbose output during PD computations. line_kw : dict, default=None Dict with keywords passed to the ``matplotlib.pyplot.plot`` call. For one-way partial dependence plots. It can be used to define common properties for both `ice_lines_kw` and `pdp_line_kw`. ice_lines_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For ICE lines in the one-way partial dependence plots. 
The key value pairs defined in `ice_lines_kw` takes priority over `line_kw`. pd_line_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For partial dependence in one-way partial dependence plots. The key value pairs defined in `pd_line_kw` takes priority over `line_kw`. contour_kw : dict, default=None Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call. For two-way partial dependence plots. ax : Matplotlib axes or array-like of Matplotlib axes, default=None - If a single axis is passed in, it is treated as a bounding axes and a grid of partial dependence plots will be drawn within these bounds. The `n_cols` parameter controls the number of columns in the grid. - If an array-like of axes are passed in, the partial dependence plots will be drawn directly into these axes. - If `None`, a figure and a bounding axes is created and treated as the single axes case. kind : {'average', 'individual', 'both'}, default='average' Whether to plot the partial dependence averaged across all the samples in the dataset or one line per sample or both. - ``kind='average'`` results in the traditional PD plot; - ``kind='individual'`` results in the ICE plot. Note that the fast `method='recursion'` option is only available for `kind='average'` and `sample_weights=None`. Computing individual dependencies and doing weighted averages requires using the slower `method='brute'`. centered : bool, default=False If `True`, the ICE and PD lines will start at the origin of the y-axis. By default, no centering is done. .. versionadded:: 1.1 subsample : float, int or None, default=1000 Sampling for ICE curves when `kind` is 'individual' or 'both'. If `float`, should be between 0.0 and 1.0 and represent the proportion of the dataset to be used to plot ICE curves. If `int`, represents the absolute number samples to use. Note that the full dataset is still used to calculate averaged partial dependence when `kind='both'`. 
random_state : int, RandomState instance or None, default=None Controls the randomness of the selected samples when subsamples is not `None` and `kind` is either `'both'` or `'individual'`. See :term:`Glossary <random_state>` for details. Returns ------- display : :class:`~sklearn.inspection.PartialDependenceDisplay` See Also -------- partial_dependence : Compute Partial Dependence values. Examples -------- >>> import matplotlib.pyplot as plt >>> from sklearn.datasets import make_friedman1 >>> from sklearn.ensemble import GradientBoostingRegressor >>> from sklearn.inspection import PartialDependenceDisplay >>> X, y = make_friedman1() >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) >>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)]) <...> >>> plt.show() """ check_matplotlib_support(f"{cls.__name__}.from_estimator") import matplotlib.pyplot as plt # set target_idx for multi-class estimators if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2: if target is None: raise ValueError("target must be specified for multi-class") target_idx = np.searchsorted(estimator.classes_, target) if ( not (0 <= target_idx < len(estimator.classes_)) or estimator.classes_[target_idx] != target ): raise ValueError("target not in est.classes_, got {}".format(target)) else: # regression and binary classification target_idx = 0 # Use check_array only on lists and other non-array-likes / sparse. Do not # convert DataFrame into a NumPy array. if not (hasattr(X, "__array__") or sparse.issparse(X)): X = check_array(X, ensure_all_finite="allow-nan", dtype=object) n_features = X.shape[1] feature_names = _check_feature_names(X, feature_names) # expand kind to always be a list of str kind_ = [kind] * len(features) if isinstance(kind, str) else kind if len(kind_) != len(features): raise ValueError( "When `kind` is provided as a list of strings, it should contain " f"as many elements as `features`. 
`kind` contains {len(kind_)} " f"element(s) and `features` contains {len(features)} element(s)." ) # convert features into a seq of int tuples tmp_features, ice_for_two_way_pd = [], [] for kind_plot, fxs in zip(kind_, features): if isinstance(fxs, (numbers.Integral, str)): fxs = (fxs,) try: fxs = tuple( _get_feature_index(fx, feature_names=feature_names) for fx in fxs ) except TypeError as e: raise ValueError( "Each entry in features must be either an int, " "a string, or an iterable of size at most 2." ) from e if not 1 <= np.size(fxs) <= 2: raise ValueError( "Each entry in features must be either an int, " "a string, or an iterable of size at most 2." ) # store the information if 2-way PD was requested with ICE to later # raise a ValueError with an exhaustive list of problematic # settings. ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1) tmp_features.append(fxs) if any(ice_for_two_way_pd): # raise an error and be specific regarding the parameter values # when 1- and 2-way PD were requested kind_ = [ "average" if forcing_average else kind_plot for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_) ] raise ValueError( "ICE plot cannot be rendered for 2-way feature interactions. " "2-way feature interactions mandates PD plots using the " "'average' kind: " f"features={features!r} should be configured to use " f"kind={kind_!r} explicitly." ) features = tmp_features if categorical_features is None: is_categorical = [ (False,) if len(fxs) == 1 else (False, False) for fxs in features ] else: # we need to create a boolean indicator of which features are # categorical from the categorical_features list. categorical_features = np.asarray(categorical_features) if categorical_features.dtype.kind == "b": # categorical features provided as a list of boolean if categorical_features.size != n_features: raise ValueError( "When `categorical_features` is a boolean array-like, " "the array should be of shape (n_features,). 
Got " f"{categorical_features.size} elements while `X` contains " f"{n_features} features." ) is_categorical = [ tuple(categorical_features[fx] for fx in fxs) for fxs in features ] elif categorical_features.dtype.kind in ("i", "O", "U"): # categorical features provided as a list of indices or feature names categorical_features_idx = [ _get_feature_index(cat, feature_names=feature_names) for cat in categorical_features ] is_categorical = [ tuple([idx in categorical_features_idx for idx in fxs]) for fxs in features ] else: raise ValueError( "Expected `categorical_features` to be an array-like of boolean," f" integer, or string. Got {categorical_features.dtype} instead." ) for cats in is_categorical: if np.size(cats) == 2 and (cats[0] != cats[1]): raise ValueError( "Two-way partial dependence plots are not supported for pairs" " of continuous and categorical features." ) # collect the indices of the categorical features targeted by the partial # dependence computation categorical_features_targeted = set( [ fx for fxs, cats in zip(features, is_categorical) for fx in fxs if any(cats) ] ) if categorical_features_targeted: min_n_cats = min( [ len(_unique(_safe_indexing(X, idx, axis=1))) for idx in categorical_features_targeted ] ) if grid_resolution < min_n_cats: raise ValueError( "The resolution of the computed grid is less than the " "minimum number of categories in the targeted categorical " "features. Expect the `grid_resolution` to be greater than " f"{min_n_cats}. Got {grid_resolution} instead." ) for is_cat, kind_plot in zip(is_categorical, kind_): if any(is_cat) and kind_plot != "average": raise ValueError( "It is not possible to display individual effects for" " categorical features." 
) # Early exit if the axes does not have the correct number of axes if ax is not None and not isinstance(ax, plt.Axes): axes = np.asarray(ax, dtype=object) if axes.size != len(features): raise ValueError( "Expected ax to have {} axes, got {}".format( len(features), axes.size ) ) for i in chain.from_iterable(features): if i >= len(feature_names): raise ValueError( "All entries of features must be less than " "len(feature_names) = {0}, got {1}.".format(len(feature_names), i) ) if isinstance(subsample, numbers.Integral): if subsample <= 0: raise ValueError( f"When an integer, subsample={subsample} should be positive." ) elif isinstance(subsample, numbers.Real): if subsample <= 0 or subsample >= 1: raise ValueError( f"When a floating-point, subsample={subsample} should be in " "the (0, 1) range." ) # compute predictions and/or averaged predictions pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(partial_dependence)( estimator, X, fxs, sample_weight=sample_weight, feature_names=feature_names, categorical_features=categorical_features, response_method=response_method, method=method, grid_resolution=grid_resolution, percentiles=percentiles, kind=kind_plot, custom_values=custom_values, ) for kind_plot, fxs in zip(kind_, features) ) # For multioutput regression, we can only check the validity of target # now that we have the predictions. # Also note: as multiclass-multioutput classifiers are not supported, # multiclass and multioutput scenario are mutually exclusive. So there is # no risk of overwriting target_idx here. 
pd_result = pd_results[0] # checking the first result is enough n_tasks = ( pd_result.average.shape[0] if kind_[0] == "average" else pd_result.individual.shape[0] ) if is_regressor(estimator) and n_tasks > 1: if target is None: raise ValueError("target must be specified for multi-output regressors") if not 0 <= target <= n_tasks: raise ValueError( "target must be in [0, n_tasks], got {}.".format(target) ) target_idx = target deciles = {} for fxs, cats in zip(features, is_categorical): for fx, cat in zip(fxs, cats): if not cat and fx not in deciles: X_col = _safe_indexing(X, fx, axis=1) deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1)) display = cls( pd_results=pd_results, features=features, feature_names=feature_names, target_idx=target_idx, deciles=deciles, kind=kind, subsample=subsample, random_state=random_state, is_categorical=is_categorical, ) return display.plot( ax=ax, n_cols=n_cols, line_kw=line_kw, ice_lines_kw=ice_lines_kw, pd_line_kw=pd_line_kw, contour_kw=contour_kw, centered=centered, ) def _get_sample_count(self, n_samples): """Compute the number of samples as an integer.""" if isinstance(self.subsample, numbers.Integral): if self.subsample < n_samples: return self.subsample return n_samples elif isinstance(self.subsample, numbers.Real): return ceil(n_samples * self.subsample) return n_samples def _plot_ice_lines( self, preds, feature_values, n_ice_to_plot, ax, pd_plot_idx, n_total_lines_by_plot, individual_line_kw, ): """Plot the ICE lines. Parameters ---------- preds : ndarray of shape \ (n_instances, n_grid_points) The predictions computed for all points of `feature_values` for a given feature for all samples in `X`. feature_values : ndarray of shape (n_grid_points,) The feature values for which the predictions have been computed. n_ice_to_plot : int The number of ICE lines to plot. ax : Matplotlib axes The axis on which to plot the ICE lines. pd_plot_idx : int The sequential index of the plot. 
It will be unraveled to find the matching 2D position in the grid layout. n_total_lines_by_plot : int The total number of lines expected to be plot on the axis. individual_line_kw : dict Dict with keywords passed when plotting the ICE lines. """ rng = check_random_state(self.random_state) # subsample ice ice_lines_idx = rng.choice( preds.shape[0], n_ice_to_plot, replace=False, ) ice_lines_subsampled = preds[ice_lines_idx, :] # plot the subsampled ice for ice_idx, ice in enumerate(ice_lines_subsampled): line_idx = np.unravel_index( pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape ) self.lines_[line_idx] = ax.plot( feature_values, ice.ravel(), **individual_line_kw )[0] def _plot_average_dependence( self, avg_preds, feature_values, ax, pd_line_idx, line_kw, categorical, bar_kw, ): """Plot the average partial dependence. Parameters ---------- avg_preds : ndarray of shape (n_grid_points,) The average predictions for all points of `feature_values` for a given feature for all samples in `X`. feature_values : ndarray of shape (n_grid_points,) The feature values for which the predictions have been computed. ax : Matplotlib axes The axis on which to plot the average PD. pd_line_idx : int The sequential index of the plot. It will be unraveled to find the matching 2D position in the grid layout. line_kw : dict Dict with keywords passed when plotting the PD plot. categorical : bool Whether feature is categorical. bar_kw: dict Dict with keywords passed when plotting the PD bars (categorical). 
""" if categorical: bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape) self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0] ax.tick_params(axis="x", rotation=90) else: line_idx = np.unravel_index(pd_line_idx, self.lines_.shape) self.lines_[line_idx] = ax.plot( feature_values, avg_preds, **line_kw, )[0] def _plot_one_way_partial_dependence( self, kind, preds, avg_preds, feature_values, feature_idx, n_ice_lines, ax, n_cols, pd_plot_idx, n_lines, ice_lines_kw, pd_line_kw, categorical, bar_kw, pdp_lim, ): """Plot 1-way partial dependence: ICE and PDP. Parameters ---------- kind : str The kind of partial plot to draw. preds : ndarray of shape \ (n_instances, n_grid_points) or None The predictions computed for all points of `feature_values` for a given feature for all samples in `X`. avg_preds : ndarray of shape (n_grid_points,) The average predictions for all points of `feature_values` for a given feature for all samples in `X`. feature_values : ndarray of shape (n_grid_points,) The feature values for which the predictions have been computed. feature_idx : int The index corresponding to the target feature. n_ice_lines : int The number of ICE lines to plot. ax : Matplotlib axes The axis on which to plot the ICE and PDP lines. n_cols : int or None The number of column in the axis. pd_plot_idx : int The sequential index of the plot. It will be unraveled to find the matching 2D position in the grid layout. n_lines : int The total number of lines expected to be plot on the axis. ice_lines_kw : dict Dict with keywords passed when plotting the ICE lines. pd_line_kw : dict Dict with keywords passed when plotting the PD plot. categorical : bool Whether feature is categorical. bar_kw: dict Dict with keywords passed when plotting the PD bars (categorical). pdp_lim : dict Global min and max average predictions, such that all plots will have the same scale and y limits. `pdp_lim[1]` is the global min and max for single partial dependence curves. 
""" from matplotlib import transforms if kind in ("individual", "both"): self._plot_ice_lines( preds[self.target_idx], feature_values, n_ice_lines, ax, pd_plot_idx, n_lines, ice_lines_kw, ) if kind in ("average", "both"): # the average is stored as the last line if kind == "average": pd_line_idx = pd_plot_idx else: pd_line_idx = pd_plot_idx * n_lines + n_ice_lines self._plot_average_dependence( avg_preds[self.target_idx].ravel(), feature_values, ax, pd_line_idx, pd_line_kw, categorical, bar_kw, ) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) # create the decile line for the vertical axis vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) if self.deciles.get(feature_idx[0], None) is not None: self.deciles_vlines_[vlines_idx] = ax.vlines( self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color="k", ) # reset ylim which was overwritten by vlines min_val = min(val[0] for val in pdp_lim.values()) max_val = max(val[1] for val in pdp_lim.values()) ax.set_ylim([min_val, max_val]) # Set xlabel if it is not already set if not ax.get_xlabel(): ax.set_xlabel(self.feature_names[feature_idx[0]]) if n_cols is None or pd_plot_idx % n_cols == 0: if not ax.get_ylabel(): ax.set_ylabel("Partial dependence") else: ax.set_yticklabels([]) if pd_line_kw.get("label", None) and kind != "individual" and not categorical: ax.legend() def _plot_two_way_partial_dependence( self, avg_preds, feature_values, feature_idx, ax, pd_plot_idx, Z_level, contour_kw, categorical, heatmap_kw, ): """Plot 2-way partial dependence. Parameters ---------- avg_preds : ndarray of shape \ (n_instances, n_grid_points, n_grid_points) The average predictions for all points of `feature_values[0]` and `feature_values[1]` for some given features for all samples in `X`. feature_values : seq of 1d array A sequence of array of the feature values for which the predictions have been computed. 
feature_idx : tuple of int The indices of the target features ax : Matplotlib axes The axis on which to plot the ICE and PDP lines. pd_plot_idx : int The sequential index of the plot. It will be unraveled to find the matching 2D position in the grid layout. Z_level : ndarray of shape (8, 8) The Z-level used to encode the average predictions. contour_kw : dict Dict with keywords passed when plotting the contours. categorical : bool Whether features are categorical. heatmap_kw: dict Dict with keywords passed when plotting the PD heatmap (categorical). """ if categorical: import matplotlib.pyplot as plt default_im_kw = dict(interpolation="nearest", cmap="viridis") im_kw = {**default_im_kw, **heatmap_kw} data = avg_preds[self.target_idx] im = ax.imshow(data, **im_kw) text = None cmap_min, cmap_max = im.cmap(0), im.cmap(1.0) text = np.empty_like(data, dtype=object) # print text with appropriate color depending on background thresh = (data.max() + data.min()) / 2.0 for flat_index in range(data.size): row, col = np.unravel_index(flat_index, data.shape) color = cmap_max if data[row, col] < thresh else cmap_min values_format = ".2f" text_data = format(data[row, col], values_format) text_kwargs = dict(ha="center", va="center", color=color) text[row, col] = ax.text(col, row, text_data, **text_kwargs) fig = ax.figure fig.colorbar(im, ax=ax) ax.set( xticks=np.arange(len(feature_values[1])), yticks=np.arange(len(feature_values[0])), xticklabels=feature_values[1], yticklabels=feature_values[0], xlabel=self.feature_names[feature_idx[1]], ylabel=self.feature_names[feature_idx[0]], ) plt.setp(ax.get_xticklabels(), rotation="vertical") heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape) self.heatmaps_[heatmap_idx] = im else: from matplotlib import transforms XX, YY = np.meshgrid(feature_values[0], feature_values[1]) Z = avg_preds[self.target_idx].T CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors="k") contour_idx = np.unravel_index(pd_plot_idx, 
self.contours_.shape) self.contours_[contour_idx] = ax.contourf( XX, YY, Z, levels=Z_level, vmax=Z_level[-1], vmin=Z_level[0], **contour_kw, ) ax.clabel(CS, fmt="%2.2f", colors="k", fontsize=10, inline=True) trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) # create the decile line for the vertical axis xlim, ylim = ax.get_xlim(), ax.get_ylim() vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) self.deciles_vlines_[vlines_idx] = ax.vlines( self.deciles[feature_idx[0]], 0, 0.05, transform=trans, color="k", ) # create the decile line for the horizontal axis hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape) self.deciles_hlines_[hlines_idx] = ax.hlines( self.deciles[feature_idx[1]], 0, 0.05, transform=trans, color="k", ) # reset xlim and ylim since they are overwritten by hlines and # vlines ax.set_xlim(xlim) ax.set_ylim(ylim) # set xlabel if it is not already set if not ax.get_xlabel(): ax.set_xlabel(self.feature_names[feature_idx[0]]) ax.set_ylabel(self.feature_names[feature_idx[1]]) def plot( self, *, ax=None, n_cols=3, line_kw=None, ice_lines_kw=None, pd_line_kw=None, contour_kw=None, bar_kw=None, heatmap_kw=None, pdp_lim=None, centered=False, ): """Plot partial dependence plots. Parameters ---------- ax : Matplotlib axes or array-like of Matplotlib axes, default=None - If a single axis is passed in, it is treated as a bounding axes and a grid of partial dependence plots will be drawn within these bounds. The `n_cols` parameter controls the number of columns in the grid. - If an array-like of axes are passed in, the partial dependence plots will be drawn directly into these axes. - If `None`, a figure and a bounding axes is created and treated as the single axes case. n_cols : int, default=3 The maximum number of columns in the grid plot. Only active when `ax` is a single axes or `None`. line_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.plot` call. 
For one-way partial dependence plots. ice_lines_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For ICE lines in the one-way partial dependence plots. The key value pairs defined in `ice_lines_kw` takes priority over `line_kw`. .. versionadded:: 1.0 pd_line_kw : dict, default=None Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. For partial dependence in one-way partial dependence plots. The key value pairs defined in `pd_line_kw` takes priority over `line_kw`. .. versionadded:: 1.0 contour_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.contourf` call for two-way partial dependence plots. bar_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.bar` call for one-way categorical partial dependence plots. .. versionadded:: 1.2 heatmap_kw : dict, default=None Dict with keywords passed to the `matplotlib.pyplot.imshow` call for two-way categorical partial dependence plots. .. versionadded:: 1.2 pdp_lim : dict, default=None Global min and max average predictions, such that all plots will have the same scale and y limits. `pdp_lim[1]` is the global min and max for single partial dependence curves. `pdp_lim[2]` is the global min and max for two-way partial dependence curves. If `None` (default), the limit will be inferred from the global minimum and maximum of all predictions. .. versionadded:: 1.1 centered : bool, default=False If `True`, the ICE and PD lines will start at the origin of the y-axis. By default, no centering is done. .. versionadded:: 1.1 Returns ------- display : :class:`~sklearn.inspection.PartialDependenceDisplay` Returns a :class:`~sklearn.inspection.PartialDependenceDisplay` object that contains the partial dependence plots. 
""" check_matplotlib_support("plot_partial_dependence") import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpecFromSubplotSpec if isinstance(self.kind, str): kind = [self.kind] * len(self.features) else: kind = self.kind if self.is_categorical is None: is_categorical = [ (False,) if len(fx) == 1 else (False, False) for fx in self.features ] else: is_categorical = self.is_categorical if len(kind) != len(self.features): raise ValueError( "When `kind` is provided as a list of strings, it should " "contain as many elements as `features`. `kind` contains " f"{len(kind)} element(s) and `features` contains " f"{len(self.features)} element(s)." ) valid_kinds = {"average", "individual", "both"} if any([k not in valid_kinds for k in kind]): raise ValueError( f"Values provided to `kind` must be one of: {valid_kinds!r} or a list" f" of such values. Currently, kind={self.kind!r}" ) # Center results before plotting if not centered: pd_results_ = self.pd_results else: pd_results_ = [] for kind_plot, pd_result in zip(kind, self.pd_results): current_results = {"grid_values": pd_result["grid_values"]} if kind_plot in ("individual", "both"): preds = pd_result.individual preds = preds - preds[self.target_idx, :, 0, None] current_results["individual"] = preds if kind_plot in ("average", "both"): avg_preds = pd_result.average avg_preds = avg_preds - avg_preds[self.target_idx, 0, None] current_results["average"] = avg_preds pd_results_.append(Bunch(**current_results)) if pdp_lim is None: # get global min and max average predictions of PD grouped by plot type pdp_lim = {} for kind_plot, pdp in zip(kind, pd_results_): values = pdp["grid_values"] preds = pdp.average if kind_plot == "average" else pdp.individual min_pd = preds[self.target_idx].min() max_pd = preds[self.target_idx].max() # expand the limits to account so that the plotted lines do not touch # the edges of the plot span = max_pd - min_pd min_pd -= 0.05 * span max_pd += 0.05 * span n_fx = len(values) old_min_pd, 
old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) min_pd = min(min_pd, old_min_pd) max_pd = max(max_pd, old_max_pd) pdp_lim[n_fx] = (min_pd, max_pd) if line_kw is None: line_kw = {} if ice_lines_kw is None: ice_lines_kw = {} if pd_line_kw is None: pd_line_kw = {} if bar_kw is None: bar_kw = {} if heatmap_kw is None: heatmap_kw = {} if ax is None: _, ax = plt.subplots() if contour_kw is None: contour_kw = {} default_contour_kws = {"alpha": 0.75} contour_kw = _validate_style_kwargs(default_contour_kws, contour_kw) n_features = len(self.features) is_average_plot = [kind_plot == "average" for kind_plot in kind] if all(is_average_plot): # only average plots are requested n_ice_lines = 0 n_lines = 1 else: # we need to determine the number of ICE samples computed ice_plot_idx = is_average_plot.index(False) n_ice_lines = self._get_sample_count( len(pd_results_[ice_plot_idx].individual[0]) ) if any([kind_plot == "both" for kind_plot in kind]): n_lines = n_ice_lines + 1 # account for the average line else: n_lines = n_ice_lines if isinstance(ax, plt.Axes): # If ax was set off, it has most likely been set to off # by a previous call to plot. 
if not ax.axison: raise ValueError( "The ax was already used in another plot " "function, please set ax=display.axes_ " "instead" ) ax.set_axis_off() self.bounding_ax_ = ax self.figure_ = ax.figure n_cols = min(n_cols, n_features) n_rows = int(np.ceil(n_features / float(n_cols))) self.axes_ = np.empty((n_rows, n_cols), dtype=object) if all(is_average_plot): self.lines_ = np.empty((n_rows, n_cols), dtype=object) else: self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object) self.contours_ = np.empty((n_rows, n_cols), dtype=object) self.bars_ = np.empty((n_rows, n_cols), dtype=object) self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object) axes_ravel = self.axes_.ravel() gs = GridSpecFromSubplotSpec( n_rows, n_cols, subplot_spec=ax.get_subplotspec() ) for i, spec in zip(range(n_features), gs): axes_ravel[i] = self.figure_.add_subplot(spec) else: # array-like ax = np.asarray(ax, dtype=object) if ax.size != n_features: raise ValueError( "Expected ax to have {} axes, got {}".format(n_features, ax.size) ) if ax.ndim == 2: n_cols = ax.shape[1] else: n_cols = None self.bounding_ax_ = None self.figure_ = ax.ravel()[0].figure self.axes_ = ax if all(is_average_plot): self.lines_ = np.empty_like(ax, dtype=object) else: self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object) self.contours_ = np.empty_like(ax, dtype=object) self.bars_ = np.empty_like(ax, dtype=object) self.heatmaps_ = np.empty_like(ax, dtype=object) # create contour levels for two-way plots if 2 in pdp_lim: Z_level = np.linspace(*pdp_lim[2], num=8) self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object) self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object) for pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot) in enumerate( zip( self.axes_.ravel(), self.features, is_categorical, pd_results_, kind, ) ): avg_preds = None preds = None feature_values = pd_result["grid_values"] if kind_plot == "individual": preds = pd_result.individual elif kind_plot == "average": avg_preds = 
pd_result.average else: # kind_plot == 'both' avg_preds = pd_result.average preds = pd_result.individual if len(feature_values) == 1: # define the line-style for the current plot default_line_kws = { "color": "C0", "label": "average" if kind_plot == "both" else None, } if kind_plot == "individual": default_ice_lines_kws = {"alpha": 0.3, "linewidth": 0.5} default_pd_lines_kws = {} elif kind_plot == "both": # by default, we need to distinguish the average line from # the individual lines via color and line style default_ice_lines_kws = { "alpha": 0.3, "linewidth": 0.5, "color": "tab:blue", } default_pd_lines_kws = { "color": "tab:orange", "linestyle": "--", } else: default_ice_lines_kws = {} default_pd_lines_kws = {} default_ice_lines_kws = {**default_line_kws, **default_ice_lines_kws} default_pd_lines_kws = {**default_line_kws, **default_pd_lines_kws} line_kw = _validate_style_kwargs(default_line_kws, line_kw) ice_lines_kw = _validate_style_kwargs( _validate_style_kwargs(default_ice_lines_kws, line_kw), ice_lines_kw ) del ice_lines_kw["label"] pd_line_kw = _validate_style_kwargs( _validate_style_kwargs(default_pd_lines_kws, line_kw), pd_line_kw ) default_bar_kws = {"color": "C0"} bar_kw = _validate_style_kwargs(default_bar_kws, bar_kw) default_heatmap_kw = {} heatmap_kw = _validate_style_kwargs(default_heatmap_kw, heatmap_kw) self._plot_one_way_partial_dependence( kind_plot, preds, avg_preds, feature_values[0], feature_idx, n_ice_lines, axi, n_cols, pd_plot_idx, n_lines, ice_lines_kw, pd_line_kw, cat[0], bar_kw, pdp_lim, ) else: self._plot_two_way_partial_dependence( avg_preds, feature_values, feature_idx, axi, pd_plot_idx, Z_level, contour_kw, cat[0] and cat[1], heatmap_kw, ) return self
python
github
https://github.com/scikit-learn/scikit-learn
sklearn/inspection/_plot/partial_dependence.py
#!/usr/bin/env/python # -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # LICENSE # # Copyright (c) 2010-2013, GEM Foundation, G. Weatherill, M. Pagani, # D. Monelli. # # The Hazard Modeller's Toolkit is free software: you can redistribute # it and/or modify it under the terms of the GNU Affero General Public #License as published by the Free Software Foundation, either version #3 of the License, or (at your option) any later version. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/> # #DISCLAIMER # # The software Hazard Modeller's Toolkit (hmtk) provided herein #is released as a prototype implementation on behalf of # scientists and engineers working within the GEM Foundation (Global #Earthquake Model). # # It is distributed for the purpose of open collaboration and in the # hope that it will be useful to the scientific, engineering, disaster # risk and software design communities. # # The software is NOT distributed as part of GEM's OpenQuake suite # (http://www.globalquakemodel.org/openquake) and must be considered as a # separate entity. The software provided herein is designed and implemented # by scientific staff. It is not developed to the design standards, nor # subject to same level of critical review by professional software # developers, as GEM's OpenQuake software suite. # # Feedback and contribution to the software is welcome, and can be # directed to the hazard scientific staff of the GEM Model Facility # (hazard@globalquakemodel.org). # # The Hazard Modeller's Toolkit (hmtk) is therefore distributed WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # The GEM Foundation, and the authors of the software, assume no # liability for use of the software. 
''' Module to test :hmtk.faults.mfd.anderson_luco_arbitrary.AndersonLucoArbitrary class ''' import os import unittest import numpy as np from math import log from openquake.hazardlib.scalerel import WC1994 from hmtk.faults.mfd.anderson_luco_arbitrary import (Type1RecurrenceModel, Type2RecurrenceModel, Type3RecurrenceModel, AndersonLucoArbitrary) BASE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data') AL83_FIG2_MODEL = np.genfromtxt(os.path.join(BASE_DATA_PATH, 'anderson_luco_arbitrary_results.dat')) AL83_INC_DATA = np.genfromtxt(os.path.join(BASE_DATA_PATH, 'anderson_luco_arbitrary_incremental.dat')) class TestType1Recurrence(unittest.TestCase): ''' Tests the Recurrence function of the Anderson & Luco (1983) arbitrary type 1 model ''' def setUp(self): ''' ''' self.magnitudes = AL83_FIG2_MODEL[:, 0] self.model = Type1RecurrenceModel() self.mmax = 8.0 self.bbar = 1.0 * log(10.) self.dbar = 1.5 * log(10.) def test_recurrence_model_type1(self): ''' Tests the recurrence function In all cases if bbar > dbar (1.5) then models will fail! ''' # Tests 1 - master case - reproduces the N1 line of Figure 2 in # Anderson & Luco (1983) # Requires setting the moment slip to 8.1E+25 dyne-cm moment_slip = 8.1E25 expected_results = AL83_FIG2_MODEL[:, 1] for iloc, mag in enumerate(self.magnitudes): self.assertAlmostEqual(expected_results[iloc], self.model.cumulative_value(moment_slip, self.mmax, mag, self.bbar, self.dbar), 7) class TestType2Recurrence(unittest.TestCase): ''' Tests the Recurrence function of the Anderson & Luco (1983) arbitrary type 2 model ''' def setUp(self): ''' ''' self.magnitudes = AL83_FIG2_MODEL[:, 0] self.model = Type2RecurrenceModel() self.mmax = 8.0 self.bbar = 1.0 * log(10.) self.dbar = 1.5 * log(10.) def test_recurrence_model_type1(self): ''' Tests the recurrence function In all cases if bbar > dbar (1.5) then models will fail! 
''' # Tests 1 - master case - reproduces the N2 line of Figure 2 in # Anderson & Luco (1983) # Requires setting the moment slip to 8.1E+25 dyne-cm moment_slip = 8.1E25 expected_results = AL83_FIG2_MODEL[:, 2] for iloc, mag in enumerate(self.magnitudes): self.assertAlmostEqual(expected_results[iloc], self.model.cumulative_value(moment_slip, self.mmax, mag, self.bbar, self.dbar), 7) class TestType3Recurrence(unittest.TestCase): ''' Tests the Recurrence function of the Anderson & Luco (1983) arbitrary type 3 model ''' def setUp(self): ''' ''' self.magnitudes = AL83_FIG2_MODEL[:, 0] self.model = Type3RecurrenceModel() self.mmax = 8.0 self.bbar = 1.0 * log(10.) self.dbar = 1.5 * log(10.) def test_recurrence_model_type1(self): ''' Tests the recurrence function In all cases if bbar > dbar (1.5) then models will fail! ''' # Tests 1 - master case - reproduces the N1 line of Figure 2 in # Anderson & Luco (1983) # Requires setting the moment slip to 8.1E+25 dyne-cm moment_slip = 8.1E25 expected_results = AL83_FIG2_MODEL[:, 3] for iloc, mag in enumerate(self.magnitudes): self.assertAlmostEqual(expected_results[iloc], self.model.cumulative_value(moment_slip, self.mmax, mag, self.bbar, self.dbar), 7) class TestAndersonLucoArbitrary(unittest.TestCase): ''' Tests the Anderson & Luco Arbitrary models :class hmtk.faults.mfd.anderson_luco_arbitrary.AndersonLucoArbitrary ''' def setUp(self): self.model = AndersonLucoArbitrary() self.config = {'Model_Type': 'First', 'MFD_spacing': 0.1, 'Model_Weight': 1.0, 'Minimum_Magnitude': 5.0, 'Maximum_Magnitude': None, 'b_value': [1.0, 0.1]} self.msr = WC1994() def test_case_setup(self): ''' Tests the basic setup ''' expected_dict = {'b_value': 1.0, 'b_value_sigma': 0.1, 'bin_width': 0.1, 'mfd_model': 'Anderson & Luco (Arbitrary) First', 'mfd_type': 'First', 'mfd_weight': 1.0, 'mmax': None, 'mmax_sigma': None, 'mmin': 5.0, 'occurrence_rate': None} self.model.setUp(self.config) self.assertDictEqual(expected_dict, self.model.__dict__) def 
test_get_mmax(self): ''' Tests the function to get Mmax Values come from WC1994 (tested in openquake.hazardlib) - only functionality is tested for here! ''' # Case 1 MMmax and uncertainty specified in config self.config['Maximum_Magnitude'] = 8.0 self.config['Maximum_Magnitude_Uncertainty'] = 0.2 self.model = AndersonLucoArbitrary() self.model.setUp(self.config) self.model.get_mmax(self.config, self.msr, 0., 8500.) self.assertAlmostEqual(self.model.mmax, 8.0) self.assertAlmostEqual(self.model.mmax_sigma, 0.2) # Case 2: Mmax and uncertainty not specified in config self.config['Maximum_Magnitude'] = None self.config['Maximum_Magnitude_Uncertainty'] = None self.model = AndersonLucoArbitrary() self.model.setUp(self.config) self.model.get_mmax(self.config, self.msr, 0., 8500.) self.assertAlmostEqual(self.model.mmax, 7.9880073) self.assertAlmostEqual(self.model.mmax_sigma, 0.23) def test_get_mfd(self): ''' Tests the function to get magnitude frequency distribution ''' self.msr = WC1994() # Test 1: For a fault with 20 mm/yr slip, and an area of 30000 km ** 2 self.msr = WC1994() # Testing all three calculators! for iloc, model_type in enumerate(['First', 'Second', 'Third']): self.model = AndersonLucoArbitrary() self.config = {'Model_Type': model_type, 'MFD_spacing': 0.1, 'Model_Weight': 1.0, 'Minimum_Magnitude': 5.0, 'Maximum_Magnitude': None, 'b_value': [1.0, 0.1]} self.model.setUp(self.config) self.model.get_mmax(self.config, self.msr, 0., 30000.) test_output = self.model.get_mfd(20., 30000.) print AL83_INC_DATA[:, iloc], test_output[2] np.testing.assert_array_almost_equal(AL83_INC_DATA[:, iloc], test_output[2]) # Test case when b-value greater than d-value (raises warning!) self.model = AndersonLucoArbitrary() self.config = {'Model_Type': model_type, 'MFD_spacing': 0.1, 'Model_Weight': 1.0, 'Minimum_Magnitude': 5.0, 'Maximum_Magnitude': None, 'b_value': [2.0, 0.1]} self.model.setUp(self.config) self.model.get_mmax(self.config, self.msr, 0., 30000.) 
self.model.get_mfd(20., 30000.) self.assertTrue(np.all(np.isnan(self.model.occurrence_rate)))
unknown
codeparrot/codeparrot-clean
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package routes import ( "net/http" "os" "path" "github.com/emicklei/go-restful/v3" ) // Logs adds handlers for the /logs path serving log files from /var/log. type Logs struct{} // Install func registers the logs handler. func (l Logs) Install(c *restful.Container) { // use restful: ws.Route(ws.GET("/logs/{logpath:*}").To(fileHandler)) // See github.com/emicklei/go-restful/blob/master/examples/static/restful-serve-static.go ws := new(restful.WebService) ws.Path("/logs") ws.Doc("get log files") ws.Route(ws.GET("/{logpath:*}").To(logFileHandler).Param(ws.PathParameter("logpath", "path to the log").DataType("string"))) ws.Route(ws.GET("/").To(logFileListHandler)) c.Add(ws) } func logFileHandler(req *restful.Request, resp *restful.Response) { logdir := "/var/log" actual := path.Join(logdir, req.PathParameter("logpath")) // check filename length first, return 404 if it's oversize. if logFileNameIsTooLong(actual) { http.Error(resp, "file not found", http.StatusNotFound) return } http.ServeFile(resp.ResponseWriter, req.Request, actual) } func logFileListHandler(req *restful.Request, resp *restful.Response) { logdir := "/var/log" http.ServeFile(resp.ResponseWriter, req.Request, logdir) } // logFileNameIsTooLong checks filename length, returns true if it's longer than 255. 
// cause http.ServeFile returns default error code 500 except for NotExist and Forbidden, but we need to separate the real 500 from oversize filename here. func logFileNameIsTooLong(filePath string) bool { _, err := os.Stat(filePath) if err != nil { if e, ok := err.(*os.PathError); ok && e.Err == fileNameTooLong { return true } } return false }
go
github
https://github.com/kubernetes/kubernetes
pkg/routes/logs.go
// Copyright 2014 Manu Martinez-Almeida. All rights reserved. // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. package gin import ( "errors" "fmt" "io" "io/fs" "log" "maps" "math" "mime/multipart" "net" "net/http" "net/url" "os" "path/filepath" "strings" "sync" "time" "github.com/gin-contrib/sse" "github.com/gin-gonic/gin/binding" "github.com/gin-gonic/gin/render" ) // Content-Type MIME of the most common data formats. const ( MIMEJSON = binding.MIMEJSON MIMEHTML = binding.MIMEHTML MIMEXML = binding.MIMEXML MIMEXML2 = binding.MIMEXML2 MIMEPlain = binding.MIMEPlain MIMEPOSTForm = binding.MIMEPOSTForm MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm MIMEYAML = binding.MIMEYAML MIMEYAML2 = binding.MIMEYAML2 MIMETOML = binding.MIMETOML MIMEPROTOBUF = binding.MIMEPROTOBUF MIMEBSON = binding.MIMEBSON ) // BodyBytesKey indicates a default body bytes key. const BodyBytesKey = "_gin-gonic/gin/bodybyteskey" // ContextKey is the key that a Context returns itself for. const ContextKey = "_gin-gonic/gin/contextkey" type ContextKeyType int const ContextRequestKey ContextKeyType = 0 // abortIndex represents a typical value used in abort functions. const abortIndex int8 = math.MaxInt8 >> 1 // Context is the most important part of gin. It allows us to pass variables between middleware, // manage the flow, validate the JSON of a request and render a JSON response for example. type Context struct { writermem responseWriter Request *http.Request Writer ResponseWriter Params Params handlers HandlersChain index int8 fullPath string engine *Engine params *Params skippedNodes *[]skippedNode // This mutex protects Keys map. mu sync.RWMutex // Keys is a key/value pair exclusively for the context of each request. Keys map[any]any // Errors is a list of errors attached to all the handlers/middlewares who used this context. Errors errorMsgs // Accepted defines a list of manually accepted formats for content negotiation. 
Accepted []string // queryCache caches the query result from c.Request.URL.Query(). queryCache url.Values // formCache caches c.Request.PostForm, which contains the parsed form data from POST, PATCH, // or PUT body parameters. formCache url.Values // SameSite allows a server to define a cookie attribute making it impossible for // the browser to send this cookie along with cross-site requests. sameSite http.SameSite } /************************************/ /********** CONTEXT CREATION ********/ /************************************/ func (c *Context) reset() { c.Writer = &c.writermem c.Params = c.Params[:0] c.handlers = nil c.index = -1 c.fullPath = "" c.Keys = nil c.Errors = c.Errors[:0] c.Accepted = nil c.queryCache = nil c.formCache = nil c.sameSite = 0 *c.params = (*c.params)[:0] *c.skippedNodes = (*c.skippedNodes)[:0] } // Copy returns a copy of the current context that can be safely used outside the request's scope. // This has to be used when the context has to be passed to a goroutine. func (c *Context) Copy() *Context { cp := Context{ writermem: c.writermem, Request: c.Request, engine: c.engine, } cp.writermem.ResponseWriter = nil cp.Writer = &cp.writermem cp.index = abortIndex cp.handlers = nil cp.fullPath = c.fullPath cKeys := c.Keys c.mu.RLock() cp.Keys = maps.Clone(cKeys) c.mu.RUnlock() cParams := c.Params cp.Params = make([]Param, len(cParams)) copy(cp.Params, cParams) return &cp } // HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", // this function will return "main.handleGetUsers". 
func (c *Context) HandlerName() string { return nameOfFunction(c.handlers.Last()) } // HandlerNames returns a list of all registered handlers for this context in descending order, // following the semantics of HandlerName() func (c *Context) HandlerNames() []string { hn := make([]string, 0, len(c.handlers)) for _, val := range c.handlers { if val == nil { continue } hn = append(hn, nameOfFunction(val)) } return hn } // Handler returns the main handler. func (c *Context) Handler() HandlerFunc { return c.handlers.Last() } // FullPath returns a matched route full path. For not found routes // returns an empty string. // // router.GET("/user/:id", func(c *gin.Context) { // c.FullPath() == "/user/:id" // true // }) func (c *Context) FullPath() string { return c.fullPath } /************************************/ /*********** FLOW CONTROL ***********/ /************************************/ // Next should be used only inside middleware. // It executes the pending handlers in the chain inside the calling handler. // See example in GitHub. func (c *Context) Next() { c.index++ for c.index < safeInt8(len(c.handlers)) { if c.handlers[c.index] != nil { c.handlers[c.index](c) } c.index++ } } // IsAborted returns true if the current context was aborted. func (c *Context) IsAborted() bool { return c.index >= abortIndex } // Abort prevents pending handlers from being called. Note that this will not stop the current handler. // Let's say you have an authorization middleware that validates that the current request is authorized. // If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers // for this request are not called. func (c *Context) Abort() { c.index = abortIndex } // AbortWithStatus calls `Abort()` and writes the headers with the specified status code. // For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401). 
func (c *Context) AbortWithStatus(code int) { c.Status(code) c.Writer.WriteHeaderNow() c.Abort() } // AbortWithStatusPureJSON calls `Abort()` and then `PureJSON` internally. // This method stops the chain, writes the status code and return a JSON body without escaping. // It also sets the Content-Type as "application/json". func (c *Context) AbortWithStatusPureJSON(code int, jsonObj any) { c.Abort() c.PureJSON(code, jsonObj) } // AbortWithStatusJSON calls `Abort()` and then `JSON` internally. // This method stops the chain, writes the status code and return a JSON body. // It also sets the Content-Type as "application/json". func (c *Context) AbortWithStatusJSON(code int, jsonObj any) { c.Abort() c.JSON(code, jsonObj) } // AbortWithError calls `AbortWithStatus()` and `Error()` internally. // This method stops the chain, writes the status code and pushes the specified error to `c.Errors`. // See Context.Error() for more details. func (c *Context) AbortWithError(code int, err error) *Error { c.AbortWithStatus(code) return c.Error(err) } /************************************/ /********* ERROR MANAGEMENT *********/ /************************************/ // Error attaches an error to the current context. The error is pushed to a list of errors. // It's a good idea to call Error for each error that occurred during the resolution of a request. // A middleware can be used to collect all the errors and push them to a database together, // print a log, or append it in the HTTP response. // Error will panic if err is nil. func (c *Context) Error(err error) *Error { if err == nil { panic("err is nil") } var parsedError *Error ok := errors.As(err, &parsedError) if !ok { parsedError = &Error{ Err: err, Type: ErrorTypePrivate, } } c.Errors = append(c.Errors, parsedError) return parsedError } /************************************/ /******** METADATA MANAGEMENT********/ /************************************/ // Set is used to store a new key/value pair exclusively for this context. 
// It also lazy initializes c.Keys if it was not used previously. func (c *Context) Set(key any, value any) { c.mu.Lock() defer c.mu.Unlock() if c.Keys == nil { c.Keys = make(map[any]any) } c.Keys[key] = value } // Get returns the value for the given key, ie: (value, true). // If the value does not exist it returns (nil, false) func (c *Context) Get(key any) (value any, exists bool) { c.mu.RLock() defer c.mu.RUnlock() value, exists = c.Keys[key] return } // MustGet returns the value for the given key if it exists, otherwise it panics. func (c *Context) MustGet(key any) any { if value, exists := c.Get(key); exists { return value } panic(fmt.Sprintf("key %v does not exist", key)) } func getTyped[T any](c *Context, key any) (res T) { if val, ok := c.Get(key); ok && val != nil { res, _ = val.(T) } return } // GetString returns the value associated with the key as a string. func (c *Context) GetString(key any) string { return getTyped[string](c, key) } // GetBool returns the value associated with the key as a boolean. func (c *Context) GetBool(key any) bool { return getTyped[bool](c, key) } // GetInt returns the value associated with the key as an integer. func (c *Context) GetInt(key any) int { return getTyped[int](c, key) } // GetInt8 returns the value associated with the key as an integer 8. func (c *Context) GetInt8(key any) int8 { return getTyped[int8](c, key) } // GetInt16 returns the value associated with the key as an integer 16. func (c *Context) GetInt16(key any) int16 { return getTyped[int16](c, key) } // GetInt32 returns the value associated with the key as an integer 32. func (c *Context) GetInt32(key any) int32 { return getTyped[int32](c, key) } // GetInt64 returns the value associated with the key as an integer 64. func (c *Context) GetInt64(key any) int64 { return getTyped[int64](c, key) } // GetUint returns the value associated with the key as an unsigned integer. 
func (c *Context) GetUint(key any) uint { return getTyped[uint](c, key) } // GetUint8 returns the value associated with the key as an unsigned integer 8. func (c *Context) GetUint8(key any) uint8 { return getTyped[uint8](c, key) } // GetUint16 returns the value associated with the key as an unsigned integer 16. func (c *Context) GetUint16(key any) uint16 { return getTyped[uint16](c, key) } // GetUint32 returns the value associated with the key as an unsigned integer 32. func (c *Context) GetUint32(key any) uint32 { return getTyped[uint32](c, key) } // GetUint64 returns the value associated with the key as an unsigned integer 64. func (c *Context) GetUint64(key any) uint64 { return getTyped[uint64](c, key) } // GetFloat32 returns the value associated with the key as a float32. func (c *Context) GetFloat32(key any) float32 { return getTyped[float32](c, key) } // GetFloat64 returns the value associated with the key as a float64. func (c *Context) GetFloat64(key any) float64 { return getTyped[float64](c, key) } // GetTime returns the value associated with the key as time. func (c *Context) GetTime(key any) time.Time { return getTyped[time.Time](c, key) } // GetDuration returns the value associated with the key as a duration. func (c *Context) GetDuration(key any) time.Duration { return getTyped[time.Duration](c, key) } // GetError returns the value associated with the key as an error. func (c *Context) GetError(key any) error { return getTyped[error](c, key) } // GetIntSlice returns the value associated with the key as a slice of integers. func (c *Context) GetIntSlice(key any) []int { return getTyped[[]int](c, key) } // GetInt8Slice returns the value associated with the key as a slice of int8 integers. func (c *Context) GetInt8Slice(key any) []int8 { return getTyped[[]int8](c, key) } // GetInt16Slice returns the value associated with the key as a slice of int16 integers. 
func (c *Context) GetInt16Slice(key any) []int16 { return getTyped[[]int16](c, key) } // GetInt32Slice returns the value associated with the key as a slice of int32 integers. func (c *Context) GetInt32Slice(key any) []int32 { return getTyped[[]int32](c, key) } // GetInt64Slice returns the value associated with the key as a slice of int64 integers. func (c *Context) GetInt64Slice(key any) []int64 { return getTyped[[]int64](c, key) } // GetUintSlice returns the value associated with the key as a slice of unsigned integers. func (c *Context) GetUintSlice(key any) []uint { return getTyped[[]uint](c, key) } // GetUint8Slice returns the value associated with the key as a slice of uint8 integers. func (c *Context) GetUint8Slice(key any) []uint8 { return getTyped[[]uint8](c, key) } // GetUint16Slice returns the value associated with the key as a slice of uint16 integers. func (c *Context) GetUint16Slice(key any) []uint16 { return getTyped[[]uint16](c, key) } // GetUint32Slice returns the value associated with the key as a slice of uint32 integers. func (c *Context) GetUint32Slice(key any) []uint32 { return getTyped[[]uint32](c, key) } // GetUint64Slice returns the value associated with the key as a slice of uint64 integers. func (c *Context) GetUint64Slice(key any) []uint64 { return getTyped[[]uint64](c, key) } // GetFloat32Slice returns the value associated with the key as a slice of float32 numbers. func (c *Context) GetFloat32Slice(key any) []float32 { return getTyped[[]float32](c, key) } // GetFloat64Slice returns the value associated with the key as a slice of float64 numbers. func (c *Context) GetFloat64Slice(key any) []float64 { return getTyped[[]float64](c, key) } // GetStringSlice returns the value associated with the key as a slice of strings. func (c *Context) GetStringSlice(key any) []string { return getTyped[[]string](c, key) } // GetErrorSlice returns the value associated with the key as a slice of errors. 
func (c *Context) GetErrorSlice(key any) []error { return getTyped[[]error](c, key) } // GetStringMap returns the value associated with the key as a map of interfaces. func (c *Context) GetStringMap(key any) map[string]any { return getTyped[map[string]any](c, key) } // GetStringMapString returns the value associated with the key as a map of strings. func (c *Context) GetStringMapString(key any) map[string]string { return getTyped[map[string]string](c, key) } // GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. func (c *Context) GetStringMapStringSlice(key any) map[string][]string { return getTyped[map[string][]string](c, key) } // Delete deletes the key from the Context's Key map, if it exists. // This operation is safe to be used by concurrent go-routines func (c *Context) Delete(key any) { c.mu.Lock() defer c.mu.Unlock() if c.Keys != nil { delete(c.Keys, key) } } /************************************/ /************ INPUT DATA ************/ /************************************/ // Param returns the value of the URL param. // It is a shortcut for c.Params.ByName(key) // // router.GET("/user/:id", func(c *gin.Context) { // // a GET request to /user/john // id := c.Param("id") // id == "john" // // a GET request to /user/john/ // id := c.Param("id") // id == "/john/" // }) func (c *Context) Param(key string) string { return c.Params.ByName(key) } // AddParam adds param to context and // replaces path param key with given value for e2e testing purposes // Example Route: "/user/:id" // AddParam("id", 1) // Result: "/user/1" func (c *Context) AddParam(key, value string) { c.Params = append(c.Params, Param{Key: key, Value: value}) } // Query returns the keyed url query value if it exists, // otherwise it returns an empty string `("")`. 
// It is shortcut for `c.Request.URL.Query().Get(key)` // // GET /path?id=1234&name=Manu&value= // c.Query("id") == "1234" // c.Query("name") == "Manu" // c.Query("value") == "" // c.Query("wtf") == "" func (c *Context) Query(key string) (value string) { value, _ = c.GetQuery(key) return } // DefaultQuery returns the keyed url query value if it exists, // otherwise it returns the specified defaultValue string. // See: Query() and GetQuery() for further information. // // GET /?name=Manu&lastname= // c.DefaultQuery("name", "unknown") == "Manu" // c.DefaultQuery("id", "none") == "none" // c.DefaultQuery("lastname", "none") == "" func (c *Context) DefaultQuery(key, defaultValue string) string { if value, ok := c.GetQuery(key); ok { return value } return defaultValue } // GetQuery is like Query(), it returns the keyed url query value // if it exists `(value, true)` (even when the value is an empty string), // otherwise it returns `("", false)`. // It is shortcut for `c.Request.URL.Query().Get(key)` // // GET /?name=Manu&lastname= // ("Manu", true) == c.GetQuery("name") // ("", false) == c.GetQuery("id") // ("", true) == c.GetQuery("lastname") func (c *Context) GetQuery(key string) (string, bool) { if values, ok := c.GetQueryArray(key); ok { return values[0], ok } return "", false } // QueryArray returns a slice of strings for a given query key. // The length of the slice depends on the number of params with the given key. func (c *Context) QueryArray(key string) (values []string) { values, _ = c.GetQueryArray(key) return } func (c *Context) initQueryCache() { if c.queryCache == nil { if c.Request != nil && c.Request.URL != nil { c.queryCache = c.Request.URL.Query() } else { c.queryCache = url.Values{} } } } // GetQueryArray returns a slice of strings for a given query key, plus // a boolean value whether at least one value exists for the given key. 
func (c *Context) GetQueryArray(key string) (values []string, ok bool) { c.initQueryCache() values, ok = c.queryCache[key] return } // QueryMap returns a map for a given query key. func (c *Context) QueryMap(key string) (dicts map[string]string) { dicts, _ = c.GetQueryMap(key) return } // GetQueryMap returns a map for a given query key, plus a boolean value // whether at least one value exists for the given key. func (c *Context) GetQueryMap(key string) (map[string]string, bool) { c.initQueryCache() return getMapFromFormData(c.queryCache, key) } // PostForm returns the specified key from a POST urlencoded form or multipart form // when it exists, otherwise it returns an empty string `("")`. func (c *Context) PostForm(key string) (value string) { value, _ = c.GetPostForm(key) return } // DefaultPostForm returns the specified key from a POST urlencoded form or multipart form // when it exists, otherwise it returns the specified defaultValue string. // See: PostForm() and GetPostForm() for further information. func (c *Context) DefaultPostForm(key, defaultValue string) string { if value, ok := c.GetPostForm(key); ok { return value } return defaultValue } // GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded // form or multipart form when it exists `(value, true)` (even when the value is an empty string), // otherwise it returns ("", false). // For example, during a PATCH request to update the user's email: // // email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" // email= --> ("", true) := GetPostForm("email") // set email to "" // --> ("", false) := GetPostForm("email") // do nothing with email func (c *Context) GetPostForm(key string) (string, bool) { if values, ok := c.GetPostFormArray(key); ok { return values[0], ok } return "", false } // PostFormArray returns a slice of strings for a given form key. 
// The length of the slice depends on the number of params with the given key. func (c *Context) PostFormArray(key string) (values []string) { values, _ = c.GetPostFormArray(key) return } func (c *Context) initFormCache() { if c.formCache == nil { c.formCache = make(url.Values) req := c.Request if err := req.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { if !errors.Is(err, http.ErrNotMultipart) { debugPrint("error on parse multipart form array: %v", err) } } c.formCache = req.PostForm } } // GetPostFormArray returns a slice of strings for a given form key, plus // a boolean value whether at least one value exists for the given key. func (c *Context) GetPostFormArray(key string) (values []string, ok bool) { c.initFormCache() values, ok = c.formCache[key] return } // PostFormMap returns a map for a given form key. func (c *Context) PostFormMap(key string) (dicts map[string]string) { dicts, _ = c.GetPostFormMap(key) return } // GetPostFormMap returns a map for a given form key, plus a boolean value // whether at least one value exists for the given key. func (c *Context) GetPostFormMap(key string) (map[string]string, bool) { c.initFormCache() return getMapFromFormData(c.formCache, key) } // getMapFromFormData return a map which satisfies conditions. // It parses from data with bracket notation like "key[subkey]=value" into a map. func getMapFromFormData(m map[string][]string, key string) (map[string]string, bool) { d := make(map[string]string) found := false keyLen := len(key) for k, v := range m { if len(k) < keyLen+3 { // key + "[" + at least one char + "]" continue } if k[:keyLen] != key || k[keyLen] != '[' { continue } if j := strings.IndexByte(k[keyLen+1:], ']'); j > 0 { found = true d[k[keyLen+1:keyLen+1+j]] = v[0] } } return d, found } // FormFile returns the first file for the provided form key. 
func (c *Context) FormFile(name string) (*multipart.FileHeader, error) { if c.Request.MultipartForm == nil { if err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { return nil, err } } f, fh, err := c.Request.FormFile(name) if err != nil { return nil, err } f.Close() return fh, err } // MultipartForm is the parsed multipart form, including file uploads. func (c *Context) MultipartForm() (*multipart.Form, error) { err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory) return c.Request.MultipartForm, err } // SaveUploadedFile uploads the form file to specific dst. func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string, perm ...fs.FileMode) error { src, err := file.Open() if err != nil { return err } defer src.Close() var mode os.FileMode = 0o750 if len(perm) > 0 { mode = perm[0] } dir := filepath.Dir(dst) if err = os.MkdirAll(dir, mode); err != nil { return err } if err = os.Chmod(dir, mode); err != nil { return err } out, err := os.Create(dst) if err != nil { return err } defer out.Close() _, err = io.Copy(out, src) return err } // Bind checks the Method and Content-Type to select a binding engine automatically, // Depending on the "Content-Type" header different bindings are used, for example: // // "application/json" --> JSON binding // "application/xml" --> XML binding // // It parses the request's body based on the Content-Type (e.g., JSON or XML). // It decodes the payload into the struct specified as a pointer. // It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. func (c *Context) Bind(obj any) error { b := binding.Default(c.Request.Method, c.ContentType()) return c.MustBindWith(obj, b) } // BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON). func (c *Context) BindJSON(obj any) error { return c.MustBindWith(obj, binding.JSON) } // BindXML is a shortcut for c.MustBindWith(obj, binding.BindXML). 
func (c *Context) BindXML(obj any) error { return c.MustBindWith(obj, binding.XML) } // BindQuery is a shortcut for c.MustBindWith(obj, binding.Query). func (c *Context) BindQuery(obj any) error { return c.MustBindWith(obj, binding.Query) } // BindYAML is a shortcut for c.MustBindWith(obj, binding.YAML). func (c *Context) BindYAML(obj any) error { return c.MustBindWith(obj, binding.YAML) } // BindTOML is a shortcut for c.MustBindWith(obj, binding.TOML). func (c *Context) BindTOML(obj any) error { return c.MustBindWith(obj, binding.TOML) } // BindPlain is a shortcut for c.MustBindWith(obj, binding.Plain). func (c *Context) BindPlain(obj any) error { return c.MustBindWith(obj, binding.Plain) } // BindHeader is a shortcut for c.MustBindWith(obj, binding.Header). func (c *Context) BindHeader(obj any) error { return c.MustBindWith(obj, binding.Header) } // BindUri binds the passed struct pointer using binding.Uri. // It will abort the request with HTTP 400 if any error occurs. func (c *Context) BindUri(obj any) error { if err := c.ShouldBindUri(obj); err != nil { c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) //nolint: errcheck return err } return nil } // MustBindWith binds the passed struct pointer using the specified binding engine. // It will abort the request with HTTP 400 if any error occurs. // See the binding package. 
func (c *Context) MustBindWith(obj any, b binding.Binding) error { err := c.ShouldBindWith(obj, b) if err != nil { var maxBytesErr *http.MaxBytesError // Note: When using sonic or go-json as JSON encoder, they do not propagate the http.MaxBytesError error // https://github.com/goccy/go-json/issues/485 // https://github.com/bytedance/sonic/issues/800 switch { case errors.As(err, &maxBytesErr): c.AbortWithError(http.StatusRequestEntityTooLarge, err).SetType(ErrorTypeBind) //nolint: errcheck default: c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) //nolint: errcheck } return err } return nil } // ShouldBind checks the Method and Content-Type to select a binding engine automatically, // Depending on the "Content-Type" header different bindings are used, for example: // // "application/json" --> JSON binding // "application/xml" --> XML binding // // It parses the request's body based on the Content-Type (e.g., JSON or XML). // It decodes the payload into the struct specified as a pointer. // Like c.Bind() but this method does not set the response status code to 400 or abort if input is not valid. func (c *Context) ShouldBind(obj any) error { b := binding.Default(c.Request.Method, c.ContentType()) return c.ShouldBindWith(obj, b) } // ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON). // // Example: // // POST /user // Content-Type: application/json // // Request Body: // { // "name": "Manu", // "age": 20 // } // // type User struct { // Name string `json:"name"` // Age int `json:"age"` // } // // var user User // if err := c.ShouldBindJSON(&user); err != nil { // c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) // return // } // c.JSON(http.StatusOK, user) func (c *Context) ShouldBindJSON(obj any) error { return c.ShouldBindWith(obj, binding.JSON) } // ShouldBindXML is a shortcut for c.ShouldBindWith(obj, binding.XML). // It works like ShouldBindJSON but binds the request body as XML data. 
func (c *Context) ShouldBindXML(obj any) error { return c.ShouldBindWith(obj, binding.XML) } // ShouldBindQuery is a shortcut for c.ShouldBindWith(obj, binding.Query). // It works like ShouldBindJSON but binds query parameters from the URL. func (c *Context) ShouldBindQuery(obj any) error { return c.ShouldBindWith(obj, binding.Query) } // ShouldBindYAML is a shortcut for c.ShouldBindWith(obj, binding.YAML). // It works like ShouldBindJSON but binds the request body as YAML data. func (c *Context) ShouldBindYAML(obj any) error { return c.ShouldBindWith(obj, binding.YAML) } // ShouldBindTOML is a shortcut for c.ShouldBindWith(obj, binding.TOML). // It works like ShouldBindJSON but binds the request body as TOML data. func (c *Context) ShouldBindTOML(obj any) error { return c.ShouldBindWith(obj, binding.TOML) } // ShouldBindPlain is a shortcut for c.ShouldBindWith(obj, binding.Plain). // It works like ShouldBindJSON but binds plain text data from the request body. func (c *Context) ShouldBindPlain(obj any) error { return c.ShouldBindWith(obj, binding.Plain) } // ShouldBindHeader is a shortcut for c.ShouldBindWith(obj, binding.Header). // It works like ShouldBindJSON but binds values from HTTP headers. func (c *Context) ShouldBindHeader(obj any) error { return c.ShouldBindWith(obj, binding.Header) } // ShouldBindUri binds the passed struct pointer using the specified binding engine. // It works like ShouldBindJSON but binds parameters from the URI. func (c *Context) ShouldBindUri(obj any) error { m := make(map[string][]string, len(c.Params)) for _, v := range c.Params { m[v.Key] = []string{v.Value} } return binding.Uri.BindUri(m, obj) } // ShouldBindWith binds the passed struct pointer using the specified binding engine. // See the binding package. 
func (c *Context) ShouldBindWith(obj any, b binding.Binding) error { return b.Bind(c.Request, obj) } // ShouldBindBodyWith is similar with ShouldBindWith, but it stores the request // body into the context, and reuse when it is called again. // // NOTE: This method reads the body before binding. So you should use // ShouldBindWith for better performance if you need to call only once. func (c *Context) ShouldBindBodyWith(obj any, bb binding.BindingBody) (err error) { var body []byte if cb, ok := c.Get(BodyBytesKey); ok { if cbb, ok := cb.([]byte); ok { body = cbb } } if body == nil { body, err = io.ReadAll(c.Request.Body) if err != nil { return err } c.Set(BodyBytesKey, body) } return bb.BindBody(body, obj) } // ShouldBindBodyWithJSON is a shortcut for c.ShouldBindBodyWith(obj, binding.JSON). func (c *Context) ShouldBindBodyWithJSON(obj any) error { return c.ShouldBindBodyWith(obj, binding.JSON) } // ShouldBindBodyWithXML is a shortcut for c.ShouldBindBodyWith(obj, binding.XML). func (c *Context) ShouldBindBodyWithXML(obj any) error { return c.ShouldBindBodyWith(obj, binding.XML) } // ShouldBindBodyWithYAML is a shortcut for c.ShouldBindBodyWith(obj, binding.YAML). func (c *Context) ShouldBindBodyWithYAML(obj any) error { return c.ShouldBindBodyWith(obj, binding.YAML) } // ShouldBindBodyWithTOML is a shortcut for c.ShouldBindBodyWith(obj, binding.TOML). func (c *Context) ShouldBindBodyWithTOML(obj any) error { return c.ShouldBindBodyWith(obj, binding.TOML) } // ShouldBindBodyWithPlain is a shortcut for c.ShouldBindBodyWith(obj, binding.Plain). func (c *Context) ShouldBindBodyWithPlain(obj any) error { return c.ShouldBindBodyWith(obj, binding.Plain) } // ClientIP implements one best effort algorithm to return the real client IP. // It calls c.RemoteIP() under the hood, to check if the remote IP is a trusted proxy or not. // If it is it will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-IP]). 
// If the headers are not syntactically valid OR the remote IP does not correspond to a trusted proxy, // the remote IP (coming from Request.RemoteAddr) is returned. func (c *Context) ClientIP() string { // Check if we're running on a trusted platform, continue running backwards if error if c.engine.TrustedPlatform != "" { // Developers can define their own header of Trusted Platform or use predefined constants if addr := c.requestHeader(c.engine.TrustedPlatform); addr != "" { return addr } } // Legacy "AppEngine" flag if c.engine.AppEngine { log.Println(`The AppEngine flag is going to be deprecated. Please check issues #2723 and #2739 and use 'TrustedPlatform: gin.PlatformGoogleAppEngine' instead.`) if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" { return addr } } var ( trusted bool remoteIP net.IP ) // If gin is listening a unix socket, always trust it. localAddr, ok := c.Request.Context().Value(http.LocalAddrContextKey).(net.Addr) if ok && strings.HasPrefix(localAddr.Network(), "unix") { trusted = true } // Fallback if !trusted { // It also checks if the remoteIP is a trusted proxy or not. // In order to perform this validation, it will see if the IP is contained within at least one of the CIDR blocks // defined by Engine.SetTrustedProxies() remoteIP = net.ParseIP(c.RemoteIP()) if remoteIP == nil { return "" } trusted = c.engine.isTrustedProxy(remoteIP) } if trusted && c.engine.ForwardedByClientIP && c.engine.RemoteIPHeaders != nil { for _, headerName := range c.engine.RemoteIPHeaders { headerValue := strings.Join(c.Request.Header.Values(headerName), ",") ip, valid := c.engine.validateHeader(headerValue) if valid { return ip } } } return remoteIP.String() } // RemoteIP parses the IP from Request.RemoteAddr, normalizes and returns the IP (without the port). 
func (c *Context) RemoteIP() string { ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)) if err != nil { return "" } return ip } // ContentType returns the Content-Type header of the request. func (c *Context) ContentType() string { return filterFlags(c.requestHeader("Content-Type")) } // IsWebsocket returns true if the request headers indicate that a websocket // handshake is being initiated by the client. func (c *Context) IsWebsocket() bool { if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") && strings.EqualFold(c.requestHeader("Upgrade"), "websocket") { return true } return false } func (c *Context) requestHeader(key string) string { return c.Request.Header.Get(key) } /************************************/ /******** RESPONSE RENDERING ********/ /************************************/ // bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function. func bodyAllowedForStatus(status int) bool { switch { case status >= http.StatusContinue && status < http.StatusOK: return false case status == http.StatusNoContent: return false case status == http.StatusNotModified: return false } return true } // Status sets the HTTP response code. func (c *Context) Status(code int) { c.Writer.WriteHeader(code) } // Header is an intelligent shortcut for c.Writer.Header().Set(key, value). // It writes a header in the response. // If value == "", this method removes the header `c.Writer.Header().Del(key)` func (c *Context) Header(key, value string) { if value == "" { c.Writer.Header().Del(key) return } c.Writer.Header().Set(key, value) } // GetHeader returns value from request headers. func (c *Context) GetHeader(key string) string { return c.requestHeader(key) } // GetRawData returns stream data. 
func (c *Context) GetRawData() ([]byte, error) { if c.Request.Body == nil { return nil, errors.New("cannot read nil body") } return io.ReadAll(c.Request.Body) } // SetSameSite with cookie func (c *Context) SetSameSite(samesite http.SameSite) { c.sameSite = samesite } // SetCookie adds a Set-Cookie header to the ResponseWriter's headers. // The provided cookie must have a valid Name. Invalid cookies may be // silently dropped. func (c *Context) SetCookie(name, value string, maxAge int, path, domain string, secure, httpOnly bool) { if path == "" { path = "/" } http.SetCookie(c.Writer, &http.Cookie{ Name: name, Value: url.QueryEscape(value), MaxAge: maxAge, Path: path, Domain: domain, SameSite: c.sameSite, Secure: secure, HttpOnly: httpOnly, }) } // SetCookieData adds a Set-Cookie header to the ResponseWriter's headers. // It accepts a pointer to http.Cookie structure for more flexibility in setting cookie attributes. // The provided cookie must have a valid Name. Invalid cookies may be silently dropped. func (c *Context) SetCookieData(cookie *http.Cookie) { if cookie.Path == "" { cookie.Path = "/" } if cookie.SameSite == http.SameSiteDefaultMode { cookie.SameSite = c.sameSite } http.SetCookie(c.Writer, cookie) } // Cookie returns the named cookie provided in the request or // ErrNoCookie if not found. And return the named cookie is unescaped. // If multiple cookies match the given name, only one cookie will // be returned. func (c *Context) Cookie(name string) (string, error) { cookie, err := c.Request.Cookie(name) if err != nil { return "", err } val, _ := url.QueryUnescape(cookie.Value) return val, nil } // Render writes the response headers and calls render.Render to render data. 
func (c *Context) Render(code int, r render.Render) {
	c.Status(code)

	// Status codes that forbid a body (1xx, 204, 304): only emit the
	// Content-Type header and flush the status line, never a payload.
	if !bodyAllowedForStatus(code) {
		r.WriteContentType(c.Writer)
		c.Writer.WriteHeaderNow()
		return
	}

	if err := r.Render(c.Writer); err != nil {
		// Pushing error to c.Errors
		_ = c.Error(err)
		c.Abort()
	}
}

// HTML renders the HTTP template specified by its file name.
// It also updates the HTTP code and sets the Content-Type as "text/html".
// See http://golang.org/doc/articles/wiki/
func (c *Context) HTML(code int, name string, obj any) {
	instance := c.engine.HTMLRender.Instance(name, obj)
	c.Render(code, instance)
}

// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body.
// It also sets the Content-Type as "application/json".
// WARNING: we recommend using this only for development purposes since printing pretty JSON is
// more CPU and bandwidth consuming. Use Context.JSON() instead.
func (c *Context) IndentedJSON(code int, obj any) {
	c.Render(code, render.IndentedJSON{Data: obj})
}

// SecureJSON serializes the given struct as Secure JSON into the response body.
// Default prepends "while(1)," to response body if the given struct is array values.
// It also sets the Content-Type as "application/json".
func (c *Context) SecureJSON(code int, obj any) {
	c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj})
}

// JSONP serializes the given struct as JSON into the response body.
// It adds padding to response body to request data from a server residing in a different domain than the client.
// It also sets the Content-Type as "application/javascript".
func (c *Context) JSONP(code int, obj any) {
	callback := c.DefaultQuery("callback", "")
	// Without a callback query parameter, degrade gracefully to plain JSON.
	if callback == "" {
		c.Render(code, render.JSON{Data: obj})
		return
	}
	c.Render(code, render.JsonpJSON{Callback: callback, Data: obj})
}

// JSON serializes the given struct as JSON into the response body.
// It also sets the Content-Type as "application/json".
func (c *Context) JSON(code int, obj any) {
	c.Render(code, render.JSON{Data: obj})
}

// AsciiJSON serializes the given struct as JSON into the response body with unicode to ASCII string.
// It also sets the Content-Type as "application/json".
func (c *Context) AsciiJSON(code int, obj any) {
	c.Render(code, render.AsciiJSON{Data: obj})
}

// PureJSON serializes the given struct as JSON into the response body.
// PureJSON, unlike JSON, does not replace special html characters with their unicode entities.
func (c *Context) PureJSON(code int, obj any) {
	c.Render(code, render.PureJSON{Data: obj})
}

// XML serializes the given struct as XML into the response body.
// It also sets the Content-Type as "application/xml".
func (c *Context) XML(code int, obj any) {
	c.Render(code, render.XML{Data: obj})
}

// YAML serializes the given struct as YAML into the response body.
func (c *Context) YAML(code int, obj any) {
	c.Render(code, render.YAML{Data: obj})
}

// TOML serializes the given struct as TOML into the response body.
func (c *Context) TOML(code int, obj any) {
	c.Render(code, render.TOML{Data: obj})
}

// ProtoBuf serializes the given struct as ProtoBuf into the response body.
func (c *Context) ProtoBuf(code int, obj any) {
	c.Render(code, render.ProtoBuf{Data: obj})
}

// BSON serializes the given struct as BSON into the response body.
func (c *Context) BSON(code int, obj any) {
	c.Render(code, render.BSON{Data: obj})
}

// String writes the given string into the response body.
// format and values follow fmt.Sprintf semantics.
func (c *Context) String(code int, format string, values ...any) {
	c.Render(code, render.String{Format: format, Data: values})
}

// Redirect returns an HTTP redirect to the specific location.
// NOTE(review): code -1 delegates status handling entirely to render.Redirect;
// presumably gin's ResponseWriter treats WriteHeader(-1) as a no-op — confirm.
func (c *Context) Redirect(code int, location string) {
	c.Render(-1, render.Redirect{
		Code:     code,
		Location: location,
		Request:  c.Request,
	})
}

// Data writes some data into the body stream and updates the HTTP code.
func (c *Context) Data(code int, contentType string, data []byte) {
	c.Render(code, render.Data{
		ContentType: contentType,
		Data:        data,
	})
}

// DataFromReader writes the specified reader into the body stream and updates the HTTP code.
// extraHeaders are set on the response before the body is streamed.
func (c *Context) DataFromReader(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string) {
	c.Render(code, render.Reader{
		Headers:       extraHeaders,
		ContentType:   contentType,
		ContentLength: contentLength,
		Reader:        reader,
	})
}

// File writes the specified file into the body stream in an efficient way.
func (c *Context) File(filepath string) {
	http.ServeFile(c.Writer, c.Request, filepath)
}

// FileFromFS writes the specified file from http.FileSystem into the body stream in an efficient way.
func (c *Context) FileFromFS(filepath string, fs http.FileSystem) {
	// Temporarily rewrite the request URL path so http.FileServer resolves the
	// desired file; the deferred closure restores the original path afterwards.
	defer func(old string) {
		c.Request.URL.Path = old
	}(c.Request.URL.Path)

	c.Request.URL.Path = filepath

	http.FileServer(fs).ServeHTTP(c.Writer, c.Request)
}

// quoteEscaper escapes backslashes and double quotes for safe embedding
// inside an HTTP quoted-string (used by FileAttachment below).
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// escapeQuotes applies quoteEscaper to s.
func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}

// FileAttachment writes the specified file into the body stream in an efficient way
// On the client side, the file will typically be downloaded with the given filename
func (c *Context) FileAttachment(filepath, filename string) {
	// ASCII names use the plain quoted filename= form; anything else uses the
	// RFC 5987 filename*= form with percent-encoded UTF-8.
	if isASCII(filename) {
		c.Writer.Header().Set("Content-Disposition", `attachment; filename="`+escapeQuotes(filename)+`"`)
	} else {
		c.Writer.Header().Set("Content-Disposition", `attachment; filename*=UTF-8''`+url.QueryEscape(filename))
	}
	http.ServeFile(c.Writer, c.Request, filepath)
}

// SSEvent writes a Server-Sent Event into the body stream.
func (c *Context) SSEvent(name string, message any) {
	// Code -1 skips the usual status handling; sse.Event writes the event frame itself.
	c.Render(-1, sse.Event{
		Event: name,
		Data:  message,
	})
}

// Stream sends a streaming response and returns a boolean
// indicates "Is client disconnected in middle of stream"
func (c *Context) Stream(step func(w io.Writer) bool) bool {
	w := c.Writer
	clientGone := w.CloseNotify()
	for {
		select {
		case <-clientGone:
			// Client went away mid-stream.
			return true
		default:
			// Run one producer step, flush it to the wire, and stop once the
			// producer signals completion by returning false.
			keepOpen := step(w)
			w.Flush()
			if !keepOpen {
				return false
			}
		}
	}
}

/************************************/
/******** CONTENT NEGOTIATION *******/
/************************************/

// Negotiate contains all negotiations data.
// Data is the shared fallback used for any format whose dedicated field is unset.
type Negotiate struct {
	Offered      []string
	HTMLName     string
	HTMLData     any
	JSONData     any
	XMLData      any
	YAMLData     any
	Data         any
	TOMLData     any
	PROTOBUFData any
	BSONData     any
}

// Negotiate calls different Render according to acceptable Accept format.
// Responds 406 Not Acceptable when none of the offered formats match.
func (c *Context) Negotiate(code int, config Negotiate) {
	switch c.NegotiateFormat(config.Offered...) {
	case binding.MIMEJSON:
		data := chooseData(config.JSONData, config.Data)
		c.JSON(code, data)

	case binding.MIMEHTML:
		data := chooseData(config.HTMLData, config.Data)
		c.HTML(code, config.HTMLName, data)

	case binding.MIMEXML:
		data := chooseData(config.XMLData, config.Data)
		c.XML(code, data)

	case binding.MIMEYAML, binding.MIMEYAML2:
		data := chooseData(config.YAMLData, config.Data)
		c.YAML(code, data)

	case binding.MIMETOML:
		data := chooseData(config.TOMLData, config.Data)
		c.TOML(code, data)

	case binding.MIMEPROTOBUF:
		data := chooseData(config.PROTOBUFData, config.Data)
		c.ProtoBuf(code, data)

	case binding.MIMEBSON:
		data := chooseData(config.BSONData, config.Data)
		c.BSON(code, data)

	default:
		c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) //nolint: errcheck
	}
}

// NegotiateFormat returns an acceptable Accept format.
func (c *Context) NegotiateFormat(offered ...string) string {
	assert1(len(offered) > 0, "you must provide at least one offer")

	// Parse (and cache) the Accept header on first use.
	if c.Accepted == nil {
		c.Accepted = parseAccept(c.requestHeader("Accept"))
	}
	// No Accept header: the first offer wins.
	if len(c.Accepted) == 0 {
		return offered[0]
	}
	for _, accepted := range c.Accepted {
		for _, offer := range offered {
			// According to RFC 2616 and RFC 2396, non-ASCII characters are not allowed in headers,
			// therefore we can just iterate over the string without casting it into []rune
			// Byte-wise prefix comparison: a '*' wildcard on either side matches immediately;
			// otherwise the accepted value must be a prefix of (or equal to) the offer.
			i := 0
			for ; i < len(accepted) && i < len(offer); i++ {
				if accepted[i] == '*' || offer[i] == '*' {
					return offer
				}
				if accepted[i] != offer[i] {
					break
				}
			}
			if i == len(accepted) {
				return offer
			}
		}
	}
	// Nothing acceptable was offered.
	return ""
}

// SetAccepted sets Accept header data.
func (c *Context) SetAccepted(formats ...string) {
	c.Accepted = formats
}

/************************************/
/***** GOLANG.ORG/X/NET/CONTEXT *****/
/************************************/

// hasRequestContext returns whether c.Request has Context and fallback.
// Both the engine-level opt-in flag and a usable request context are required.
func (c *Context) hasRequestContext() bool {
	hasFallback := c.engine != nil && c.engine.ContextWithFallback
	hasRequestContext := c.Request != nil && c.Request.Context() != nil
	return hasFallback && hasRequestContext
}

// Deadline returns that there is no deadline (ok==false) when c.Request has no Context.
func (c *Context) Deadline() (deadline time.Time, ok bool) {
	if !c.hasRequestContext() {
		return
	}
	return c.Request.Context().Deadline()
}

// Done returns nil (chan which will wait forever) when c.Request has no Context.
func (c *Context) Done() <-chan struct{} {
	if !c.hasRequestContext() {
		return nil
	}
	return c.Request.Context().Done()
}

// Err returns nil when c.Request has no Context.
func (c *Context) Err() error {
	if !c.hasRequestContext() {
		return nil
	}
	return c.Request.Context().Err()
}

// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
func (c *Context) Value(key any) any { if key == ContextRequestKey { return c.Request } if key == ContextKey { return c } if keyAsString, ok := key.(string); ok { if val, exists := c.Get(keyAsString); exists { return val } } if !c.hasRequestContext() { return nil } return c.Request.Context().Value(key) }
go
github
https://github.com/gin-gonic/gin
context.go
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.conf.urls.defaults import include # noqa from django.conf.urls.defaults import patterns # noqa from django.conf.urls.defaults import url # noqa from openstack_dashboard.dashboards.project.networks.ports \ import urls as port_urls from openstack_dashboard.dashboards.project.networks.ports \ import views as port_views from openstack_dashboard.dashboards.project.networks.subnets \ import urls as subnet_urls from openstack_dashboard.dashboards.project.networks.subnets \ import views as subnet_views from openstack_dashboard.dashboards.project.networks import views NETWORKS = r'^(?P<network_id>[^/]+)/%s$' urlpatterns = patterns('', url(r'^$', views.IndexView.as_view(), name='index'), url(r'^create$', views.CreateView.as_view(), name='create'), url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'), url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'), url(NETWORKS % 'subnets/create', subnet_views.CreateView.as_view(), name='addsubnet'), url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$', subnet_views.UpdateView.as_view(), name='editsubnet'), url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$', port_views.UpdateView.as_view(), name='editport'), url(r'^subnets/', include(subnet_urls, namespace='subnets')), url(r'^ports/', include(port_urls, namespace='ports')))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# Submission helper: runs a lightweight sanity check over the assignment
# deliverables, verifies the expected files exist, and packs them into
# <sunetid>.zip. Python 2 only (print statements, raw_input).
import sys, os, re, json
import glob, shutil
import time

################
# Sanity check #
################
import numpy as np

# Global tallies mutated by the testcase wrappers below.
fail = 0
counter = 0
testcases = []

from functools import wraps
import traceback

def prompt(msg):
    # Ask a yes/no question; anything starting with 'y' counts as yes.
    yn = raw_input(msg + " [y/n]: ")
    return yn.lower().startswith('y')

class testcase(object):
    # Decorator: wraps a test function so exceptions are caught and counted
    # instead of aborting the whole run; registers the wrapper in `testcases`.
    def __init__(self, name):
        self.name = name

    def __call__(self, func):
        global testcases
        @wraps(func)
        def wrapper():
            global counter
            global fail
            counter += 1
            print ">> Test %d (%s)" % (counter, self.name)
            try:
                func()
                print "[ok] Passed test %d (%s)" % (counter, self.name)
            except Exception as e:
                fail += 1
                print "[!!] Error on test %d (%s):" % (counter, self.name)
                traceback.print_exc()
        testcases.append(wrapper)
        return wrapper

##
# Part 0

##
# Part 1
@testcase("Part1: test random_weight_matrix")
def test_random_weight_matrix():
    from misc import random_weight_matrix
    A = random_weight_matrix(100,100)
    assert(A.shape == (100,100))

@testcase("Part1: initialize window model")
def ner_init():
    from nerwindow import WindowMLP
    np.random.seed(10)
    wv = np.random.randn(20,10)
    clf = WindowMLP(wv, windowsize=3, dims = [None, 15, 3], rseed=10)

@testcase("Part1: test predict_proba()")
def ner_predict_proba():
    from nerwindow import WindowMLP
    np.random.seed(10)
    wv = np.random.randn(20,10)
    clf = WindowMLP(wv, windowsize=3, dims = [None, 15, 3], rseed=10)
    p = clf.predict_proba([1,2,3])
    assert(len(p.flatten()) == 3)
    p = clf.predict_proba([[1,2,3], [2,3,4]])
    assert(np.ndim(p) == 2)
    assert(p.shape == (2,3))

# NOTE(review): this redefines `ner_predict_proba` (and `ner_probe_b` below is
# also defined twice). The later definition shadows the earlier *name*, but
# both wrappers were already appended to `testcases`, so all tests still run.
@testcase("Part1: test compute_loss()")
def ner_predict_proba():
    from nerwindow import WindowMLP
    np.random.seed(10)
    wv = np.random.randn(20,10)
    clf = WindowMLP(wv, windowsize=3, dims = [None, 15, 3], rseed=10)
    J = clf.compute_loss([1,2,3], 1)
    print " dummy: J = %g" % J
    J = clf.compute_loss([[1,2,3], [2,3,4]], [0,1])
    print " dummy: J = %g" % J

@testcase("Part1: NER prediction - dev set")
def ner_pred_dev():
    devpred = np.loadtxt("dev.predicted", dtype=int)
    assert(len(devpred) == 51362) # dev set length

@testcase("Part1: NER prediction - test set")
def ner_pred_test():
    testpred = np.loadtxt("test.predicted", dtype=int)
    assert(len(testpred) == 46435)

def setup_probing():
    # Build a tiny vocabulary/tagset and a freshly-seeded classifier shared
    # by the Part 1.1 probing tests.
    num_to_word = dict(enumerate(
        ["hello", "world", "i", "am", "a", "banana", "there", "is", "no", "spoon"]))
    tagnames = ["O", "LOC", "MISC", "ORG", "PER"]
    num_to_tag = dict(enumerate(tagnames))
    from nerwindow import WindowMLP
    np.random.seed(10)
    wv = np.random.randn(10,50)
    clf = WindowMLP(wv, windowsize=3, dims = [None, 100, 5], rseed=10)
    return clf, num_to_word, num_to_tag

@testcase("Part1.1 (a): verify output format")
def ner_probe_a():
    from part11probing import part_a, part_b, part_c
    clf, num_to_word, num_to_tag = setup_probing()
    s,w = part_a(clf, num_to_word, verbose=False)
    assert(len(s) == len(w))
    if type(s) == dict: # some students may have done this
        for k in s.keys(): assert(k in w)
        for k in w.keys(): assert(k in s)
        assert(len(s) >= 5)
    else: # list
        assert(len(s[0]) == len(w[0]))
        assert(len(s[0]) == 10)
        assert(type(w[0][0]) == str)

@testcase("Part1.1 (b): verify output format")
def ner_probe_b():
    from part11probing import part_a, part_b, part_c
    clf, num_to_word, num_to_tag = setup_probing()
    s,w = part_b(clf, num_to_word, num_to_tag, verbose=False)
    assert(len(s) == len(w))
    assert(len(s) == 5)
    assert(len(s[0]) == len(w[0]))
    assert(len(s[0]) == 10)
    assert(type(w[0][0]) == str)

@testcase("Part1.1 (c): verify output format")
def ner_probe_b():
    from part11probing import part_a, part_b, part_c
    clf, num_to_word, num_to_tag = setup_probing()
    s,w = part_c(clf, num_to_word, num_to_tag, verbose=False)
    assert(len(s) == len(w))
    assert(len(s) == 5)
    assert(len(s[0]) == len(w[0]))
    assert(len(s[0]) == 10)
    assert(type(w[0][0]) == str)

##
# Part 2
@testcase("Part2: initialize RNNLM")
def rnnlm_init():
    from rnnlm import RNNLM
    np.random.seed(10)
    L = np.random.randn(50,10)
    model = RNNLM(L0 = L)

@testcase("Part2: load RNNLM params")
def rnnlm_load():
    from rnnlm import RNNLM
    L = np.load('rnnlm.L.npy')
    print "  loaded L: %s" % str(L.shape)
    H = np.load('rnnlm.H.npy')
    print "  loaded H: %s" % str(H.shape)
    U = np.load('rnnlm.U.npy')
    print "  loaded U: %s" % str(U.shape)
    # Dimension compatibility between the saved matrices.
    assert(L.shape[0] == U.shape[0])
    assert(L.shape[1] == H.shape[1])
    assert(H.shape[0] == U.shape[1])
    model = RNNLM(L0 = L, U0 = U)
    model.params.H[:] = H

@testcase("Part2: test generate_sequence")
def rnnlm_generate_sequence():
    from rnnlm import RNNLM
    np.random.seed(10)
    L = np.random.randn(20,10)
    model = RNNLM(L0 = L)
    model.H = np.random.randn(20,20)
    s, J = model.generate_sequence(0,1, maxlen=15)
    print "dummy J: %g" % J
    print "dummy seq: len(s) = %d" % len(s)
    assert(len(s) <= 15+1)
    assert(s[0] == 0)
    assert(J > 0)

##
# Execute sanity check
print "=== Running sanity check ==="
for f in testcases:
    f()
if fail <= 0:
    print "=== Sanity check passed! ==="
else:
    print "=== Sanity check failed %d tests :( ===" % fail
    if not prompt("Continue submission anyway?"):
        sys.exit(1)

##
# List of files for submission
filelist = [
    'part0-XOR.ipynb',
    'part1-NER.ipynb',
    'misc.py',
    'nerwindow.py',
    'ner.learningcurve.best.png',
    'ner.learningcurve.comparison.png',
    'dev.predicted',
    'test.predicted',
    'part11probing.py',
    'part2-RNNLM.ipynb',
    'rnnlm.py',
    'rnnlm.H.npy',
    'rnnlm.L.npy',
    'rnnlm.U.npy',
]
files_ok = []
files_missing = []

# Verify required files present
print "=== Verifying file list ==="
for fname in filelist:
    print ("File: %s ? -" % fname),
    if os.path.isfile(fname):
        print "ok"; files_ok.append(fname)
    else:
        print "NOT FOUND"; files_missing.append(fname)
if len(files_missing) > 0:
    print "== Error: missing files =="
    print " ".join(files_missing)
    if not prompt("Continue submission anyway?"):
        sys.exit(1)

##
# Prepare submission zip
from zipfile import ZipFile

# Get SUNet ID: re-prompt until it matches, bail after too many bad attempts.
sunetid = ""
fail = -1
while not re.match(r'[\w\d]+', sunetid):
    fail += 1
    sunetid = raw_input("=== Please enter your SUNet ID ===\nSUNet ID: ").lower()
    if fail > 3:
        print "Error: invalid ID"; sys.exit(1)

# Pack in files
zipname = "%s.zip" % sunetid
with ZipFile(zipname, 'w') as zf:
    print "=== Generating submission file '%s' ===" % zipname
    for fname in files_ok:
        print ("  %s" % fname),
        zf.write(fname)
        print ("(%.02f kB)" % ((1.0/1024) * zf.getinfo(fname).file_size))

# Check size
fsize = os.path.getsize(zipname)
# NOTE(review): 3*(2**30) is 3 GiB; the original comment claimed "30 MB" and
# the message below prints kB — confirm the intended limit before changing.
SIZE_LIMIT = 3*(2**30)
print "Submission size: %.02f kB -" % ((1.0/1024) * fsize),
if fsize < SIZE_LIMIT:
    print "ok!"
else:
    print "too large! (limit = %.02f kB" % ((1.0/1024) * SIZE_LIMIT)
    sys.exit(1)

print "=== Successfully generated submission zipfile! ==="
print "Please upload '%s' to Box, and don't forget to submit your writeup PDF via Scoryst!" % zipname
unknown
codeparrot/codeparrot-clean
""" Move a file in the safest way possible:: >>> from django.core.files.move import file_move_safe >>> file_move_safe("/tmp/old_file", "/tmp/new_file") """ import os from shutil import copystat from django.core.files import locks __all__ = ['file_move_safe'] def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False): """ Move a file from one location to another in the safest way possible. First, try ``os.rename``, which is simple but will break across filesystems. If that fails, stream manually from one file to another in pure Python. If the destination file exists and ``allow_overwrite`` is ``False``, raise ``IOError``. """ # There's no reason to move if we don't have to. if _samefile(old_file_name, new_file_name): return try: # If the destination file exists and allow_overwrite is False then raise an IOError if not allow_overwrite and os.access(new_file_name, os.F_OK): raise IOError("Destination file %s exists and allow_overwrite is False" % new_file_name) os.rename(old_file_name, new_file_name) return except OSError: # This will happen with os.rename if moving to another filesystem # or when moving opened files on certain operating systems pass # first open the old file, so that it won't go away with open(old_file_name, 'rb') as old_file: # now open the new file, not forgetting allow_overwrite fd = os.open(new_file_name, (os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0) | (os.O_EXCL if not allow_overwrite else 0))) try: locks.lock(fd, locks.LOCK_EX) current_chunk = None while current_chunk != b'': current_chunk = old_file.read(chunk_size) os.write(fd, current_chunk) finally: locks.unlock(fd) os.close(fd) copystat(old_file_name, new_file_name) 
try: os.remove(old_file_name) except PermissionError as e: # Certain operating systems (Cygwin and Windows) # fail when deleting opened files, ignore it. (For the # systems where this happens, temporary files will be auto-deleted # on close anyway.) if getattr(e, 'winerror', 0) != 32: raise
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from functools import partial import logging from lxml import etree from lxml.builder import E import openerp from openerp import SUPERUSER_ID from openerp import pooler, tools import openerp.exceptions from openerp.osv import fields,osv, expression from openerp.osv.orm import browse_record from openerp.tools.translate import _ _logger = logging.getLogger(__name__) class groups(osv.osv): _name = "res.groups" _description = "Access Groups" _rec_name = 'full_name' def _get_full_name(self, cr, uid, ids, field, arg, context=None): res = {} for g in self.browse(cr, uid, ids, context): if g.category_id: res[g.id] = '%s / %s' % (g.category_id.name, g.name) else: res[g.id] = g.name return res def _search_group(self, cr, uid, obj, name, args, context=None): operand = args[0][2] operator = args[0][1] lst = True if isinstance(operand, bool): domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]] if operator in expression.NEGATIVE_TERM_OPERATORS == (not operand): 
return expression.AND(domains) else: return expression.OR(domains) if isinstance(operand, basestring): lst = False operand = [operand] where = [] for group in operand: values = filter(bool, group.split('/')) group_name = values.pop().strip() category_name = values and '/'.join(values).strip() or group_name group_domain = [('name', operator, lst and [group_name] or group_name)] category_domain = [('category_id.name', operator, lst and [category_name] or category_name)] if operator in expression.NEGATIVE_TERM_OPERATORS and not values: category_domain = expression.OR([category_domain, [('category_id', '=', False)]]) if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values): sub_where = expression.AND([group_domain, category_domain]) else: sub_where = expression.OR([group_domain, category_domain]) if operator in expression.NEGATIVE_TERM_OPERATORS: where = expression.AND([where, sub_where]) else: where = expression.OR([where, sub_where]) return where _columns = { 'name': fields.char('Name', size=64, required=True, translate=True), 'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'), 'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls'), 'rule_groups': fields.many2many('ir.rule', 'rule_group_rel', 'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]), 'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'), 'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'), 'comment' : fields.text('Comment', size=250, translate=True), 'category_id': fields.many2one('ir.module.category', 'Application', select=True), 'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group), } _sql_constraints = [ ('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!') ] def search(self, cr, uid, args, offset=0, limit=None, 
order=None, context=None, count=False): # add explicit ordering if search is sorted on full_name if order and order.startswith('full_name'): ids = super(groups, self).search(cr, uid, args, context=context) gs = self.browse(cr, uid, ids, context) gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC')) gs = gs[offset:offset+limit] if limit else gs[offset:] return map(int, gs) return super(groups, self).search(cr, uid, args, offset, limit, order, context, count) def copy(self, cr, uid, id, default=None, context=None): group_name = self.read(cr, uid, [id], ['name'])[0]['name'] default.update({'name': _('%s (copy)')%group_name}) return super(groups, self).copy(cr, uid, id, default, context) def write(self, cr, uid, ids, vals, context=None): if 'name' in vals: if vals['name'].startswith('-'): raise osv.except_osv(_('Error'), _('The name of the group can not start with "-"')) res = super(groups, self).write(cr, uid, ids, vals, context=context) self.pool.get('ir.model.access').call_cache_clearing_methods(cr) return res groups() class res_users(osv.osv): """ User class. A res.users record models an OpenERP user and is different from an employee. res.users class now inherits from res.partner. The partner model is used to store the data related to the partner: lang, name, address, avatar, ... The user model is now dedicated to technical data. """ __admin_ids = {} _uid_cache = {} _inherits = { 'res.partner': 'partner_id', } _name = "res.users" _description = 'Users' def _set_new_password(self, cr, uid, id, name, value, args, context=None): if value is False: # Do not update the password if no value is provided, ignore silently. # For example web client submits False values for all empty fields. return if uid == id: # To change their own password users must use the client-specific change password wizard, # so that the new password is immediately used for further RPC requests, otherwise the user # will face unexpected 'Access Denied' exceptions. 
raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.')) self.write(cr, uid, id, {'password': value}) def _get_password(self, cr, uid, ids, arg, karg, context=None): return dict.fromkeys(ids, '') _columns = { 'id': fields.integer('ID'), 'login_date': fields.date('Latest connection', select=1), 'partner_id': fields.many2one('res.partner', required=True, string='Related Partner', ondelete='restrict', help='Partner-related data of the user'), 'login': fields.char('Login', size=64, required=True, help="Used to log into the system"), 'password': fields.char('Password', size=64, invisible=True, help="Keep empty if you don't want the user to be able to connect on the system."), 'new_password': fields.function(_get_password, type='char', size=64, fnct_inv=_set_new_password, string='Set Password', help="Specify a value only when creating a user or if you're "\ "changing the user's password, otherwise leave empty. After "\ "a change of password, the user has to login again."), 'signature': fields.text('Signature'), 'active': fields.boolean('Active'), 'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at logon for this user, in addition to the standard menu."), 'menu_id': fields.many2one('ir.actions.actions', 'Menu Action', help="If specified, the action will replace the standard menu for this user."), 'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'), # Special behavior for this field: res.company.search() will only return the companies # available to the current user (should be the user's companies?), when the user_preference # context is set. 
'company_id': fields.many2one('res.company', 'Company', required=True, help='The company this user is currently working for.', context={'user_preference': True}), 'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'), # backward compatibility fields 'user_email': fields.related('email', type='char', deprecated='Use the email field instead of user_email. This field will be removed with OpenERP 7.1.'), } def on_change_company_id(self, cr, uid, ids, company_id): return {'warning' : { 'title': _("Company Switch Warning"), 'message': _("Please keep in mind that documents currently displayed may not be relevant after switching to another company. If you have unsaved changes, please make sure to save and close all forms before switching to a different company. (You can click on Cancel in the User Preferences now)"), } } def onchange_state(self, cr, uid, ids, state_id, context=None): partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)] return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context) def onchange_type(self, cr, uid, ids, is_company, context=None): """ Wrapper on the user.partner onchange_type, because some calls to the partner form view applied to the user may trigger the partner.onchange_type method, but applied to the user object. """ partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)] return self.pool.get('res.partner').onchange_type(cr, uid, partner_ids, is_company, context=context) def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None): """ Wrapper on the user.partner onchange_address, because some calls to the partner form view applied to the user may trigger the partner.onchange_type method, but applied to the user object. 
""" partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)] return self.pool.get('res.partner').onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context) def _check_company(self, cr, uid, ids, context=None): return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context)) _constraints = [ (_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']), ] _sql_constraints = [ ('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !') ] def _get_company(self,cr, uid, context=None, uid2=False): if not uid2: uid2 = uid user = self.pool.get('res.users').read(cr, uid, uid2, ['company_id'], context) company_id = user.get('company_id', False) return company_id and company_id[0] or False def _get_companies(self, cr, uid, context=None): c = self._get_company(cr, uid, context) if c: return [c] return False def _get_menu(self,cr, uid, context=None): dataobj = self.pool.get('ir.model.data') try: model, res_id = dataobj.get_object_reference(cr, uid, 'base', 'action_menu_admin') if model != 'ir.actions.act_window': return False return res_id except ValueError: return False def _get_group(self,cr, uid, context=None): dataobj = self.pool.get('ir.model.data') result = [] try: dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user') result.append(group_id) dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager') result.append(group_id) except ValueError: # If these groups does not exists anymore pass return result _defaults = { 'password': '', 'active': True, 'customer': False, 'menu_id': _get_menu, 'company_id': _get_company, 'company_ids': _get_companies, 'groups_id': _get_group, 'image': lambda self, cr, uid, ctx={}: self.pool.get('res.partner')._get_default_image(cr, uid, False, ctx, colorize=True), } # User can write 
on a few of his own fields (but not his groups for example) SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz'] # User can read a few of his own fields SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update'] def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'): def override_password(o): if 'password' in o and ('id' not in o or o['id'] != uid): o['password'] = '********' return o if fields and (ids == [uid] or ids == uid): for key in fields: if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')): break else: # safe fields only, so we read as super-user to bypass access rights uid = SUPERUSER_ID result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load) canwrite = self.pool.get('ir.model.access').check(cr, uid, 'res.users', 'write', False) if not canwrite: if isinstance(ids, (int, long)): result = override_password(result) else: result = map(override_password, result) return result def create(self, cr, uid, vals, context=None): user_id = super(res_users, self).create(cr, uid, vals, context=context) user = self.browse(cr, uid, user_id, context=context) if user.partner_id.company_id: user.partner_id.write({'company_id': user.company_id.id}) return user_id def write(self, cr, uid, ids, values, context=None): if not hasattr(ids, '__iter__'): ids = [ids] if ids == [uid]: for key in values.keys(): if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')): break else: if 'company_id' in values: if not (values['company_id'] in self.read(cr, SUPERUSER_ID, uid, ['company_ids'], context=context)['company_ids']): del values['company_id'] uid = 1 # safe fields only, so we write as super-user to bypass access rights res = super(res_users, self).write(cr, uid, ids, 
values, context=context) if 'company_id' in values: for user in self.browse(cr, uid, ids, context=context): # if partner is global we keep it that way if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']: user.partner_id.write({'company_id': user.company_id.id}) # clear caches linked to the users self.pool.get('ir.model.access').call_cache_clearing_methods(cr) clear = partial(self.pool.get('ir.rule').clear_cache, cr) map(clear, ids) db = cr.dbname if db in self._uid_cache: for id in ids: if id in self._uid_cache[db]: del self._uid_cache[db][id] self.context_get.clear_cache(self) return res def unlink(self, cr, uid, ids, context=None): if 1 in ids: raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by OpenERP (updates, module installation, ...)')) db = cr.dbname if db in self._uid_cache: for id in ids: if id in self._uid_cache[db]: del self._uid_cache[db][id] return super(res_users, self).unlink(cr, uid, ids, context=context) def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100): if not args: args=[] if not context: context={} ids = [] if name and operator in ['=', 'ilike']: ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context) if not ids: ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context) return self.name_get(cr, user, ids, context=context) def copy(self, cr, uid, id, default=None, context=None): user2copy = self.read(cr, uid, [id], ['login','name'])[0] default = dict(default or {}) if ('name' not in default) and ('partner_id' not in default): default['name'] = _("%s (copy)") % user2copy['name'] if 'login' not in default: default['login'] = _("%s (copy)") % user2copy['login'] return super(res_users, self).copy(cr, uid, id, default, context) def copy_data(self, cr, uid, ids, default=None, context=None): if default is None: default = {} 
default.update({'login_date': False}) return super(res_users, self).copy_data(cr, uid, ids, default, context=context) @tools.ormcache(skiparg=2) def context_get(self, cr, uid, context=None): user = self.browse(cr, SUPERUSER_ID, uid, context) result = {} for k in self._all_columns.keys(): if k.startswith('context_'): context_key = k[8:] elif k in ['lang', 'tz']: context_key = k else: context_key = False if context_key: res = getattr(user,k) or False if isinstance(res, browse_record): res = res.id result[context_key] = res or False return result def action_get(self, cr, uid, context=None): dataobj = self.pool.get('ir.model.data') data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my') return dataobj.browse(cr, uid, data_id, context=context).res_id def check_super(self, passwd): if passwd == tools.config['admin_passwd']: return True else: raise openerp.exceptions.AccessDenied() def check_credentials(self, cr, uid, password): """ Override this method to plug additional authentication methods""" res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)]) if not res: raise openerp.exceptions.AccessDenied() def login(self, db, login, password): if not password: return False user_id = False cr = pooler.get_db(db).cursor() try: # autocommit: our single update request will be performed atomically. # (In this way, there is no opportunity to have two transactions # interleaving their cr.execute()..cr.commit() calls and have one # of them rolled back due to a concurrent access.) cr.autocommit(True) # check if user exists res = self.search(cr, SUPERUSER_ID, [('login','=',login)]) if res: user_id = res[0] # check credentials self.check_credentials(cr, user_id, password) # We effectively unconditionally write the res_users line. 
# Even w/ autocommit there's a chance the user row will be locked, # in which case we can't delay the login just for the purpose of # update the last login date - hence we use FOR UPDATE NOWAIT to # try to get the lock - fail-fast # Failing to acquire the lock on the res_users row probably means # another request is holding it. No big deal, we don't want to # prevent/delay login in that case. It will also have been logged # as a SQL error, if anyone cares. try: # NO KEY introduced in PostgreSQL 9.3 http://www.postgresql.org/docs/9.3/static/release-9-3.html#AEN115299 update_clause = 'NO KEY UPDATE' if cr._cnx.server_version >= 90300 else 'UPDATE' cr.execute("SELECT id FROM res_users WHERE id=%%s FOR %s NOWAIT" % update_clause, (user_id,), log_exceptions=False) cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,)) except Exception: _logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True) except openerp.exceptions.AccessDenied: _logger.info("Login failed for db:%s login:%s", db, login) user_id = False finally: cr.close() return user_id def authenticate(self, db, login, password, user_agent_env): """Verifies and returns the user ID corresponding to the given ``login`` and ``password`` combination, or False if there was no matching user. :param str db: the database on which user is trying to authenticate :param str login: username :param str password: user password :param dict user_agent_env: environment dictionary describing any relevant environment attributes """ uid = self.login(db, login, password) if uid == openerp.SUPERUSER_ID: # Successfully logged in as admin! # Attempt to guess the web base url... 
if user_agent_env and user_agent_env.get('base_location'): cr = pooler.get_db(db).cursor() try: base = user_agent_env['base_location'] ICP = self.pool.get('ir.config_parameter') if not ICP.get_param(cr, uid, 'web.base.url.freeze'): ICP.set_param(cr, uid, 'web.base.url', base) cr.commit() except Exception: _logger.exception("Failed to update web.base.url configuration parameter") finally: cr.close() return uid def check(self, db, uid, passwd): """Verifies that the given (uid, password) is authorized for the database ``db`` and raise an exception if it is not.""" if not passwd: # empty passwords disallowed for obvious security reasons raise openerp.exceptions.AccessDenied() if self._uid_cache.get(db, {}).get(uid) == passwd: return cr = pooler.get_db(db).cursor() try: self.check_credentials(cr, uid, passwd) if self._uid_cache.has_key(db): self._uid_cache[db][uid] = passwd else: self._uid_cache[db] = {uid:passwd} finally: cr.close() def change_password(self, cr, uid, old_passwd, new_passwd, context=None): """Change current user password. Old password must be provided explicitly to prevent hijacking an existing user session, or for cases where the cleartext password is not used to authenticate requests. :return: True :raise: openerp.exceptions.AccessDenied when old password is wrong :raise: except_osv when new password is not set or empty """ self.check(cr.dbname, uid, old_passwd) if new_passwd: return self.write(cr, uid, uid, {'password': new_passwd}) raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!")) def preference_save(self, cr, uid, ids, context=None): return { 'type': 'ir.actions.client', 'tag': 'reload', } def preference_change_password(self, cr, uid, ids, context=None): return { 'type': 'ir.actions.client', 'tag': 'change_password', 'target': 'new', } def has_group(self, cr, uid, group_ext_id): """Checks whether user belongs to given group. :param str group_ext_id: external ID (XML ID) of the group. 
Must be provided in fully-qualified form (``module.ext_id``), as there is no implicit module to use.. :return: True if the current user is a member of the group with the given external ID (XML ID), else False. """ assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified" module, ext_id = group_ext_id.split('.') cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN (SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""", (uid, module, ext_id)) return bool(cr.fetchone()) # # Extension of res.groups and res.users with a relation for "implied" or # "inherited" groups. Once a user belongs to a group, it automatically belongs # to the implied groups (transitively). # class cset(object): """ A cset (constrained set) is a set of elements that may be constrained to be a subset of other csets. Elements added to a cset are automatically added to its supersets. Cycles in the subset constraints are supported. """ def __init__(self, xs): self.supersets = set() self.elements = set(xs) def subsetof(self, other): if other is not self: self.supersets.add(other) other.update(self.elements) def update(self, xs): xs = set(xs) - self.elements if xs: # xs will eventually be empty in case of a cycle self.elements.update(xs) for s in self.supersets: s.update(xs) def __iter__(self): return iter(self.elements) def concat(ls): """ return the concatenation of a list of iterables """ res = [] for l in ls: res.extend(l) return res class groups_implied(osv.osv): _inherit = 'res.groups' def _get_trans_implied(self, cr, uid, ids, field, arg, context=None): "computes the transitive closure of relation implied_ids" memo = {} # use a memo for performance and cycle avoidance def computed_set(g): if g not in memo: memo[g] = cset(g.implied_ids) for h in g.implied_ids: computed_set(h).subsetof(memo[g]) return memo[g] res = {} for g in self.browse(cr, SUPERUSER_ID, ids, context): res[g.id] = map(int, computed_set(g)) return res _columns = { 
'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid', string='Inherits', help='Users of this group automatically inherit those groups'), 'trans_implied_ids': fields.function(_get_trans_implied, type='many2many', relation='res.groups', string='Transitively inherits'), } def create(self, cr, uid, values, context=None): users = values.pop('users', None) gid = super(groups_implied, self).create(cr, uid, values, context) if users: # delegate addition of users to add implied groups self.write(cr, uid, [gid], {'users': users}, context) return gid def write(self, cr, uid, ids, values, context=None): res = super(groups_implied, self).write(cr, uid, ids, values, context) if values.get('users') or values.get('implied_ids'): # add all implied groups (to all users of each group) for g in self.browse(cr, uid, ids): gids = map(int, g.trans_implied_ids) vals = {'users': [(4, u.id) for u in g.users]} super(groups_implied, self).write(cr, uid, gids, vals, context) return res class users_implied(osv.osv): _inherit = 'res.users' def create(self, cr, uid, values, context=None): groups = values.pop('groups_id', None) user_id = super(users_implied, self).create(cr, uid, values, context) if groups: # delegate addition of groups to add implied groups self.write(cr, uid, [user_id], {'groups_id': groups}, context) return user_id def write(self, cr, uid, ids, values, context=None): if not isinstance(ids,list): ids = [ids] res = super(users_implied, self).write(cr, uid, ids, values, context) if values.get('groups_id'): # add implied groups for all users for user in self.browse(cr, uid, ids): gs = set(concat([g.trans_implied_ids for g in user.groups_id])) vals = {'groups_id': [(4, g.id) for g in gs]} super(users_implied, self).write(cr, uid, [user.id], vals, context) return res # # Extension of res.groups and res.users for the special groups view in the users # form. 
This extension presents groups with selection and boolean widgets: # - Groups are shown by application, with boolean and/or selection fields. # Selection fields typically defines a role "Name" for the given application. # - Uncategorized groups are presented as boolean fields and grouped in a # section "Others". # # The user form view is modified by an inherited view (base.user_groups_view); # the inherited view replaces the field 'groups_id' by a set of reified group # fields (boolean or selection fields). The arch of that view is regenerated # each time groups are changed. # # Naming conventions for reified groups fields: # - boolean field 'in_group_ID' is True iff # ID is in 'groups_id' # - boolean field 'in_groups_ID1_..._IDk' is True iff # any of ID1, ..., IDk is in 'groups_id' # - selection field 'sel_groups_ID1_..._IDk' is ID iff # ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk} def name_boolean_group(id): return 'in_group_' + str(id) def name_boolean_groups(ids): return 'in_groups_' + '_'.join(map(str, ids)) def name_selection_groups(ids): return 'sel_groups_' + '_'.join(map(str, ids)) def is_boolean_group(name): return name.startswith('in_group_') def is_boolean_groups(name): return name.startswith('in_groups_') def is_selection_groups(name): return name.startswith('sel_groups_') def is_reified_group(name): return is_boolean_group(name) or is_boolean_groups(name) or is_selection_groups(name) def get_boolean_group(name): return int(name[9:]) def get_boolean_groups(name): return map(int, name[10:].split('_')) def get_selection_groups(name): return map(int, name[11:].split('_')) def partition(f, xs): "return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))" yes, nos = [], [] for x in xs: (yes if f(x) else nos).append(x) return yes, nos class groups_view(osv.osv): _inherit = 'res.groups' def create(self, cr, uid, values, context=None): res = super(groups_view, self).create(cr, uid, values, context) 
self.update_user_groups_view(cr, uid, context) return res def write(self, cr, uid, ids, values, context=None): res = super(groups_view, self).write(cr, uid, ids, values, context) self.update_user_groups_view(cr, uid, context) return res def unlink(self, cr, uid, ids, context=None): res = super(groups_view, self).unlink(cr, uid, ids, context) self.update_user_groups_view(cr, uid, context) return res def update_user_groups_view(self, cr, uid, context=None): # the view with id 'base.user_groups_view' inherits the user form view, # and introduces the reified group fields if not context or context.get('install_mode'): # use installation/admin language for translatable names in the view context = dict(context or {}) context.update(self.pool['res.users'].context_get(cr, uid)) view = self.get_user_groups_view(cr, uid, context) if view: xml1, xml2 = [], [] xml1.append(E.separator(string=_('Application'), colspan="4")) for app, kind, gs in self.get_groups_by_application(cr, uid, context): # hide groups in category 'Hidden' (except to group_no_one) attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 'base.module_category_hidden' else {} if kind == 'selection': # application name with a selection field field_name = name_selection_groups(map(int, gs)) xml1.append(E.field(name=field_name, **attrs)) xml1.append(E.newline()) else: # application separator with boolean fields app_name = app and app.name or _('Other') xml2.append(E.separator(string=app_name, colspan="4", **attrs)) for g in gs: field_name = name_boolean_group(g.id) xml2.append(E.field(name=field_name, **attrs)) xml = E.field(*(xml1 + xml2), name="groups_id", position="replace") xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS")) xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8") view.write({'arch': xml_content}) return True def get_user_groups_view(self, cr, uid, context=None): try: view = self.pool.get('ir.model.data').get_object(cr, SUPERUSER_ID, 
'base', 'user_groups_view', context) assert view and view._table_name == 'ir.ui.view' except Exception: view = False return view def get_application_groups(self, cr, uid, domain=None, context=None): return self.search(cr, uid, domain or []) def get_groups_by_application(self, cr, uid, context=None): """ return all groups classified by application (module category), as a list of pairs: [(app, kind, [group, ...]), ...], where app and group are browse records, and kind is either 'boolean' or 'selection'. Applications are given in sequence order. If kind is 'selection', the groups are given in reverse implication order. """ def linearized(gs): gs = set(gs) # determine sequence order: a group should appear after its implied groups order = dict.fromkeys(gs, 0) for g in gs: for h in gs.intersection(g.trans_implied_ids): order[h] -= 1 # check whether order is total, i.e., sequence orders are distinct if len(set(order.itervalues())) == len(gs): return sorted(gs, key=lambda g: order[g]) return None # classify all groups by application gids = self.get_application_groups(cr, uid, context=context) by_app, others = {}, [] for g in self.browse(cr, uid, gids, context): if g.category_id: by_app.setdefault(g.category_id, []).append(g) else: others.append(g) # build the result res = [] apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0) for app in apps: gs = linearized(by_app[app]) if gs: res.append((app, 'selection', gs)) else: res.append((app, 'boolean', by_app[app])) if others: res.append((False, 'boolean', others)) return res class users_view(osv.osv): _inherit = 'res.users' def create(self, cr, uid, values, context=None): self._set_reified_groups(values) return super(users_view, self).create(cr, uid, values, context) def write(self, cr, uid, ids, values, context=None): self._set_reified_groups(values) return super(users_view, self).write(cr, uid, ids, values, context) def _set_reified_groups(self, values): """ reflect reified group fields in values['groups_id'] """ 
if 'groups_id' in values: # groups are already given, ignore group fields for f in filter(is_reified_group, values.iterkeys()): del values[f] return add, remove = [], [] for f in values.keys(): if is_boolean_group(f): target = add if values.pop(f) else remove target.append(get_boolean_group(f)) elif is_boolean_groups(f): if not values.pop(f): remove.extend(get_boolean_groups(f)) elif is_selection_groups(f): remove.extend(get_selection_groups(f)) selected = values.pop(f) if selected: add.append(selected) # update values *only* if groups are being modified, otherwise # we introduce spurious changes that might break the super.write() call. if add or remove: # remove groups in 'remove' and add groups in 'add' values['groups_id'] = [(3, id) for id in remove] + [(4, id) for id in add] def default_get(self, cr, uid, fields, context=None): group_fields, fields = partition(is_reified_group, fields) fields1 = (fields + ['groups_id']) if group_fields else fields values = super(users_view, self).default_get(cr, uid, fields1, context) self._get_reified_groups(group_fields, values) return values def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'): if not fields: fields = self.fields_get(cr, uid, context=context).keys() group_fields, fields = partition(is_reified_group, fields) if not 'groups_id' in fields: fields.append('groups_id') res = super(users_view, self).read(cr, uid, ids, fields, context=context, load=load) if res: for values in (res if isinstance(res, list) else [res]): self._get_reified_groups(group_fields, values) return res def _get_reified_groups(self, fields, values): """ compute the given reified group fields from values['groups_id'] """ gids = set(values.get('groups_id') or []) for f in fields: if is_boolean_group(f): values[f] = get_boolean_group(f) in gids elif is_boolean_groups(f): values[f] = not gids.isdisjoint(get_boolean_groups(f)) elif is_selection_groups(f): selected = [gid for gid in get_selection_groups(f) if gid in gids] 
values[f] = selected and selected[-1] or False def fields_get(self, cr, uid, allfields=None, context=None, write_access=True): res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access) # add reified groups fields if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'): return res for app, kind, gs in self.pool.get('res.groups').get_groups_by_application(cr, uid, context): if kind == 'selection': # selection group field tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment] res[name_selection_groups(map(int, gs))] = { 'type': 'selection', 'string': app and app.name or _('Other'), 'selection': [(False, '')] + [(g.id, g.name) for g in gs], 'help': '\n'.join(tips), 'exportable': False, 'selectable': False, } else: # boolean group fields for g in gs: res[name_boolean_group(g.id)] = { 'type': 'boolean', 'string': g.name, 'help': g.comment, 'exportable': False, 'selectable': False, } return res # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
//// [tests/cases/conformance/async/es5/awaitCallExpression/awaitCallExpression4_es5.ts] //// //// [awaitCallExpression4_es5.ts] declare var a: boolean; declare var p: Promise<boolean>; declare function fn(arg0: boolean, arg1: boolean, arg2: boolean): void; declare var o: { fn(arg0: boolean, arg1: boolean, arg2: boolean): void; }; declare var pfn: Promise<{ (arg0: boolean, arg1: boolean, arg2: boolean): void; }>; declare var po: Promise<{ fn(arg0: boolean, arg1: boolean, arg2: boolean): void; }>; declare function before(): void; declare function after(): void; async function func(): Promise<void> { before(); var b = (await pfn)(a, a, a); after(); } //// [awaitCallExpression4_es5.js] "use strict"; function func() { return __awaiter(this, void 0, void 0, function* () { before(); var b = (yield pfn)(a, a, a); after(); }); }
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/awaitCallExpression4_es5(target=es2015).js
/** * \file platform_time.h * * \brief Mbed TLS Platform time abstraction */ /* * Copyright The Mbed TLS Contributors * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later */ #ifndef MBEDTLS_PLATFORM_TIME_H #define MBEDTLS_PLATFORM_TIME_H #include "mbedtls/build_info.h" #ifdef __cplusplus extern "C" { #endif /* * The time_t datatype */ #if defined(MBEDTLS_PLATFORM_TIME_TYPE_MACRO) typedef MBEDTLS_PLATFORM_TIME_TYPE_MACRO mbedtls_time_t; #else /* For time_t */ #include <time.h> typedef time_t mbedtls_time_t; #endif /* MBEDTLS_PLATFORM_TIME_TYPE_MACRO */ #if defined(MBEDTLS_PLATFORM_MS_TIME_TYPE_MACRO) typedef MBEDTLS_PLATFORM_MS_TIME_TYPE_MACRO mbedtls_ms_time_t; #else #include <stdint.h> #include <inttypes.h> typedef int64_t mbedtls_ms_time_t; #endif /* MBEDTLS_PLATFORM_MS_TIME_TYPE_MACRO */ /** * \brief Get time in milliseconds. * * \return Monotonically-increasing current time in milliseconds. * * \note Define MBEDTLS_PLATFORM_MS_TIME_ALT to be able to provide an * alternative implementation * * \warning This function returns a monotonically-increasing time value from a * start time that will differ from platform to platform, and possibly * from run to run of the process. * */ mbedtls_ms_time_t mbedtls_ms_time(void); /* * The function pointers for time */ #if defined(MBEDTLS_PLATFORM_TIME_ALT) extern mbedtls_time_t (*mbedtls_time)(mbedtls_time_t *time); /** * \brief Set your own time function pointer * * \param time_func the time function implementation * * \return 0 */ int mbedtls_platform_set_time(mbedtls_time_t (*time_func)(mbedtls_time_t *time)); #else #if defined(MBEDTLS_PLATFORM_TIME_MACRO) #define mbedtls_time MBEDTLS_PLATFORM_TIME_MACRO #else #define mbedtls_time time #endif /* MBEDTLS_PLATFORM_TIME_MACRO */ #endif /* MBEDTLS_PLATFORM_TIME_ALT */ #ifdef __cplusplus } #endif #endif /* platform_time.h */
c
github
https://github.com/nodejs/node
deps/LIEF/third-party/mbedtls/include/mbedtls/platform_time.h
from datetime import ( datetime, timedelta, ) from io import StringIO import itertools from textwrap import dedent import numpy as np import pytest from pandas.errors import Pandas4Warning import pandas.util._test_decorators as td import pandas as pd from pandas import ( Categorical, DataFrame, Series, Timestamp, date_range, option_context, ) import pandas._testing as tm from pandas.core.internals.blocks import NumpyBlock # Segregated collection of methods that require the BlockManager internal data # structure class TestDataFrameBlockInternals: def test_setitem_invalidates_datetime_index_freq(self): # GH#24096 altering a datetime64tz column inplace invalidates the # `freq` attribute on the underlying DatetimeIndex dti = date_range("20130101", periods=3, tz="US/Eastern") ts = dti[1] df = DataFrame({"B": dti}) assert df["B"]._values.freq is None df.iloc[1, 0] = pd.NaT assert df["B"]._values.freq is None # check that the DatetimeIndex was not altered in place assert dti.freq == "D" assert dti[1] == ts def test_cast_internals(self, float_frame): msg = "Passing a BlockManager to DataFrame" with tm.assert_produces_warning( Pandas4Warning, match=msg, check_stacklevel=False ): casted = DataFrame(float_frame._mgr, dtype=int) expected = DataFrame(float_frame._series, dtype=int) tm.assert_frame_equal(casted, expected) with tm.assert_produces_warning( Pandas4Warning, match=msg, check_stacklevel=False ): casted = DataFrame(float_frame._mgr, dtype=np.int32) expected = DataFrame(float_frame._series, dtype=np.int32) tm.assert_frame_equal(casted, expected) def test_consolidate(self, float_frame): float_frame["E"] = 7.0 consolidated = float_frame._consolidate() assert len(consolidated._mgr.blocks) == 1 # Ensure copy, do I want this? 
recons = consolidated._consolidate() assert recons is not consolidated tm.assert_frame_equal(recons, consolidated) float_frame["F"] = 8.0 assert len(float_frame._mgr.blocks) == 3 return_value = float_frame._consolidate_inplace() assert return_value is None assert len(float_frame._mgr.blocks) == 1 def test_consolidate_inplace(self, float_frame): # triggers in-place consolidation for letter in range(ord("A"), ord("Z")): float_frame[chr(letter)] = chr(letter) def test_modify_values(self, float_frame): with pytest.raises(ValueError, match="read-only"): float_frame.values[5] = 5 assert (float_frame.values[5] != 5).all() def test_boolean_set_uncons(self, float_frame): float_frame["E"] = 7.0 expected = float_frame.values.copy() expected[expected > 1] = 2 float_frame[float_frame > 1] = 2 tm.assert_almost_equal(expected, float_frame.values) def test_constructor_with_convert(self): # this is actually mostly a test of lib.maybe_convert_objects # #2845 df = DataFrame({"A": [2**63 - 1]}) result = df["A"] expected = Series(np.asarray([2**63 - 1], np.int64), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [2**63]}) result = df["A"] expected = Series(np.asarray([2**63], np.uint64), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [datetime(2005, 1, 1), True]}) result = df["A"] expected = Series( np.asarray([datetime(2005, 1, 1), True], np.object_), name="A" ) tm.assert_series_equal(result, expected) df = DataFrame({"A": [None, 1]}) result = df["A"] expected = Series(np.asarray([np.nan, 1], np.float64), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [1.0, 2]}) result = df["A"] expected = Series(np.asarray([1.0, 2], np.float64), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [1.0 + 2.0j, 3]}) result = df["A"] expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex128), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [1.0 + 2.0j, 3.0]}) result = df["A"] expected = 
Series(np.asarray([1.0 + 2.0j, 3.0], np.complex128), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [1.0 + 2.0j, True]}) result = df["A"] expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [1.0, None]}) result = df["A"] expected = Series(np.asarray([1.0, np.nan], np.float64), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [1.0 + 2.0j, None]}) result = df["A"] expected = Series(np.asarray([1.0 + 2.0j, np.nan], np.complex128), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [2.0, 1, True, None]}) result = df["A"] expected = Series(np.asarray([2.0, 1, True, None], np.object_), name="A") tm.assert_series_equal(result, expected) df = DataFrame({"A": [2.0, 1, datetime(2006, 1, 1), None]}) result = df["A"] expected = Series( np.asarray([2.0, 1, datetime(2006, 1, 1), None], np.object_), name="A" ) tm.assert_series_equal(result, expected) def test_construction_with_mixed(self, float_string_frame, using_infer_string): # mixed-type frames float_string_frame["datetime"] = datetime.now() float_string_frame["timedelta"] = timedelta(days=1, seconds=1) assert float_string_frame["datetime"].dtype == "M8[us]" assert float_string_frame["timedelta"].dtype == "m8[us]" result = float_string_frame.dtypes expected = Series( [np.dtype("float64")] * 4 + [ np.dtype("object") if not using_infer_string else pd.StringDtype(na_value=np.nan), np.dtype("datetime64[us]"), np.dtype("timedelta64[us]"), ], index=[*list("ABCD"), "foo", "datetime", "timedelta"], ) tm.assert_series_equal(result, expected) def test_construction_with_conversions(self): # convert from a numpy array of non-ns timedelta64; as of 2.0 this does # *not* convert arr = np.array([1, 2, 3], dtype="timedelta64[s]") df = DataFrame({"A": arr}) expected = DataFrame( {"A": pd.timedelta_range("00:00:01", periods=3, freq="s")}, index=range(3) ) 
tm.assert_numpy_array_equal(df["A"].to_numpy(), arr) expected = DataFrame( { "dt1": Timestamp("20130101").as_unit("s"), "dt2": date_range("20130101", periods=3).astype("M8[s]"), # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'), # FIXME: don't leave commented-out }, index=range(3), ) assert expected.dtypes["dt1"] == "M8[s]" assert expected.dtypes["dt2"] == "M8[s]" dt1 = np.datetime64("2013-01-01") dt2 = np.array( ["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]" ) df = DataFrame({"dt1": dt1, "dt2": dt2}) # df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01 # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]') # FIXME: don't leave commented-out tm.assert_frame_equal(df, expected) def test_constructor_compound_dtypes(self): # GH 5191 # compound dtypes should raise not-implementederror def f(dtype): data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9)) return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype) msg = "compound dtypes are not implemented in the DataFrame constructor" with pytest.raises(NotImplementedError, match=msg): f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) # pre-2.0 these used to work (though results may be unexpected) with pytest.raises(TypeError, match="argument must be"): f("int64") with pytest.raises(TypeError, match="argument must be"): f("float64") # 10822 msg = "^Unknown datetime string format, unable to parse: aa$" with pytest.raises(ValueError, match=msg): f("M8[ns]") def test_pickle_float_string_frame(self, float_string_frame, temp_file): unpickled = tm.round_trip_pickle(float_string_frame, temp_file) tm.assert_frame_equal(float_string_frame, unpickled) # buglet float_string_frame._mgr.ndim def test_pickle_empty(self, temp_file): empty_frame = DataFrame() unpickled = tm.round_trip_pickle(empty_frame, temp_file) repr(unpickled) def test_pickle_empty_tz_frame(self, timezone_frame, temp_file): unpickled = tm.round_trip_pickle(timezone_frame, temp_file) 
tm.assert_frame_equal(timezone_frame, unpickled) def test_consolidate_datetime64(self): # numpy vstack bug df = DataFrame( { "starting": pd.to_datetime( [ "2012-06-21 00:00", "2012-06-23 07:00", "2012-06-23 16:30", "2012-06-25 08:00", "2012-06-26 12:00", ] ), "ending": pd.to_datetime( [ "2012-06-23 07:00", "2012-06-23 16:30", "2012-06-25 08:00", "2012-06-26 12:00", "2012-06-27 08:00", ] ), "measure": [77, 65, 77, 0, 77], } ) ser_starting = df.starting ser_starting.index = ser_starting.values ser_starting = ser_starting.tz_localize("US/Eastern") ser_starting = ser_starting.tz_convert("UTC") ser_starting.index.name = "starting" ser_ending = df.ending ser_ending.index = ser_ending.values ser_ending = ser_ending.tz_localize("US/Eastern") ser_ending = ser_ending.tz_convert("UTC") ser_ending.index.name = "ending" df.starting = ser_starting.index df.ending = ser_ending.index tm.assert_index_equal(pd.DatetimeIndex(df.starting), ser_starting.index) tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index) def test_is_mixed_type(self, float_frame, float_string_frame): assert not float_frame._is_mixed_type assert float_string_frame._is_mixed_type def test_stale_cached_series_bug_473(self): # this is chained, but ok with option_context("chained_assignment", None): Y = DataFrame( np.random.default_rng(2).random((4, 4)), index=("a", "b", "c", "d"), columns=("e", "f", "g", "h"), ) repr(Y) Y["e"] = Y["e"].astype("object") with tm.raises_chained_assignment_error(): Y["g"]["c"] = np.nan repr(Y) Y.sum() Y["g"].sum() assert not pd.isna(Y["g"]["c"]) def test_strange_column_corruption_issue(self, performance_warning): # TODO(wesm): Unclear how exactly this is related to internal matters df = DataFrame(index=[0, 1]) df[0] = np.nan wasCol = {} with tm.assert_produces_warning( performance_warning, raise_on_extra_warnings=False ): for i, dt in enumerate(df.index): for col in range(100, 200): if col not in wasCol: wasCol[col] = 1 df[col] = np.nan df.loc[dt, col] = i myid = 100 
first = len(df.loc[pd.isna(df[myid]), [myid]]) second = len(df.loc[pd.isna(df[myid]), [myid]]) assert first == second == 0 def test_constructor_no_pandas_array(self): # Ensure that NumpyExtensionArray isn't allowed inside Series # See https://github.com/pandas-dev/pandas/issues/23995 for more. arr = Series([1, 2, 3]).array result = DataFrame({"A": arr}) expected = DataFrame({"A": [1, 2, 3]}) tm.assert_frame_equal(result, expected) assert isinstance(result._mgr.blocks[0], NumpyBlock) assert result._mgr.blocks[0].is_numeric def test_add_column_with_pandas_array(self): # GH 26390 df = DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) df["c"] = pd.arrays.NumpyExtensionArray(np.array([1, 2, None, 3], dtype=object)) df2 = DataFrame( { "a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"], "c": pd.arrays.NumpyExtensionArray( np.array([1, 2, None, 3], dtype=object) ), } ) assert type(df["c"]._mgr.blocks[0]) == NumpyBlock assert df["c"]._mgr.blocks[0].is_object assert type(df2["c"]._mgr.blocks[0]) == NumpyBlock assert df2["c"]._mgr.blocks[0].is_object tm.assert_frame_equal(df, df2) def test_update_inplace_sets_valid_block_values(): # https://github.com/pandas-dev/pandas/issues/33457 df = DataFrame({"a": Series([1, 2, None], dtype="category")}) # inplace update of a single column with tm.raises_chained_assignment_error(): df["a"].fillna(1, inplace=True) # check we haven't put a Series into any block.values assert isinstance(df._mgr.blocks[0].values, Categorical) def get_longley_data(): # From statsmodels.datasets.longley # This specific dataset seems to trigger races in Pandas 3.0.0 more readily # than data frames used elsewhere in the tests longley_csv = StringIO( dedent( """"Obs","GNPDEFL","GNP","UNEMP","ARMED","POP","YEAR" 1,83,234289,2356,1590,107608,1947 2,88.5,259426,2325,1456,108632,1948 3,88.2,258054,3682,1616,109773,1949 4,89.5,284599,3351,1650,110929,1950 5,96.2,328975,2099,3099,112075,1951 6,98.1,346999,1932,3594,113270,1952 7,99,365385,1870,3547,115094,1953 
8,100,363112,3578,3350,116219,1954 9,101.2,397469,2904,3048,117388,1955 10,104.6,419180,2822,2857,118734,1956 11,108.4,442769,2936,2798,120445,1957 12,110.8,444546,4681,2637,121950,1958 13,112.6,482704,3813,2552,123366,1959 14,114.2,502601,3931,2514,125368,1960 15,115.7,518173,4806,2572,127852,1961 16,116.9,554894,4007,2827,130081,1962 """ ) ) return pd.read_csv(longley_csv).iloc[:, [1, 2, 3, 4, 5, 6]].astype(float) # See gh-63685, comparisons and copying led to races in statsmodels tests # # This test spawns a thread pool, so it shouldn't run under xdist. # It generates warnings, so it needs warnings to be thread-safe as well @td.skip_if_thread_unsafe_warnings @pytest.mark.single_cpu def test_multithreaded_reading(): def numpy_assert(data, b): b.wait() tm.assert_almost_equal((data + 1) - 1, data.copy()) tm.run_multithreaded( numpy_assert, max_workers=8, arguments=(get_longley_data(),), pass_barrier=True ) def safe_is_const(s): try: return np.ptp(s) == 0.0 and np.any(s != 0.0) except Exception: return False def concat(data, b): b.wait() x = data.copy() nobs = len(x) trendarr = np.fliplr(np.vander(np.arange(1, nobs + 1, dtype=np.float64), 1)) x.apply(safe_is_const, 0) trendarr = DataFrame(trendarr, index=x.index, columns=["const"]) x = [trendarr, x] x = pd.concat(x[::1], axis=1) tm.assert_frame_equal(x, x) tm.run_multithreaded( concat, max_workers=8, arguments=(get_longley_data(),), pass_barrier=True )
python
github
https://github.com/pandas-dev/pandas
pandas/tests/frame/test_block_internals.py
import collections import functools import re import socket import string import inspect from threading import Timer, RLock def lchop(string, prefix): """Removes a prefix from string :param string: String, possibly prefixed with prefix :param prefix: Prefix to remove from string :returns: string without the prefix """ if string.startswith(prefix): return string[len(prefix):] return string def popwhile(predicate, iterable): """Generator function yielding items of iterable while predicate holds for each item :param predicate: function taking an item returning bool :param iterable: iterable :returns: iterable (generator function) """ while iterable: item = iterable.pop() if predicate(item): yield item else: break def partition(iterable, limit, key=lambda x: x): def pop_partition(): sum = 0.0 while sum < limit and iterable: sum += key(iterable[-1]) yield iterable.pop() partitions = [] iterable.sort(reverse=True) while iterable: partitions.append(list(pop_partition())) return partitions def round_dict(dic, places): """ Rounds all values in a dict containing only numeric types to `places` decimal places. If places is None, round to INT. 
""" if places is None: for key, value in dic.items(): dic[key] = round(value) else: for key, value in dic.items(): dic[key] = round(value, places) class ModuleList(collections.UserList): def __init__(self, status_handler, class_finder): self.status_handler = status_handler self.finder = class_finder super().__init__() def append(self, module, *args, **kwargs): module = self.finder.instanciate_class_from_module( module, *args, **kwargs) module.registered(self.status_handler) super().append(module) return module def get(self, find_id): find_id = int(find_id) for module in self: if id(module) == find_id: return module class KeyConstraintDict(collections.UserDict): """ A dict implementation with sets of valid and required keys :param valid_keys: Set of valid keys :param required_keys: Set of required keys, must be a subset of valid_keys """ class MissingKeys(Exception): def __init__(self, keys): self.keys = keys def __init__(self, valid_keys, required_keys): super().__init__() self.valid_keys = valid_keys self.required_keys = set(required_keys) self.seen_keys = set() def __setitem__(self, key, value): """Trying to add an invalid key will raise KeyError """ if key in self.valid_keys: self.seen_keys.add(key) self.data[key] = value else: raise KeyError(key) def __delitem__(self, key): self.seen_keys.remove(key) del self.data[key] def __iter__(self): """Iteration will raise a MissingKeys exception unless all required keys are set """ if self.missing(): raise self.MissingKeys(self.missing()) return self.data.__iter__() def missing(self): """Returns a set of keys that are required but not set """ return self.required_keys - (self.seen_keys & self.required_keys) def convert_position(pos, json): if pos < 0: pos = len(json) + (pos + 1) return pos def flatten(l): """ Flattens a hierarchy of nested lists into a single list containing all elements in order :param l: list of arbitrary types and lists :returns: list of arbitrary types """ l = list(l) i = 0 while i < len(l): while 
isinstance(l[i], list): if not l[i]: l.pop(i) i -= 1 break else: l[i:i + 1] = l[i] i += 1 return l def formatp(string, **kwargs): """ Function for advanced format strings with partial formatting This function consumes format strings with groups enclosed in brackets. A group enclosed in brackets will only become part of the result if all fields inside the group evaluate True in boolean contexts. Groups can be nested. The fields in a nested group do not count as fields in the enclosing group, i.e. the enclosing group will evaluate to an empty string even if a nested group would be eligible for formatting. Nesting is thus equivalent to a logical or of all enclosing groups with the enclosed group. Escaped brackets, i.e. \\\\[ and \\\\] are copied verbatim to output. :param string: Format string :param kwargs: keyword arguments providing data for the format string :returns: Formatted string """ def build_stack(string): """ Builds a stack with OpeningBracket, ClosingBracket and String tokens. Tokens have a level property denoting their nesting level. They also have a string property containing associated text (empty for all tokens but String tokens). 
""" class Token: string = "" class OpeningBracket(Token): pass class ClosingBracket(Token): pass class String(Token): def __init__(self, str): self.string = str TOKENS = { "[": OpeningBracket, "]": ClosingBracket, } stack = [] # Index of next unconsumed char next = 0 # Last consumed char prev = "" # Current char char = "" # Current level level = 0 while next < len(string): prev = char char = string[next] next += 1 if prev != "\\" and char in TOKENS: token = TOKENS[char]() token.index = next if char == "]": level -= 1 token.level = level if char == "[": level += 1 stack.append(token) else: if stack and isinstance(stack[-1], String): stack[-1].string += char else: token = String(char) token.level = level stack.append(token) return stack def build_tree(items, level=0): """ Builds a list-of-lists tree (in forward order) from a stack (reversed order), and formats the elements on the fly, discarding everything not eligible for inclusion. """ subtree = [] while items: nested = [] while items[0].level > level: nested.append(items.pop(0)) if nested: subtree.append(build_tree(nested, level + 1)) item = items.pop(0) if item.string: string = item.string if level == 0: subtree.append(string.format(**kwargs)) else: fields = re.findall(r"({(\w+)[^}]*})", string) successful_fields = 0 for fieldspec, fieldname in fields: if kwargs.get(fieldname, False): successful_fields += 1 if successful_fields == len(fields): subtree.append(string.format(**kwargs)) else: return [] return subtree def merge_tree(items): return "".join(flatten(items)).replace("\]", "]").replace("\[", "[") stack = build_stack(string) tree = build_tree(stack, 0) return merge_tree(tree) class TimeWrapper: """ A wrapper that implements __format__ and __bool__ for time differences and time spans. 
:param seconds: seconds (numeric) :param default_format: the default format to be used if no explicit format_spec is passed to __format__ Format string syntax: * %h, %m and %s are the hours, minutes and seconds without leading zeros (i.e. 0 to 59 for minutes and seconds) * %H, %M and %S are padded with a leading zero to two digits, i.e. 00 to 59 * %l and %L produce hours non-padded and padded but only if hours is not zero. If the hours are zero it produces an empty string. * %% produces a literal % * %E (only valid on beginning of the string) if the time is null, don't format anything but rather produce an empty string. If the time is non-null it is removed from the string. The formatted string is stripped, i.e. spaces on both ends of the result are removed """ class TimeTemplate(string.Template): delimiter = "%" idpattern = r"[a-zA-Z]" def __init__(self, seconds, default_format="%m:%S"): self.seconds = int(seconds) self.default_format = default_format def __bool__(self): """:returns: `bool(seconds)`, i.e. False if seconds == 0 and True otherwise """ return bool(self.seconds) def __format__(self, format_spec): """Formats the time span given the format_spec (or the default_format). """ format_spec = format_spec or self.default_format h = self.seconds // 3600 m, s = divmod(self.seconds % 3600, 60) l = h if h else "" L = "%02d" % h if h else "" if format_spec.startswith("%E"): format_spec = format_spec[2:] if not self.seconds: return "" return self.TimeTemplate(format_spec).substitute( h=h, m=m, s=s, H="%02d" % h, M="%02d" % m, S="%02d" % s, l=l, L=L, ).strip() def require(predicate): """Decorator factory for methods requiring a predicate. If the predicate is not fulfilled during a method call, the method call is skipped and None is returned. :param predicate: A callable returning a truth value :returns: Method decorator .. 
seealso:: :py:class:`internet` """ def decorator(method): @functools.wraps(method) def wrapper(*args, **kwargs): if predicate(): return method(*args, **kwargs) return None return wrapper return decorator class internet: """ Checks for internet connection by connecting to a server. Used server is determined by the `address` class variable which consists of server host name and port number. :rtype: bool .. seealso:: :py:func:`require` """ address = ("google-public-dns-a.google.com", 53) def __new__(cls): try: socket.create_connection(cls.address, 1).close() return True except OSError: return False def make_graph(values, lower_limit=0.0, upper_limit=100.0, style="blocks"): """ Draws a graph made of unicode characters. :param values: An array of values to graph. :param lower_limit: Minimum value for the y axis (or None for dynamic). :param upper_limit: Maximum value for the y axis (or None for dynamic). :param style: Drawing style ('blocks', 'braille-fill', 'braille-peak', or 'braille-snake'). 
:returns: Bar as a string """ values = [float(n) for n in values] mn, mx = min(values), max(values) mn = mn if lower_limit is None else min(mn, float(lower_limit)) mx = mx if upper_limit is None else max(mx, float(upper_limit)) extent = mx - mn if style == 'blocks': bar = '_▁▂▃▄▅▆▇█' bar_count = len(bar) - 1 if extent == 0: graph = '_' * len(values) else: graph = ''.join(bar[int((n - mn) / extent * bar_count)] for n in values) elif style in ['braille-fill', 'braille-peak', 'braille-snake']: # idea from https://github.com/asciimoo/drawille # unicode values from http://en.wikipedia.org/wiki/Braille vpad = values if len(values) % 2 == 0 else values + [mn] vscale = [round(4 * (vp - mn) / extent) for vp in vpad] l = len(vscale) // 2 # do the 2-character collapse separately for clarity if 'fill' in style: vbits = [[0, 0x40, 0x44, 0x46, 0x47][vs] for vs in vscale] elif 'peak' in style: vbits = [[0, 0x40, 0x04, 0x02, 0x01][vs] for vs in vscale] else: assert('snake' in style) # there are a few choices for what to put last in vb2. # arguable vscale[-1] from the _previous_ call is best. vb2 = [vscale[0]] + vscale + [0] vbits = [] for i in range(1, l + 1): c = 0 for j in range(min(vb2[i - 1], vb2[i], vb2[i + 1]), vb2[i] + 1): c |= [0, 0x40, 0x04, 0x02, 0x01][j] vbits.append(c) # 2-character collapse graph = '' for i in range(0, l, 2): b1 = vbits[i] b2 = vbits[i + 1] if b2 & 0x40: b2 = b2 - 0x30 b2 = b2 << 3 graph += chr(0x2800 + b1 + b2) else: raise NotImplementedError("Graph drawing style '%s' unimplemented." % style) return graph def make_vertical_bar(percentage, width=1): """ Draws a vertical bar made of unicode characters. :param value: A value between 0 and 100 :param width: How many characters wide the bar should be. 
:returns: Bar as a String """ bar = ' _▁▂▃▄▅▆▇█' percentage //= 10 if percentage < 0: output = bar[0] elif percentage >= len(bar): output = bar[-1] else: output = bar[percentage] return output * width def make_bar(percentage): """ Draws a bar made of unicode box characters. :param percentage: A value between 0 and 100 :returns: Bar as a string """ bars = [' ', '▏', '▎', '▍', '▌', '▋', '▋', '▊', '▊', '█'] tens = int(percentage / 10) ones = int(percentage) - tens * 10 result = tens * '█' if(ones >= 1): result = result + bars[ones] result = result + (10 - len(result)) * ' ' return result def user_open(url_or_command): """Open the specified paramater in the web browser if a URL is detected, othewrise pass the paramater to the shell as a subprocess. This function is inteded to bu used in on_leftclick/on_rightclick callbacks. :param url_or_command: String containing URL or command """ from urllib.parse import urlparse scheme = urlparse(url_or_command).scheme if scheme == 'http' or scheme == 'https': import webbrowser import os # webbrowser.open() sometimes prints a message for some reason and confuses i3 # Redirect stdout briefly to prevent this from happening. 
savout = os.dup(1) os.close(1) os.open(os.devnull, os.O_RDWR) try: webbrowser.open(url_or_command) finally: os.dup2(savout, 1) else: import subprocess subprocess.Popen(url_or_command, shell=True) class MultiClickHandler(object): def __init__(self, callback_handler, timeout): self.callback_handler = callback_handler self.timeout = timeout self.lock = RLock() self._timer_id = 0 self.timer = None self.button = None self.cb = None def set_timer(self, button, cb): with self.lock: self.clear_timer() self.timer = Timer(self.timeout, self._timer_function, args=[self._timer_id]) self.button = button self.cb = cb self.timer.start() def clear_timer(self): with self.lock: if self.timer is None: return self._timer_id += 1 # Invalidate existent timer self.timer.cancel() # Cancel the existent timer self.timer = None self.button = None self.cb = None def _timer_function(self, timer_id): with self.lock: if self._timer_id != timer_id: return self.callback_handler(self.button, self.cb) self.clear_timer() def check_double(self, button): if self.timer is None: return False ret = True if button != self.button: self.callback_handler(self.button, self.cb) ret = False self.clear_timer() return ret def get_module(function): """Function decorator for retrieving the ``self`` argument from the stack. Intended for use with callbacks that need access to a modules variables, for example: .. code:: python from i3pystatus import Status, get_module from i3pystatus.core.command import execute status = Status(...) # other modules etc. 
@get_module def display_ip_verbose(module): execute('sh -c "ip addr show dev {dev} | xmessage -file -"'.format(dev=module.interface)) status.register("network", interface="wlan1", on_leftclick=display_ip_verbose) """ @functools.wraps(function) def call_wrapper(*args, **kwargs): stack = inspect.stack() caller_frame_info = stack[1] self = caller_frame_info[0].f_locals["self"] # not completly sure whether this is necessary # see note in Python docs about stack frames del stack function(self, *args, **kwargs) return call_wrapper
unknown
codeparrot/codeparrot-clean
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1

package transit

import (
	"context"
	"fmt"
	"testing"

	"github.com/hashicorp/vault/helper/testhelpers"
	"github.com/hashicorp/vault/sdk/logical"
)

// TestTransit_Restore exercises the transit backend's backup/restore
// endpoints over a table of combinations of: key already existing (Seed),
// the "force" flag being absent/true/false, and restoring under a new name.
func TestTransit_Restore(t *testing.T) {
	// Test setup:
	// - Create a key
	// - Configure it to be exportable, allowing deletion, and backups
	// - Capture backup
	// - Delete key
	// - Run test cases
	//
	// Each test case should start with no key present. If the 'Seed' parameter is
	// in the struct, we'll start by restoring it (without force) to run that test
	// as if the key already existed
	keyType := "aes256-gcm96"
	b, s := createBackendWithStorage(t)
	keyName := testhelpers.RandomWithPrefix("my-key")

	// Create a key
	keyReq := &logical.Request{
		Path:      "keys/" + keyName,
		Operation: logical.UpdateOperation,
		Storage:   s,
		Data: map[string]interface{}{
			"type":       keyType,
			"exportable": true,
		},
	}
	resp, err := b.HandleRequest(context.Background(), keyReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("resp: %#v\nerr: %v", resp, err)
	}

	// Configure the key to allow its deletion and backup
	configReq := &logical.Request{
		Path:      fmt.Sprintf("keys/%s/config", keyName),
		Operation: logical.UpdateOperation,
		Storage:   s,
		Data: map[string]interface{}{
			"deletion_allowed":       true,
			"allow_plaintext_backup": true,
		},
	}
	resp, err = b.HandleRequest(context.Background(), configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("resp: %#v\nerr: %v", resp, err)
	}

	// Take a backup of the key; the plaintext backup blob is reused by every
	// test case below as the restore payload.
	backupReq := &logical.Request{
		Path:      "backup/" + keyName,
		Operation: logical.ReadOperation,
		Storage:   s,
	}
	resp, err = b.HandleRequest(context.Background(), backupReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("resp: %#v\nerr: %v", resp, err)
	}
	backupKey := resp.Data["backup"].(string)
	if backupKey == "" {
		t.Fatal("failed to get a backup")
	}

	// Delete the key to start test cases with clean slate
	keyReq.Operation = logical.DeleteOperation
	resp, err = b.HandleRequest(context.Background(), keyReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("resp: %#v\nerr: %v", resp, err)
	}

	// helper func to get a pointer value for a boolean
	boolPtr := func(b bool) *bool {
		return &b
	}

	keyExitsError := fmt.Errorf("key %q already exists", keyName)

	testCases := []struct {
		Name string
		// Seed determines if we start the test by restoring the initial backup we
		// took, to test a restore operation based on the key existing or not
		Seed bool
		// Force is a pointer to differentiate between default false and given false
		Force *bool
		// The error we expect, if any
		ExpectedErr error

		// RestoreName is used to restore the key to a different name
		RestoreName string
	}{
		{
			// key does not already exist
			Name: "Default restore",
		},
		{
			// key already exists
			Name:        "Restore-without-force",
			Seed:        true,
			ExpectedErr: keyExitsError,
		},
		{
			// key already exists, use force to force a restore
			Name:  "Restore-with-force",
			Seed:  true,
			Force: boolPtr(true),
		},
		{
			// using force shouldn't matter if the key doesn't exist
			Name:  "Restore-with-force-no-seed",
			Force: boolPtr(true),
		},
		{
			// key already exists, restore to new name
			Name:        "Restore-new-name",
			Seed:        true,
			RestoreName: "new-key",
		},
		{
			// key already exists, restore to bad path, should error
			Name:        "Restore-new-name-bad-path",
			Seed:        true,
			RestoreName: "sub/path/new-key",
			ExpectedErr: ErrInvalidKeyName,
		},
		{
			// using force shouldn't matter if the restore key name is different
			Name:        "Restore-with-force-seed-new-name",
			Seed:        true,
			Force:       boolPtr(true),
			RestoreName: "other-key",
		},
		{
			// using force shouldn't matter if the restore key name is different
			Name:        "Restore-with-out-force-seed-new-name",
			Seed:        true,
			Force:       boolPtr(false),
			RestoreName: "other-key",
		},
		{
			// using force shouldn't matter if the key doesn't exist
			Name:  "Restore-force-false",
			Force: boolPtr(false),
		},
		{
			// using false force should still error
			// NOTE(review): this case reuses the Name "Restore-force-false" from the
			// previous entry; t.Run will disambiguate it as "Restore-force-false#01".
			Name:        "Restore-force-false",
			Seed:        true,
			Force:       boolPtr(false),
			ExpectedErr: keyExitsError,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			var resp *logical.Response
			var err error
			if tc.Seed {
				// restore our key to test a pre-existing key
				seedRestoreReq := &logical.Request{
					Path:      "restore",
					Operation: logical.UpdateOperation,
					Storage:   s,
					Data: map[string]interface{}{
						"backup": backupKey,
					},
				}
				// NOTE(review): ":=" shadows the outer resp/err declared above, so the
				// seed-restore result is only visible inside this if-block.
				resp, err := b.HandleRequest(context.Background(), seedRestoreReq)
				if resp != nil && resp.IsError() {
					t.Fatalf("resp: %#v\nerr: %v", resp, err)
				}
				if err != nil && tc.ExpectedErr == nil {
					t.Fatalf("did not expect an error in SeedKey restore: %s", err)
				}
			}

			// Restore to "restore" or "restore/<RestoreName>" depending on the case.
			restorePath := "restore"
			if tc.RestoreName != "" {
				restorePath = fmt.Sprintf("%s/%s", restorePath, tc.RestoreName)
			}

			restoreReq := &logical.Request{
				Path:      restorePath,
				Operation: logical.UpdateOperation,
				Storage:   s,
				Data: map[string]interface{}{
					"backup": backupKey,
				},
			}

			// Only send "force" when the case explicitly sets it, so the
			// default-absent behavior is also covered.
			if tc.Force != nil {
				restoreReq.Data["force"] = *tc.Force
			}

			resp, err = b.HandleRequest(context.Background(), restoreReq)
			if resp != nil && resp.IsError() {
				t.Fatalf("resp: %#v\nerr: %v", resp, err)
			}
			if err == nil && tc.ExpectedErr != nil {
				t.Fatalf("expected an error, but got none")
			}
			if err != nil && tc.ExpectedErr == nil {
				t.Fatalf("unexpected error:%s", err)
			}
			if err != nil && tc.ExpectedErr != nil {
				if err.Error() != tc.ExpectedErr.Error() {
					t.Fatalf("expected error: (%s), got: (%s)", tc.ExpectedErr.Error(), err.Error())
				}
			}

			readKeyName := keyName
			if tc.RestoreName != "" {
				readKeyName = tc.RestoreName
			}

			// read the key and make sure it's there
			readReq := &logical.Request{
				Path:      "keys/" + readKeyName,
				Operation: logical.ReadOperation,
				Storage:   s,
			}
			resp, _ = b.HandleRequest(context.Background(), readReq)
			if resp != nil && resp.IsError() {
				t.Fatalf("resp: %#v\nerr: %v", resp, err)
			}
			if tc.ExpectedErr == nil && resp == nil {
				t.Fatal("expected to find a key, but got none")
			}

			// cleanup / delete key after each run
			keyReq.Operation = logical.DeleteOperation
			resp, err = b.HandleRequest(context.Background(), keyReq)
			if err != nil || (resp != nil && resp.IsError()) {
				t.Fatalf("resp: %#v\nerr: %v", resp, err)
			}

			// cleanup / delete restore key after each run, if it was created
			if tc.RestoreName != "" && tc.ExpectedErr == nil {
				readReq.Operation = logical.DeleteOperation
				resp, err = b.HandleRequest(context.Background(), readReq)
				if err != nil || (resp != nil && resp.IsError()) {
					t.Fatalf("resp: %#v\nerr: %v", resp, err)
				}
			}
		})
	}
}
go
github
https://github.com/hashicorp/vault
builtin/logical/transit/path_restore_test.go
"""attitude constraint corrections Revision ID: 420f9b8b9e Revises: 162f93d4393 Create Date: 2015-06-24 16:52:02.606637 """ # revision identifiers, used by Alembic. revision = '420f9b8b9e' down_revision = '162f93d4393' branch_labels = None depends_on = None from alembic import op def upgrade(): op.drop_constraint('fk_repos_id', 'users_attitude', type_='foreignkey') op.drop_constraint('fk_users_id', 'users_attitude', type_='foreignkey') op.create_foreign_key( 'fk_users_id', 'users_attitude', 'users', ['user_id'], ['id'], ondelete='CASCADE' ) op.create_foreign_key( 'fk_repos_id', 'users_attitude', 'repos', ['repo_id'], ['id'], ondelete='CASCADE' ) def downgrade(): op.drop_constraint('fk_repos_id', 'users_attitude', type_='foreignkey') op.drop_constraint('fk_users_id', 'users_attitude', type_='foreignkey') op.create_foreign_key('fk_users_id', 'users_attitude', 'users', ['user_id'], ['id']) op.create_foreign_key('fk_repos_id', 'users_attitude', 'repos', ['repo_id'], ['id'])
unknown
codeparrot/codeparrot-clean
########################################################################
# $HeadURL$
# File: RequestProxyHandler.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/07/20 13:18:41
########################################################################
""" :mod: RequestProxyHandler

    .. module: RequestProxyHandler
    :synopsis: RequestProxy service
    .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com

    Careful with that axe, Eugene! Some 'transfer' requests are using local fs
    and they never should be forwarded to the central RequestManager.
"""
__RCSID__ = "$Id$"
##
# @file RequestProxyHandler.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/07/20 13:18:58
# @brief Definition of RequestProxyHandler class.

## imports
import os
from types import StringType
try:
  from hashlib import md5
except ImportError:
  from md5 import md5
## from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.RequestManagementSystem.Client.RequestContainer import RequestContainer
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Utilities.File import makeGuid


def initializeRequestProxyHandler( serviceInfo ):
  """ init RequestProxy handler

  Registers the periodic sweeper task that forwards cached requests.

  :param serviceInfo: whatever
  """
  gLogger.info("Initalizing RequestProxyHandler")
  gThreadScheduler.addPeriodicTask( 120, RequestProxyHandler.sweeper )
  return S_OK()


########################################################################
class RequestProxyHandler( RequestHandler ):
  """
  .. class:: RequestProxyHandler

  Accepts requests on behalf of the central RequestManager; requests that
  cannot be forwarded immediately are cached on local disk and swept later.

  :param RPCCLient requestManager: a RPCClient to RequestManager
  :param str cacheDir: os.path.join( workDir, "requestCache" )
  """
  __requestManager = None
  __cacheDir = None

  def initialize( self ):
    """ service initialisation

    :param self: self reference
    """
    gLogger.notice( "CacheDirectory: %s" % self.cacheDir() )
    return S_OK()

  @classmethod
  def requestManager( cls ):
    """ get request manager (lazily created, shared RPCClient) """
    if not cls.__requestManager:
      cls.__requestManager = RPCClient( "RequestManagement/RequestManager" )
    return cls.__requestManager

  @classmethod
  def cacheDir( cls ):
    """ get cache dir, creating it on first use """
    if not cls.__cacheDir:
      cls.__cacheDir = os.path.abspath( "requestCache" )
      if not os.path.exists( cls.__cacheDir ):
        os.mkdir( cls.__cacheDir )
    return cls.__cacheDir

  @classmethod
  def sweeper( cls ):
    """ move cached request to the central request manager

    :param cls: class reference
    """
    cacheDir = cls.cacheDir()
    ## cache dir empty?
    if not os.listdir( cacheDir ):
      gLogger.always("sweeper: CacheDir %s is empty, nothing to do" % cacheDir )
      return S_OK()
    else:
      ## read cache dir files, the oldest first, at most 30 per sweep
      cachedRequests = [ os.path.abspath( requestFile ) for requestFile in
                         sorted( filter( os.path.isfile,
                                         [ os.path.join( cacheDir, requestName )
                                           for requestName in os.listdir( cacheDir ) ] ),
                                 key = os.path.getctime ) ][:30]
      ## set cached requests to the central RequestManager
      for cachedFile in cachedRequests:
        try:
          ## read whole file through a context manager so the handle is
          ## always closed (the previous open().readlines() leaked it)
          with open( cachedFile, "r" ) as requestHandle:
            requestString = requestHandle.read()
          cachedRequest = RequestContainer( requestString )
          requestName = cachedRequest.getAttribute("RequestName")["Value"]
          ## cibak: hack for DISET requests
          if requestName == "Unknown":
            cachedRequest.setAttribute( "RequestName", makeGuid() )
            requestName = cachedRequest.getAttribute("RequestName")["Value"]
          setRequest = cls.requestManager().setRequest( requestName, requestString )
          if not setRequest["OK"]:
            gLogger.error("sweeper: unable to set request '%s' @ RequestManager: %s" % ( requestName,
                                                                                         setRequest["Message"] ) )
            continue
          gLogger.info("sweeper: successfully set request '%s' @ RequestManager" % requestName )
          os.unlink( cachedFile )
        except Exception as error:
          gLogger.exception( "sweeper: hit by exception %s" % str(error) )
          return S_ERROR( "sweeper: hit by exception: %s" % str(error) )
      return S_OK()

  def __saveRequest( self, requestName, requestString ):
    """ save request string to the working dir cache

    The cache file name is the md5 digest of the serialised request, so
    identical payloads collapse onto a single file.

    :param self: self reference
    :param str requestName: request name
    :param str requestString: xml-serialised request
    """
    try:
      requestFile = os.path.join( self.cacheDir(), md5(requestString).hexdigest() )
      ## context manager guarantees the file is closed even if write() fails
      with open( requestFile, "w+" ) as request:
        request.write( requestString )
      return S_OK( requestFile )
    except OSError as error:
      err = "unable to dump %s to cache file: %s" % ( requestName, str(error) )
      gLogger.exception( err )
      return S_ERROR( err )

  types_getStatus = []
  def export_getStatus( self ):
    """ get number of requests in cache """
    try:
      cachedRequests = len( os.listdir( self.cacheDir() ) )
    except OSError as error:
      err = "getStatus: unable to list cache dir contents: %s" % str(error)
      gLogger.exception( err )
      return S_ERROR( err )
    return S_OK( cachedRequests )

  types_setRequest = [ StringType, StringType ]
  def export_setRequest( self, requestName, requestString ):
    """ forward request from local RequestDB to central RequestClient

    Falls back to the local disk cache if the central RequestManager is
    unreachable; cached requests are forwarded later by :meth:`sweeper`.

    :param self: self reference
    :param str requestName: request name
    :param str requestString: request serilised to xml
    """
    gLogger.info("setRequest: got '%s' request" % requestName )
    forwardable = self.__forwardable( requestString )
    if not forwardable["OK"]:
      gLogger.error("setRequest: unable to forward %s: %s" % ( requestName,
                                                               forwardable["Message"] ) )
      return forwardable
    setRequest = self.requestManager().setRequest( requestName, requestString )
    if not setRequest["OK"]:
      gLogger.error("setReqeuest: unable to set request '%s' @ RequestManager: %s" % ( requestName,
                                                                                       setRequest["Message"] ) )
      ## put request to the request file cache
      save = self.__saveRequest( requestName, requestString )
      if not save["OK"]:
        gLogger.error("setRequest: unable to save request to the cache: %s" % save["Message"] )
        return save
      gLogger.info("setRequest: %s is saved to %s file" % ( requestName, save["Value"] ) )
      return S_OK( { "set" : False, "saved" : True } )
    gLogger.info("setRequest: request '%s' has been set to the RequestManager" % requestName )
    return S_OK( { "set" : True, "saved" : False } )

  @staticmethod
  def __forwardable( requestString ):
    """ check if request if forwardable

    The sub-request of type transfer:putAndRegister, removal:physicalRemoval
    and removal:reTransfer are definitely not, they should be executed locally,
    as they are using local fs.

    :param str requestString: XML-serialised request
    """
    request = RequestContainer( requestString )
    subRequests = request.getSubRequests( "transfer" )["Value"] + request.getSubRequests( "removal" )["Value"]
    for subRequest in subRequests:
      if subRequest["Attributes"]["Operation"] in ( "putAndRegister", "physicalRemoval", "reTransfer" ):
        return S_ERROR("found operation '%s' that cannot be forwarded" % subRequest["Attributes"]["Operation"] )
    return S_OK()
unknown
codeparrot/codeparrot-clean
# This Python file uses the following encoding: utf-8 # (C) 2020 Muthiah Annamalai # This file is part of open-tamil project import string from tamil.utf8 import get_letters from tamil.numeral import num2tamilstr def normalize_numeral_text(text_tokens): """ Input @text_tokens = ["இரு","நண்பர்கள்","௹","100","கொடுத்து","உணவு","உண்டனர்."] ^ - எண் 100 என்பது சொல்வடிவில் நூறு என்று வெளியிடப்படும். """ rval = [] for word in text_tokens: if (word[0] in string.digits) or word[0] == '-': try: val = num2tamilstr(word) rval.append(val) except Exception as e: rval.append(word) else: rval.append(word) return rval def normalize_punctuation_text(text_tokens): """ Input @text_tokens = ["இரு","நண்பர்கள்","௹","100","கொடுத்து","உணவு","உண்டனர்."] ^ ரூபாய் என்று மாற்றி வெளியிடப்படும். """ special_char_map = {'!':'ஆச்சரியக்குறி', '!=':'சமன்பாடு இல்லை', ',':'துணைக்குறி', '#':'எண்', '$':'டாலர்', '™':'முத்திரை', '©':'பதிப்புரிமை', '௹':'ரூபாய்', '₹':'ரூபாய்', '£':'பவுண்டு', '%':'சதவிகிதம்', '&':'மற்றும்', '*':'நட்சத்திரக்குறி', '(':'அடைப்புகுக்குறி தொடக்கம்', ')':'அடைப்புகுக்குறி முடிவு', '[':'அடைப்புகுக்குறி தொடக்கம்', ']':'அடைப்புகுக்குறி முடிவு', '{':'அடைப்புகுக்குறி தொடக்கம்', '}':'அடைப்புகுக்குறி முடிவு', '+':'கூட்டல்குறி', '-':'கழித்தல்குறி', 'x':'பெருக்கல்குறி', '/':'வகுத்தல்குறி', '=':'சமன்பாடுக்குறி', ':':'புள்ளி', '"':'மேற்கோள்குறி', '\'':'மேற்கோள்குறி', ';':'அரைப்புள்ளி', '.':'முற்றுப்புள்ளி', '?':'கேள்விக்குறி'} rval = [] for char in text_tokens: rval.append( special_char_map.get(char,char) ) return rval
unknown
codeparrot/codeparrot-clean
//===--- ASTDumper.h - Swift AST Dumper flags -------------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2025 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This file defines types that are used to control the level of detail printed // by the AST dumper. // //===----------------------------------------------------------------------===// #ifndef SWIFT_AST_AST_DUMPER_H #define SWIFT_AST_AST_DUMPER_H namespace swift { /// Describes the nature of requests that should be kicked off, if any, to /// compute members and top-level decls when dumping an AST. enum class ASTDumpMemberLoading { /// Dump cached members if available; if they are not, do not kick off any /// parsing or type-checking requests. None, /// Dump parsed members, kicking off a parsing request if necessary to compute /// them, but not performing additional type-checking. Parsed, /// Dump all fully-type checked members, kicking off any requests necessary to /// compute them. TypeChecked, }; } // namespace swift #endif // SWIFT_AST_AST_DUMPER_H
c
github
https://github.com/apple/swift
include/swift/AST/ASTDumper.h
from pandac.PandaModules import *
from toontown.toonbase.ToontownBattleGlobals import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
from otp.otpbase import OTPGlobals

class Experience:
    """Per-toon gag experience, one uint16 counter per gag track.

    Tracks are addressed either by index or by name (any string-typed
    `track` argument is looked up in Tracks). Experience is serialised
    to/from a fixed-width datagram of uint16 values for network transfer.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('Experience')

    def __init__(self, expStr = None, owner = None):
        # owner is consulted in addExp() for its game-access level;
        # expStr, when given, is a packed net string produced by makeNetString()
        self.owner = owner
        if expStr == None:
            self.experience = []
            for track in xrange(0, len(Tracks)):
                self.experience.append(StartingLevel)
        else:
            self.experience = self.makeFromNetString(expStr)
        return

    def __str__(self):
        return str(self.experience)

    def makeNetString(self):
        # Pack the per-track counters as consecutive uint16s.
        dataList = self.experience
        datagram = PyDatagram()
        for track in xrange(0, len(Tracks)):
            datagram.addUint16(dataList[track])
        dgi = PyDatagramIterator(datagram)
        return dgi.getRemainingBytes()

    def makeFromNetString(self, netString):
        # Inverse of makeNetString(): read len(Tracks) uint16s.
        dataList = []
        dg = PyDatagram(netString)
        dgi = PyDatagramIterator(dg)
        for track in xrange(0, len(Tracks)):
            dataList.append(dgi.getUint16())
        return dataList

    def addExp(self, track, amount = 1):
        # Full-access owners are capped at MaxSkill; unpaid owners are capped
        # at the per-track UnpaidMaxSkills value, except that experience
        # already above the unpaid cap is left untouched (the "+= 0" branch).
        if type(track) == type(''):
            track = Tracks.index(track)
        self.notify.debug('adding %d exp to track %d' % (amount, track))
        if self.owner.getGameAccess() == OTPGlobals.AccessFull:
            if self.experience[track] + amount <= MaxSkill:
                self.experience[track] += amount
            else:
                self.experience[track] = MaxSkill
        elif self.experience[track] + amount <= UnpaidMaxSkills[track]:
            self.experience[track] += amount
        elif self.experience[track] > UnpaidMaxSkills[track]:
            self.experience[track] += 0
        else:
            self.experience[track] = UnpaidMaxSkills[track]

    def maxOutExp(self):
        # Set every track just below the "uber" threshold.
        for track in xrange(0, len(Tracks)):
            self.experience[track] = MaxSkill - UberSkill

    def maxOutExpMinusOne(self):
        for track in xrange(0, len(Tracks)):
            self.experience[track] = MaxSkill - 1

    def makeExpHigh(self):
        # One point below each track's top level threshold.
        for track in xrange(0, len(Tracks)):
            self.experience[track] = Levels[track][len(Levels[track]) - 1] - 1

    def makeExpRegular(self):
        # Debug helper: randomized experience near the top of each track.
        import random
        for track in xrange(0, len(Tracks)):
            rank = random.choice((0, int(random.random() * 1500.0), int(random.random() * 2000.0)))
            self.experience[track] = Levels[track][len(Levels[track]) - 1] - rank

    def zeroOutExp(self):
        for track in xrange(0, len(Tracks)):
            self.experience[track] = StartingLevel

    def setAllExp(self, num):
        for track in xrange(0, len(Tracks)):
            self.experience[track] = num

    def getExp(self, track):
        if type(track) == type(''):
            track = Tracks.index(track)
        return self.experience[track]

    def setExp(self, track, exp):
        if type(track) == type(''):
            track = Tracks.index(track)
        self.experience[track] = exp

    def getExpLevel(self, track):
        # Highest level whose threshold the current experience meets.
        if type(track) == type(''):
            track = Tracks.index(track)
        level = 0
        for amount in Levels[track]:
            if self.experience[track] >= amount:
                level = Levels[track].index(amount)
        return level

    def getTotalExp(self):
        total = 0
        for level in self.experience:
            total += level
        return total

    def getNextExpValue(self, track, curSkill = None):
        # Returns the first level threshold above curSkill, or the top
        # threshold if curSkill is already at/above every threshold.
        # NOTE(review): original whitespace was mangled in transit; this
        # nesting (early return inside the loop) is the reading consistent
        # with the duplicated `return retVal` — confirm against upstream.
        if curSkill == None:
            curSkill = self.experience[track]
        retVal = Levels[track][len(Levels[track]) - 1]
        for amount in Levels[track]:
            if curSkill < amount:
                retVal = amount
                return retVal
        return retVal

    def getNewGagIndexList(self, track, extraSkill):
        # Collect the indices of every level boundary crossed when
        # extraSkill points are added to the track's current experience.
        retList = []
        curSkill = self.experience[track]
        nextExpValue = self.getNextExpValue(track, curSkill)
        finalGagFlag = 0
        while curSkill + extraSkill >= nextExpValue and curSkill < nextExpValue and not finalGagFlag:
            retList.append(Levels[track].index(nextExpValue))
            newNextExpValue = self.getNextExpValue(track, nextExpValue)
            if newNextExpValue == nextExpValue:
                # reached the top threshold; stop to avoid looping forever
                finalGagFlag = 1
            else:
                nextExpValue = newNextExpValue
        return retList
unknown
codeparrot/codeparrot-clean
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.connect.storage;

import org.apache.kafka.connect.runtime.WorkerConfigTransformer;

import java.util.Map;

/**
 * Holds the connector configuration that task configurations were generated from,
 * applying {@link WorkerConfigTransformer#transform(Map) transformation} lazily on
 * first request and caching the result thereafter.
 */
public class AppliedConnectorConfig {

    private final Map<String, String> rawConfig;
    private Map<String, String> transformedConfig;

    /**
     * Wrap a raw (not-yet-{@link WorkerConfigTransformer#transform(Map) transformed})
     * connector configuration.
     * @param rawConfig the non-transformed connector configuration; may be null
     */
    public AppliedConnectorConfig(Map<String, String> rawConfig) {
        this.rawConfig = rawConfig;
    }

    /**
     * Return the transformed connector config, computing and caching it on the
     * first call. Once computed, the same transformed config is returned on
     * every subsequent call regardless of the transformer passed in.
     * <p>
     * Thread-safe: the method is synchronized, so transformation happens at
     * most once and all callers observe the same cached result.
     * @param configTransformer the transformer to use if no transformed
     *                          connector config has been cached yet; may be null,
     *                          in which case the raw config is cached as-is
     * @return the possibly-cached, transformed, connector config; may be null
     */
    public synchronized Map<String, String> transformedConfig(WorkerConfigTransformer configTransformer) {
        boolean needsTransform = transformedConfig == null && rawConfig != null;
        if (needsTransform) {
            transformedConfig = (configTransformer == null)
                    ? rawConfig
                    : configTransformer.transform(rawConfig);
        }
        return transformedConfig;
    }

}
java
github
https://github.com/apache/kafka
connect/runtime/src/main/java/org/apache/kafka/connect/storage/AppliedConnectorConfig.java
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package collections

import "iter"

// Map represents an associative array from keys of type K to values of type V.
//
// A caller-provided "key function" defines how to produce a comparable unique
// key for each distinct value of type K.
//
// Map operations are not concurrency-safe. Use external locking if multiple
// goroutines might modify the map concurrently or if one goroutine might
// read a map while another is modifying it.
type Map[K, V any] struct {
	elems map[UniqueKey[K]]MapElem[K, V]
	key   func(K) UniqueKey[K]
}

// MapElem represents a single element of a map.
type MapElem[K, V any] struct {
	K K
	V V
}

// NewMap constructs a new map whose key type knows how to calculate its own
// unique keys, by implementing [UniqueKeyer] of itself.
func NewMap[K UniqueKeyer[K], V any](elems ...MapElem[K, V]) Map[K, V] {
	m := NewMapFunc[K, V](K.UniqueKey)
	for _, elems := range elems {
		m.Put(elems.K, elems.V)
	}
	return m
}

// NewMapFunc constructs a new map with the given "key function".
//
// A valid key function must produce only values of types that can be compared
// for equality using the Go == operator, and must guarantee that each unique
// value of K has a corresponding key that uniquely identifies it. The
// implementer of the key function can decide what constitutes a
// "unique value of K", based on the meaning of type K.
//
// Type V is unconstrained by the arguments, so callers must explicitly provide
// the key and value type arguments when calling this function.
func NewMapFunc[K, V any](keyFunc func(K) UniqueKey[K]) Map[K, V] {
	return Map[K, V]{
		elems: make(map[UniqueKey[K]]MapElem[K, V]),
		key:   keyFunc,
	}
}

// NewMapCmp constructs a new map for any comparable key type, using the
// built-in == operator as the definition of key equivalence.
//
// This is here for completeness in case it's useful when talking to a
// generalized API that operates on maps of any key type, but if your
// key type is fixed and known to be comparable then it's pointless to
// use [Map]; use Go's built-in map types instead, which will then avoid
// redundantly storing the keys twice.
func NewMapCmp[K comparable, V any]() Map[K, V] {
	return NewMapFunc[K, V](cmpUniqueKeyFunc[K])
}

// HasKey returns true if the map has an element with the given key, or
// false otherwise.
func (m Map[K, V]) HasKey(k K) bool {
	if m.key == nil {
		return false // an uninitialized map has no keys
	}
	uniq := m.key(k)
	_, exists := m.elems[uniq]
	return exists
}

// Get retrieves the value associated with the given key, or the zero value
// of V if no matching element exists in the map.
func (m Map[K, V]) Get(k K) V {
	ret, _ := m.GetOk(k)
	return ret
}

// GetOk is like [Map.Get] but also returns a second boolean result reporting
// whether a matching element was present in the map.
func (m Map[K, V]) GetOk(k K) (V, bool) {
	if m.key == nil {
		var zero V
		return zero, false // an uninitialized map has no keys
	}
	uniq := m.key(k)
	ret, ok := m.elems[uniq]
	return ret.V, ok
}

// Put writes a new element into the map, with the given key and value.
//
// If there is already an element with an equivalent key (as determined by the
// set's "key function") then the new element replaces that existing element.
func (m Map[K, V]) Put(k K, v V) {
	if m.key == nil {
		panic("Put into uninitialized collections.Map")
	}
	uniq := m.key(k)
	m.elems[uniq] = MapElem[K, V]{
		K: k,
		V: v,
	}
}

// Delete removes from the map the element with the given key, or does nothing
// if there is no such element.
func (m Map[K, V]) Delete(k K) {
	if m.key == nil {
		panic("Delete from uninitialized collections.Map")
	}
	uniq := m.key(k)
	delete(m.elems, uniq)
}

// All returns an iterator over the elements of the map, in an unspecified
// order.
//
// This is intended for use in a range-over-func statement, like this:
//
//	for k, v := range map.All() {
//		// do something with k and/or v
//	}
//
// Modifying the map during iteration causes unspecified results. Modifying
// the map concurrently with advancing the iterator causes undefined behavior
// including possible memory unsafety.
func (m Map[K, V]) All() iter.Seq2[K, V] {
	return func(yield func(K, V) bool) {
		for _, elem := range m.elems {
			if !yield(elem.K, elem.V) {
				return
			}
		}
	}
}

// Len returns the number of elements in the map.
func (m Map[K, V]) Len() int {
	return len(m.elems)
}
go
github
https://github.com/hashicorp/terraform
internal/collections/map.go
from __future__ import annotations from functools import partial import operator from pathlib import Path from typing import ( TYPE_CHECKING, Any, Literal, Self, cast, ) import warnings import numpy as np from pandas._config import ( get_option, using_string_dtype, ) from pandas._libs import ( lib, missing as libmissing, ) from pandas._libs.arrays import NDArrayBacked from pandas._libs.lib import ensure_string_array from pandas.compat import ( HAS_PYARROW, PYARROW_MIN_VERSION, ) from pandas.compat.numpy import function as nv from pandas.errors import Pandas4Warning from pandas.util._decorators import ( set_module, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ( ExtensionDtype, StorageExtensionDtype, register_extension_dtype, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_integer_dtype, is_object_dtype, is_string_dtype, pandas_dtype, ) from pandas.core import ( missing, nanops, ops, roperator, ) from pandas.core.algorithms import isin from pandas.core.array_algos import masked_reductions from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.floating import ( FloatingArray, FloatingDtype, ) from pandas.core.arrays.integer import ( IntegerArray, IntegerDtype, ) from pandas.core.arrays.numpy_ import NumpyExtensionArray from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer from pandas.core.missing import isna from pandas.io.formats import printing if HAS_PYARROW: import pyarrow as pa import pyarrow.compute as pc if TYPE_CHECKING: from collections.abc import MutableMapping import pyarrow from pandas._typing import ( ArrayLike, AxisInt, Dtype, DtypeObj, NumpySorter, NumpyValueArrayLike, Scalar, npt, type_t, ) from pandas import Series @set_module("pandas") @register_extension_dtype class StringDtype(StorageExtensionDtype): """ Extension dtype for string data. .. warning:: StringDtype is considered experimental. 
The implementation and parts of the API may change without warning. Parameters ---------- storage : {"python", "pyarrow"}, optional If not given, the value of ``pd.options.mode.string_storage``. na_value : {np.nan, pd.NA}, default pd.NA Whether the dtype follows NaN or NA missing value semantics. Attributes ---------- storage na_value Methods ------- None See Also -------- BooleanDtype : Extension dtype for boolean data. Examples -------- >>> pd.StringDtype() <StringDtype(na_value=<NA>)> >>> pd.StringDtype(storage="python") <StringDtype(storage='python', na_value=<NA>)> """ @property def name(self) -> str: # type: ignore[override] if self._na_value is libmissing.NA: return "string" else: return "str" #: StringDtype().na_value uses pandas.NA except the implementation that # follows NumPy semantics, which uses nan. @property def na_value(self) -> libmissing.NAType | float: # type: ignore[override] """ The missing value representation for this dtype. This value indicates which missing value semantics are used by this dtype. Returns ``np.nan`` for the default string dtype with NumPy semantics, and ``pd.NA`` for the opt-in string dtype with pandas NA semantics. See Also -------- isna : Detect missing values. NA : Missing value indicator for nullable dtypes. Examples -------- >>> ser = pd.Series(["a", "b"]) >>> ser.dtype <StringDtype(na_value=nan)> >>> ser.dtype.na_value nan """ return self._na_value @property def storage(self) -> str: """ The storage backend for this dtype. Can be either "pyarrow" or "python". See Also -------- StringDtype.na_value : The missing value for this dtype. 
Examples -------- >>> ser = pd.Series(["a", "b"]) >>> ser.dtype <StringDtype(na_value=nan)> >>> ser.dtype.storage 'pyarrow' """ return self._storage _metadata = ("storage", "_na_value") # type: ignore[assignment] def __init__( self, storage: str | None = None, na_value: libmissing.NAType | float = libmissing.NA, ) -> None: # infer defaults if storage is None: storage = get_option("mode.string_storage") if storage == "auto": if HAS_PYARROW: storage = "pyarrow" else: storage = "python" # validate options if storage not in {"python", "pyarrow"}: raise ValueError( f"Storage must be 'python' or 'pyarrow'. Got {storage} instead." ) if storage == "pyarrow" and not HAS_PYARROW: raise ImportError( f"pyarrow>={PYARROW_MIN_VERSION} is required for PyArrow " "backed StringArray." ) if isinstance(na_value, float) and np.isnan(na_value): # when passed a NaN value, always set to np.nan to ensure we use # a consistent NaN value (and we can use `dtype.na_value is np.nan`) na_value = np.nan elif na_value is not libmissing.NA: raise ValueError(f"'na_value' must be np.nan or pd.NA, got {na_value}") self._storage = cast(str, storage) self._na_value = na_value def __repr__(self) -> str: storage = "" if self.storage == "pyarrow" else "storage='python', " return f"<StringDtype({storage}na_value={self._na_value})>" def __eq__(self, other: object) -> bool: # we need to override the base class __eq__ because na_value (NA or NaN) # cannot be checked with normal `==` if isinstance(other, str): # TODO should dtype == "string" work for the NaN variant? 
if other == "string" or other == self.name: # noqa: PLR1714 (repeated-equality-comparison) return True try: other = self.construct_from_string(other) except (TypeError, ImportError): # TypeError if `other` is not a valid string for StringDtype # ImportError if pyarrow is not installed for "string[pyarrow]" return False if isinstance(other, type(self)): return self.storage == other.storage and self.na_value is other.na_value return False def __setstate__(self, state: MutableMapping[str, Any]) -> None: # back-compat for pandas < 2.3, where na_value did not yet exist self._storage = state.pop("storage", "python") self._na_value = state.pop("_na_value", libmissing.NA) def __hash__(self) -> int: # need to override __hash__ as well because of overriding __eq__ return super().__hash__() def __reduce__(self): return StringDtype, (self.storage, self.na_value) @property def type(self) -> type[str]: return str @classmethod def construct_from_string(cls, string) -> Self: """ Construct a StringDtype from a string. Parameters ---------- string : str The type of the name. The storage type will be taking from `string`. Valid options and their storage types are ========================== ============================================== string result storage ========================== ============================================== ``'string'`` pd.options.mode.string_storage, default python ``'string[python]'`` python ``'string[pyarrow]'`` pyarrow ========================== ============================================== Returns ------- StringDtype Raise ----- TypeError If the string is not a valid option. 
""" if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if string == "string": return cls() elif string == "str" and using_string_dtype(): return cls(na_value=np.nan) elif string == "string[python]": return cls(storage="python") elif string == "string[pyarrow]": return cls(storage="pyarrow") else: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") def construct_array_type(self) -> type_t[BaseStringArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.string_arrow import ( ArrowStringArray, ) if self.storage == "python" and self._na_value is libmissing.NA: return StringArray elif self.storage == "pyarrow" and self._na_value is libmissing.NA: return ArrowStringArray elif self.storage == "python": return StringArray else: return ArrowStringArray def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: storages = set() na_values = set() for dtype in dtypes: if isinstance(dtype, StringDtype): storages.add(dtype.storage) na_values.add(dtype.na_value) elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "T"): continue else: return None if len(storages) == 2: # if both python and pyarrow storage -> priority to pyarrow storage = "pyarrow" else: storage = next(iter(storages)) na_value: libmissing.NAType | float if len(na_values) == 2: # if both NaN and NA -> priority to NA na_value = libmissing.NA else: na_value = next(iter(na_values)) return StringDtype(storage=storage, na_value=na_value) def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> BaseStringArray: """ Construct StringArray from pyarrow Array/ChunkedArray. 
""" if self.storage == "pyarrow": from pandas.core.arrays.string_arrow import ( ArrowStringArray, _check_pyarrow_available, ) _check_pyarrow_available() if not pa.types.is_large_string(array.type): array = pc.cast(array, pa.large_string()) return ArrowStringArray(array, dtype=self) else: import pyarrow if isinstance(array, pyarrow.Array): chunks = [array] else: # pyarrow.ChunkedArray chunks = array.chunks results = [] for arr in chunks: # convert chunk by chunk to numpy and concatenate then, to avoid # overflow for large string data when concatenating the pyarrow arrays arr = arr.to_numpy(zero_copy_only=False) arr = ensure_string_array(arr, na_value=self.na_value) results.append(arr) if len(chunks) == 0: arr = np.array([], dtype=object) else: arr = np.concatenate(results) # Bypass validation inside StringArray constructor, see GH#47781 new_string_array = StringArray.__new__(StringArray) NDArrayBacked.__init__(new_string_array, arr, self) return new_string_array class BaseStringArray(ExtensionArray): """ Mixin class for StringArray, ArrowStringArray. """ dtype: StringDtype # TODO(4.0): Once the deprecation here is enforced, this method can be # removed and we use the parent class method instead. def _logical_method(self, other, op): if ( op in (roperator.ror_, roperator.rand_, roperator.rxor) and isinstance(other, np.ndarray) and other.dtype == bool ): # GH#60234 backward compatibility for the move to StringDtype in 3.0 op_name = op.__name__[1:].strip("_") warnings.warn( f"'{op_name}' operations between boolean dtype and {self.dtype} are " "deprecated and will raise in a future version. Explicitly " "cast the strings to a boolean dtype before operating instead.", Pandas4Warning, stacklevel=find_stack_level(), ) return op(other, self.astype(bool)) return NotImplemented def tolist(self) -> list: """ Return a list of the value. 
These are each a scalar type, which is a Python scalar (for str, int, float) or pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ---------- list Examples ---------- >>> arr = pd.array(["a", "b", "c"]) >>> arr.tolist() ['a', 'b', 'c'] """ if self.ndim > 1: return [x.tolist() for x in self] return list(self.to_numpy()) def _formatter(self, boxed: bool = False): formatter = partial( printing.pprint_thing, escape_chars=("\t", "\r", "\n"), quote_strings=not boxed, ) return formatter def _str_map( self, f, na_value=lib.no_default, dtype: Dtype | None = None, convert: bool = True, ): if self.dtype.na_value is np.nan: return self._str_map_nan_semantics(f, na_value=na_value, dtype=dtype) from pandas.arrays import BooleanArray if dtype is None: dtype = self.dtype if na_value is lib.no_default: na_value = self.dtype.na_value mask = isna(self) arr = np.asarray(self) if is_integer_dtype(dtype) or is_bool_dtype(dtype): constructor: type[IntegerArray | BooleanArray] if is_integer_dtype(dtype): constructor = IntegerArray else: constructor = BooleanArray na_value_is_na = isna(na_value) if na_value_is_na: na_value = 1 elif dtype == np.dtype("bool"): # GH#55736 na_value = bool(na_value) result = lib.map_infer_mask( arr, f, mask.view("uint8"), convert=False, na_value=na_value, # error: Argument 1 to "dtype" has incompatible type # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected # "Type[object]" dtype=np.dtype(cast(type, dtype)), ) if not na_value_is_na: mask[:] = False return constructor(result, mask) else: return self._str_map_str_or_object(dtype, na_value, arr, f, mask) def _str_map_str_or_object( self, dtype, na_value, arr: np.ndarray, f, mask: npt.NDArray[np.bool_], ): # _str_map helper for case where dtype is either string dtype or object if is_string_dtype(dtype) and not is_object_dtype(dtype): # i.e. 
StringDtype result = lib.map_infer_mask( arr, f, mask.view("uint8"), convert=False, na_value=na_value ) if self.dtype.storage == "pyarrow": import pyarrow as pa # TODO: shouldn't this already be caught my passed mask? # it isn't in test_extract_expand_capture_groups_index # mask = mask | np.array( # [x is libmissing.NA for x in result], dtype=bool # ) result = pa.array( result, mask=mask, type=pa.large_string(), from_pandas=True ) # error: "BaseStringArray" has no attribute "_from_pyarrow_array" return self._from_pyarrow_array(result) # type: ignore[attr-defined] else: # StringArray # error: Too many arguments for "BaseStringArray" return type(self)(result, dtype=self.dtype) # type: ignore[call-arg] else: # This is when the result type is object. We reach this when # -> We know the result type is truly object (e.g. .encode returns bytes # or .findall returns a list). # -> We don't know the result type. E.g. `.get` can return anything. return lib.map_infer_mask(arr, f, mask.view("uint8")) def _str_map_nan_semantics( self, f, na_value=lib.no_default, dtype: Dtype | None = None ): if dtype is None: dtype = self.dtype if na_value is lib.no_default: if is_bool_dtype(dtype): # NaN propagates as False na_value = False else: na_value = self.dtype.na_value mask = isna(self) arr = np.asarray(self) if is_integer_dtype(dtype) or is_bool_dtype(dtype): na_value_is_na = isna(na_value) if na_value_is_na: if is_integer_dtype(dtype): na_value = 0 else: # NaN propagates as False na_value = False result = lib.map_infer_mask( arr, f, mask.view("uint8"), convert=False, na_value=na_value, dtype=np.dtype(cast(type, dtype)), ) if na_value_is_na and is_integer_dtype(dtype) and mask.any(): # TODO: we could alternatively do this check before map_infer_mask # and adjust the dtype/na_value we pass there. Which is more # performant? 
result = result.astype("float64") result[mask] = np.nan return result else: return self._str_map_str_or_object(dtype, na_value, arr, f, mask) def view(self, dtype: Dtype | None = None) -> Self: if dtype is not None: raise TypeError("Cannot change data-type for string array.") return super().view() @set_module("pandas.arrays") # error: Definition of "_concat_same_type" in base class "NDArrayBacked" is # incompatible with definition in base class "ExtensionArray" class StringArray(BaseStringArray, NumpyExtensionArray): # type: ignore[misc] """ Extension array for string data. .. warning:: StringArray is considered experimental. The implementation and parts of the API may change without warning. Parameters ---------- values : array-like The array of data. .. warning:: Currently, this expects an object-dtype ndarray where the elements are Python strings or nan-likes (``None``, ``np.nan``, ``NA``). This may change without warning in the future. Use :meth:`pandas.array` with ``dtype="string"`` for a stable way of creating a `StringArray` from any sequence. StringArray accepts array-likes containing nan-likes(``None``, ``np.nan``) for the ``values`` parameter in addition to strings and :attr:`pandas.NA` dtype : StringDtype Dtype for the array. copy : bool, default False Whether to copy the array of data. Attributes ---------- None Methods ------- None See Also -------- :func:`array` The recommended function for creating a StringArray. Series.str The string methods are available on Series backed by a StringArray. Notes ----- StringArray returns a BooleanArray for comparison methods. Examples -------- >>> pd.array(["This is", "some text", None, "data."], dtype="string") <ArrowStringArray> ['This is', 'some text', <NA>, 'data.'] Length: 4, dtype: string Unlike arrays instantiated with ``dtype="object"``, ``StringArray`` will convert the values to strings. 
>>> pd.array(["1", 1], dtype="object") <NumpyExtensionArray> ['1', 1] Length: 2, dtype: object >>> pd.array(["1", 1], dtype="string") <ArrowStringArray> ['1', '1'] Length: 2, dtype: string However, instantiating StringArrays directly with non-strings will raise an error. For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`: >>> pd.array(["a", None, "c"], dtype="string[python]") == "a" <BooleanArray> [True, <NA>, False] Length: 3, dtype: boolean """ # undo the NumpyExtensionArray hack _typ = "extension" def __init__( self, values, *, dtype: StringDtype | None = None, copy: bool = False ) -> None: if dtype is None: dtype = StringDtype() values = extract_array(values) super().__init__(values, copy=copy) if not isinstance(values, type(self)): self._validate(dtype) NDArrayBacked.__init__( self, self._ndarray, dtype, ) def _validate(self, dtype: StringDtype) -> None: """Validate that we only store NA or strings.""" if dtype._na_value is libmissing.NA: if len(self._ndarray) and not lib.is_string_array( self._ndarray, skipna=True ): raise ValueError( "StringArray requires a sequence of strings or pandas.NA" ) if self._ndarray.dtype != "object": raise ValueError( "StringArray requires a sequence of strings or pandas.NA. Got " f"'{self._ndarray.dtype}' dtype instead." ) # Check to see if need to convert Na values to pd.NA if self._ndarray.ndim > 2: # Ravel if ndims > 2 b/c no cythonized version available lib.convert_nans_to_NA(self._ndarray.ravel("K")) else: lib.convert_nans_to_NA(self._ndarray) else: # Validate that we only store NaN or strings. if len(self._ndarray) and not lib.is_string_array( self._ndarray, skipna=True ): raise ValueError("StringArray requires a sequence of strings or NaN") if self._ndarray.dtype != "object": raise ValueError( "StringArray requires a sequence of strings " "or NaN. Got '{self._ndarray.dtype}' dtype instead." 
) # TODO validate or force NA/None to NaN def _validate_scalar(self, value): # used by NDArrayBackedExtensionIndex.insert if isna(value): return self.dtype.na_value elif not isinstance(value, str): raise TypeError( f"Invalid value '{value}' for dtype '{self.dtype}'. Value should be a " f"string or missing value, got '{type(value).__name__}' instead." ) return value @classmethod def _from_sequence( cls, scalars, *, dtype: Dtype | None = None, copy: bool = False ) -> Self: if dtype and not (isinstance(dtype, str) and dtype == "string"): dtype = pandas_dtype(dtype) assert isinstance(dtype, StringDtype) and dtype.storage == "python" elif using_string_dtype(): dtype = StringDtype(storage="python", na_value=np.nan) else: dtype = StringDtype(storage="python") from pandas.core.arrays.masked import BaseMaskedArray na_value = dtype.na_value if isinstance(scalars, BaseMaskedArray): # avoid costly conversion to object dtype na_values = scalars._mask result = scalars._data result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) result[na_values] = na_value else: if lib.is_pyarrow_array(scalars): # pyarrow array; we cannot rely on the "to_numpy" check in # ensure_string_array because calling scalars.to_numpy would set # zero_copy_only to True which caused problems see GH#52076 scalars = np.array(scalars) # convert non-na-likes to str, and nan-likes to StringDtype().na_value result = lib.ensure_string_array(scalars, na_value=na_value, copy=copy) # Manually creating new array avoids the validation step in the __init__, so is # faster. Refactor need for validation? 
new_string_array = cls.__new__(cls) NDArrayBacked.__init__(new_string_array, result, dtype) return new_string_array @classmethod def _from_sequence_of_strings( cls, strings, *, dtype: ExtensionDtype, copy: bool = False ) -> Self: return cls._from_sequence(strings, dtype=dtype, copy=copy) def _cast_pointwise_result(self, values) -> ArrayLike: result = super()._cast_pointwise_result(values) if isinstance(result.dtype, StringDtype): # Ensure we retain our same na_value/storage result = result.astype(self.dtype) return result @classmethod def _empty(cls, shape, dtype) -> StringArray: values = np.empty(shape, dtype=object) values[:] = dtype.na_value return cls(values, dtype=dtype).astype(dtype, copy=False) def __arrow_array__(self, type=None): """ Convert myself into a pyarrow Array. """ import pyarrow as pa if type is None: type = pa.string() values = self._ndarray.copy() values[self.isna()] = None return pa.array(values, type=type) def _values_for_factorize(self) -> tuple[np.ndarray, libmissing.NAType | float]: # type: ignore[override] arr = self._ndarray return arr, self.dtype.na_value def _maybe_convert_setitem_value(self, value): """Maybe convert value to be pyarrow compatible.""" if lib.is_scalar(value): if isna(value): value = self.dtype.na_value elif not isinstance(value, str): raise TypeError( f"Invalid value '{value}' for dtype '{self.dtype}'. Value should " f"be a string or missing value, got '{type(value).__name__}' " "instead." ) else: value = extract_array(value, extract_numpy=True) if not is_array_like(value): value = np.asarray(value, dtype=object) elif isinstance(value.dtype, type(self.dtype)): return value else: # cast categories and friends to arrays to see if values are # compatible, compatibility with arrow backed strings value = np.asarray(value) if len(value) and not lib.is_string_array(value, skipna=True): raise TypeError( "Invalid value for dtype 'str'. Value should be a " "string or missing value (or array of those)." 
) return value def __setitem__(self, key, value) -> None: if self._readonly: raise ValueError("Cannot modify read-only array") value = self._maybe_convert_setitem_value(value) key = check_array_indexer(self, key) scalar_key = lib.is_scalar(key) scalar_value = lib.is_scalar(value) if scalar_key and not scalar_value: raise ValueError("setting an array element with a sequence.") if not scalar_value: if value.dtype == self.dtype: value = value._ndarray else: value = np.asarray(value) mask = isna(value) if mask.any(): value = value.copy() value[isna(value)] = self.dtype.na_value super().__setitem__(key, value) def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: # the super() method NDArrayBackedExtensionArray._putmask uses # np.putmask which doesn't properly handle None/pd.NA, so using the # base class implementation that uses __setitem__ ExtensionArray._putmask(self, mask, value) def _where(self, mask: npt.NDArray[np.bool_], value) -> Self: # the super() method NDArrayBackedExtensionArray._where uses # np.putmask which doesn't properly handle None/pd.NA, so using the # base class implementation that uses __setitem__ return ExtensionArray._where(self, mask, value) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: if isinstance(values, BaseStringArray) or ( isinstance(values, ExtensionArray) and is_string_dtype(values.dtype) ): values = values.astype(self.dtype, copy=False) else: if not lib.is_string_array(np.asarray(values), skipna=True): values = np.array( [val for val in values if isinstance(val, str) or isna(val)], dtype=object, ) if not len(values): return np.zeros(self.shape, dtype=bool) values = self._from_sequence(values, dtype=self.dtype) return isin(np.asarray(self), np.asarray(values)) def astype(self, dtype, copy: bool = True): dtype = pandas_dtype(dtype) if dtype == self.dtype: if copy: return self.copy() return self elif isinstance(dtype, IntegerDtype): arr = self._ndarray.copy() mask = self.isna() arr[mask] = 0 values = 
arr.astype(dtype.numpy_dtype) return IntegerArray(values, mask, copy=False) elif isinstance(dtype, FloatingDtype): arr_ea = self.copy() mask = self.isna() arr_ea[mask] = "0" values = arr_ea.astype(dtype.numpy_dtype) return FloatingArray(values, mask, copy=False) elif isinstance(dtype, ExtensionDtype): # Skip the NumpyExtensionArray.astype method return ExtensionArray.astype(self, dtype, copy) elif np.issubdtype(dtype, np.floating): arr = self._ndarray.copy() mask = self.isna() arr[mask] = 0 values = arr.astype(dtype) values[mask] = np.nan return values return super().astype(dtype, copy) def _reduce( self, name: str, *, skipna: bool = True, keepdims: bool = False, axis: AxisInt | None = 0, **kwargs, ): if self.dtype.na_value is np.nan and name in ["any", "all"]: if name == "any": return nanops.nanany(self._ndarray, skipna=skipna) else: return nanops.nanall(self._ndarray, skipna=skipna) if name in ["min", "max", "argmin", "argmax", "sum"]: result = getattr(self, name)(skipna=skipna, axis=axis, **kwargs) if keepdims: return self._from_sequence([result], dtype=self.dtype) return result raise TypeError(f"Cannot perform reduction '{name}' with string dtype") def _accumulate(self, name: str, *, skipna: bool = True, **kwargs) -> StringArray: """ Return an ExtensionArray performing an accumulation operation. The underlying data type might change. Parameters ---------- name : str Name of the function, supported values are: - cummin - cummax - cumsum - cumprod skipna : bool, default True If True, skip NA values. **kwargs Additional keyword arguments passed to the accumulation function. Currently, there is no supported kwarg. 
Returns ------- array Raises ------ NotImplementedError : subclass does not define accumulations """ if name == "cumprod": msg = f"operation '{name}' not supported for dtype '{self.dtype}'" raise TypeError(msg) # We may need to strip out trailing NA values tail: np.ndarray | None = None na_mask: np.ndarray | None = None ndarray = self._ndarray np_func = { "cumsum": np.cumsum, "cummin": np.minimum.accumulate, "cummax": np.maximum.accumulate, }[name] if self._hasna: na_mask = cast("npt.NDArray[np.bool_]", isna(ndarray)) if np.all(na_mask): return type(self)(ndarray, dtype=self.dtype) if skipna: if name == "cumsum": ndarray = np.where(na_mask, "", ndarray) else: # We can retain the running min/max by forward/backward filling. ndarray = ndarray.copy() missing.pad_or_backfill_inplace( ndarray, method="pad", axis=0, ) missing.pad_or_backfill_inplace( ndarray, method="backfill", axis=0, ) else: # When not skipping NA values, the result should be null from # the first NA value onward. idx = np.argmax(na_mask) tail = np.empty(len(ndarray) - idx, dtype="object") tail[:] = self.dtype.na_value ndarray = ndarray[:idx] # mypy: Cannot call function of unknown type np_result = np_func(ndarray) # type: ignore[operator] if tail is not None: np_result = np.hstack((np_result, tail)) elif na_mask is not None: # Argument 2 to "where" has incompatible type "NAType | float" np_result = np.where(na_mask, self.dtype.na_value, np_result) # type: ignore[arg-type] result = type(self)(np_result, dtype=self.dtype) return result def _wrap_reduction_result(self, axis: AxisInt | None, result) -> Any: if self.dtype.na_value is np.nan and result is libmissing.NA: # the masked_reductions use pd.NA -> convert to np.nan return np.nan return super()._wrap_reduction_result(axis, result) def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: nv.validate_min((), kwargs) result = masked_reductions.min( values=self.to_numpy(), mask=self.isna(), skipna=skipna ) return 
self._wrap_reduction_result(axis, result) def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar: nv.validate_max((), kwargs) result = masked_reductions.max( values=self.to_numpy(), mask=self.isna(), skipna=skipna ) return self._wrap_reduction_result(axis, result) def sum( self, *, axis: AxisInt | None = None, skipna: bool = True, min_count: int = 0, **kwargs, ) -> Scalar: nv.validate_sum((), kwargs) result = masked_reductions.sum( values=self._ndarray, mask=self.isna(), skipna=skipna ) return self._wrap_reduction_result(axis, result) def value_counts(self, dropna: bool = True) -> Series: result = super().value_counts(dropna=dropna) if self.dtype.na_value is libmissing.NA: result = result.astype("Int64") return result def memory_usage(self, deep: bool = False) -> int: result = self._ndarray.nbytes if deep: return result + lib.memory_usage_of_objects(self._ndarray) return result def searchsorted( self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter | None = None, ) -> npt.NDArray[np.intp] | np.intp: """ Find indices where elements should be inserted to maintain order. Find the indices into a sorted array `self` (a) such that, if the corresponding elements in `value` were inserted before the indices, the order of `self` would be preserved. Assuming that `self` is sorted: ====== ================================ `side` returned index `i` satisfies ====== ================================ left ``self[i-1] < value <= self[i]`` right ``self[i-1] <= value < self[i]`` ====== ================================ Parameters ---------- value : array-like, list or scalar Value(s) to insert into `self`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). 
sorter : 1-D array-like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints or int If value is array-like, array of insertion points. If value is scalar, a single integer. See Also -------- numpy.searchsorted : Similar method from NumPy. Examples -------- >>> arr = pd.array([1, 2, 3, 5]) >>> arr.searchsorted([4]) array([3]) """ if self._hasna: raise ValueError( "searchsorted requires array to be sorted, which is impossible " "with NAs present." ) return super().searchsorted(value=value, side=side, sorter=sorter) def _cmp_method(self, other, op): from pandas.arrays import ( ArrowExtensionArray, BooleanArray, ) if ( isinstance(other, BaseStringArray) and self.dtype.na_value is not libmissing.NA and other.dtype.na_value is libmissing.NA ): # NA has priority of NaN semantics return op(self.astype(other.dtype, copy=False), other) if isinstance(other, ArrowExtensionArray): if isinstance(other, BaseStringArray): # pyarrow storage has priority over python storage # (except if we have NA semantics and other not) if not ( self.dtype.na_value is libmissing.NA and other.dtype.na_value is not libmissing.NA ): return NotImplemented else: return NotImplemented if isinstance(other, StringArray): other = other._ndarray mask = isna(self) | isna(other) valid = ~mask if lib.is_list_like(other): if len(other) != len(self): # prevent improper broadcasting when other is 2D raise ValueError( f"Lengths of operands do not match: {len(self)} != {len(other)}" ) # for array-likes, first filter out NAs before converting to numpy if not is_array_like(other): other = np.asarray(other) other = other[valid] other_dtype = getattr(other, "dtype", None) if op.__name__.strip("_") in ["mul", "rmul"] and ( lib.is_bool(other) or lib.is_np_dtype(other_dtype, "b") ): # GH#62595 raise TypeError( "Cannot multiply StringArray by bools. " "Explicitly cast to integers instead." 
) if op.__name__ in ops.ARITHMETIC_BINOPS: result = np.empty_like(self._ndarray, dtype="object") result[mask] = self.dtype.na_value result[valid] = op(self._ndarray[valid], other) if isinstance(other, Path): # GH#61940 return result return self._from_backing_data(result) else: # logical result = np.zeros(len(self._ndarray), dtype="bool") result[valid] = op(self._ndarray[valid], other) res_arr = BooleanArray(result, mask) if self.dtype.na_value is np.nan: if op == operator.ne: return res_arr.to_numpy(np.bool_, na_value=True) else: return res_arr.to_numpy(np.bool_, na_value=False) return res_arr _arith_method = _cmp_method def _str_zfill(self, width: int) -> Self: return self._str_map(lambda x: x.zfill(width))
python
github
https://github.com/pandas-dev/pandas
pandas/core/arrays/string_.py
package kotlinx.coroutines.flow

import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlin.random.*
import kotlin.test.*

// A simplified version of StateFlowStressTest
// Stress-tests a single MutableStateFlow with one writer coroutine and one
// repeatedly-restarted collector coroutine, verifying that observed values
// never move backwards even though StateFlow conflates intermediate values.
class StateFlowCommonStressTest : TestBase() {
    // Shared flow under test; written by the emitter, read by the collector.
    private val state = MutableStateFlow<Long>(0)

    @Test
    fun testSingleEmitterAndCollector() = runTest {
        // Highest value the collector has observed so far.
        var collected = 0L
        val collector = launch(Dispatchers.Default) {
            // collect, but abort and collect again after every 1000 values to stress allocation/deallocation
            do {
                // Random batch length (1..1000); restarting collection each batch
                // exercises subscription setup/teardown.
                val batchSize = Random.nextInt(1..1000)
                var index = 0
                val cnt = state.onEach { value ->
                    // the first value in batch is allowed to repeat, but cannot go back
                    val ok = if (index++ == 0) value >= collected else value > collected
                    check(ok) { "Values must be monotonic, but $value is not, was $collected" }
                    collected = value
                }.take(batchSize).map { 1 }.sum()
                // cnt counts items actually received by take(batchSize); a short
                // count would indicate early completion, so loop while batches
                // arrive in full.
            } while (cnt == batchSize)
        }
        // Emitter: writes a monotonically increasing sequence, yielding
        // periodically so the tight loop stays cancellable.
        var current = 1L
        val emitter = launch {
            while (true) {
                state.value = current++
                if (current % 1000 == 0L) yield() // make it cancellable
            }
        }
        delay(3000)
        emitter.cancelAndJoin()
        collector.cancelAndJoin()
        // Loose sanity bound relating the emitted counter to the last
        // collected value (collected values originate from `current`).
        assertTrue { current >= collected / 2 }
    }
}
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
kotlinx-coroutines-core/concurrent/test/flow/StateFlowCommonStressTest.kt
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Simple hash functions used for internal data structures #ifndef TENSORFLOW_CORE_PLATFORM_HASH_H_ #define TENSORFLOW_CORE_PLATFORM_HASH_H_ #include "tsl/platform/hash.h" namespace tensorflow { // NOLINTBEGIN(misc-unused-using-decls) using ::tsl::hash; using ::tsl::Hash32; using ::tsl::Hash64; using ::tsl::Hash64Combine; using ::tsl::Hash64CombineUnordered; using ::tsl::StringPieceHasher; // NOLINTEND(misc-unused-using-decls) } // namespace tensorflow #endif // TENSORFLOW_CORE_PLATFORM_HASH_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/platform/hash.h
# -*- coding: utf-8 -*-
"""Methods for going back and forth between various calendars."""
from __future__ import division

import datetime

from hdate.common import HebrewDate
from hdate.htables import Months


def get_chalakim(hours, parts):
    """Return the number of total parts (chalakim).

    One hour contains 1080 parts ("chalakim"); this folds an
    (hours, parts) pair into a single count of parts.
    """
    return (hours * PARTS_IN_HOUR) + parts


# Units of traditional Hebrew-calendar time reckoning, expressed in parts.
PARTS_IN_HOUR = 1080
PARTS_IN_DAY = 24 * PARTS_IN_HOUR
PARTS_IN_WEEK = 7 * PARTS_IN_DAY
# Excess of the mean lunar month (29d 12h 793p) over the 28 whole days that
# are counted separately in _days_from_3744 below.
PARTS_IN_MONTH = PARTS_IN_DAY + get_chalakim(12, 793)  # Fix for regular month


def _days_from_3744(hebrew_year):
    """Return: Number of days since 3,1,3744."""
    # Start point for calculation is Molad new year 3744 (16BC)
    years_from_3744 = hebrew_year - 3744
    molad_3744 = get_chalakim(1 + 6, 779)  # Molad 3744 + 6 hours in parts

    # Time in months

    # Number of leap months
    leap_months = (years_from_3744 * 7 + 1) // 19
    leap_left = (years_from_3744 * 7 + 1) % 19  # Months left of leap cycle
    months = years_from_3744 * 12 + leap_months  # Total Number of months

    # Time in parts and days
    # Molad This year + Molad 3744 - corrections
    parts = months * PARTS_IN_MONTH + molad_3744
    # 28 days in month + corrections
    days = months * 28 + parts // PARTS_IN_DAY - 2

    # Time left for round date in corrections
    # 28 % 7 = 0 so only corrections counts
    parts_left_in_week = parts % PARTS_IN_WEEK
    parts_left_in_day = parts % PARTS_IN_DAY
    week_day = parts_left_in_week // PARTS_IN_DAY

    # pylint: disable=too-many-boolean-expressions
    # pylint-comment: Splitting the 'if' below might create a bug in case
    # the order is not kept.
    # Molad postponement rule "GaTaRaD" (common year, weekday 3, molad at or
    # after 9h 204p; the extra +6 hours mirrors molad_3744 above).
    # NOTE(review): rule names translated from the original Hebrew comments;
    # confirm against a Hebrew-calendar reference.
    if (
        (
            leap_left < 12
            and week_day == 3
            and parts_left_in_day >= get_chalakim(9 + 6, 204)
        )
        or
        # Molad postponement rule "BeTU'TaKPaT" (year following a leap year,
        # weekday 2, molad at or after 15h 589p)
        (
            leap_left < 7
            and week_day == 2
            and parts_left_in_day >= get_chalakim(15 + 6, 589)
        )
    ):
        days += 1
        week_day += 1
    # pylint: enable=too-many-boolean-expressions

    # ADU postponement: push off new year when the week day is 1, 4 or 6
    # (traditionally Sunday/Wednesday/Friday -- numbering assumed, confirm).
    if week_day in (1, 4, 6):
        days += 1
    return days


def get_size_of_hebrew_year(hebrew_year):
    """Return: total days in hebrew year."""
    # Difference between consecutive new-year day counts.
    return _days_from_3744(hebrew_year + 1) - _days_from_3744(hebrew_year)


def gdate_to_jdn(date):
    """
    Compute Julian day from Gregorian day, month and year.

    Algorithm from wikipedia's julian_day article.
    Return: The julian day number
    """
    # Treat Jan/Feb as months 13/14 of the previous year so leap days fall
    # at the end of the counting year.
    not_jan_or_feb = (14 - date.month) // 12
    year_since_4800bc = date.year + 4800 - not_jan_or_feb
    month_since_4800bc = date.month + 12 * not_jan_or_feb - 3
    jdn = (
        date.day
        + (153 * month_since_4800bc + 2) // 5
        + 365 * year_since_4800bc
        + (year_since_4800bc // 4 - year_since_4800bc // 100 + year_since_4800bc // 400)
        - 32045
    )
    return jdn


def hdate_to_jdn(date):
    """
    Compute Julian day from Hebrew day, month and year.

    Return: julian day number,
            1 of tishrey julians,
            1 of tishrey julians next year
    """
    day = date.day
    month = date.month.value
    # Both Adars map onto slot 6; Adar II additionally shifts by a month
    # of 30 days.
    if date.month == Months.Adar_I:
        month = 6
    if date.month == Months.Adar_II:
        month = 6
        day += 30

    # Calculate days since 1,1,3744
    day = _days_from_3744(date.year) + (59 * (month - 1) + 1) // 2 + day

    # length of year
    length_of_year = get_size_of_hebrew_year(date.year)

    # Special cases for this year
    if length_of_year % 10 > 4 and month > 2:  # long Heshvan
        day += 1
    if length_of_year % 10 < 4 and month > 3:  # short Kislev
        day -= 1
    if length_of_year > 365 and month > 6:  # leap year
        day += 30

    # adjust to julian
    return day + 1715118


def jdn_to_gdate(jdn):
    """
    Convert from the Julian day to the Gregorian day.

    Algorithm from 'Julian and Gregorian Day Numbers' by Peter Meyer.
    Return: day, month, year
    """
    # pylint: disable=invalid-name
    # The algorithm is a verbatim copy from Peter Meyer's article
    # No explanation in the article is given for the variables
    # Hence the exceptions for pylint and for flake8 (E741)
    l = jdn + 68569  # noqa: E741
    n = (4 * l) // 146097
    l = l - (146097 * n + 3) // 4  # noqa: E741
    i = (4000 * (l + 1)) // 1461001  # that's 1,461,001
    l = l - (1461 * i) // 4 + 31  # noqa: E741
    j = (80 * l) // 2447
    day = l - (2447 * j) // 80
    l = j // 11  # noqa: E741
    month = j + 2 - (12 * l)
    year = 100 * (n - 49) + i + l  # that's a lower-case L
    return datetime.date(year, month, day)


def jdn_to_hdate(jdn):
    """Convert from the Julian day to the Hebrew day."""
    # calculate Gregorian date
    date = jdn_to_gdate(jdn)

    # Guess Hebrew year is Gregorian year + 3760
    year = date.year + 3760

    jdn_tishrey1 = hdate_to_jdn(HebrewDate(year, Months.Tishrei, 1))
    jdn_tishrey1_next_year = hdate_to_jdn(HebrewDate(year + 1, Months.Tishrei, 1))

    # Check if computed year was underestimated
    if jdn_tishrey1_next_year <= jdn:
        year = year + 1
        jdn_tishrey1 = jdn_tishrey1_next_year
        jdn_tishrey1_next_year = hdate_to_jdn(HebrewDate(year + 1, Months.Tishrei, 1))

    size_of_year = get_size_of_hebrew_year(year)

    # days into this year, first month 0..29
    days = jdn - jdn_tishrey1

    # last 8 months always have 236 days
    if days >= (size_of_year - 236):  # in last 8 months
        days = days - (size_of_year - 236)
        # Months alternate 30/29 days, hence the *2//59 pairing arithmetic.
        month = days * 2 // 59
        day = days - (month * 59 + 1) // 2 + 1
        month = month + 4 + 1

        # if leap
        if size_of_year > 355 and month <= 6:
            month = month + 8
    else:  # in 4-5 first months
        # Special cases for this year
        if size_of_year % 10 > 4 and days == 59:  # long Heshvan (day 30)
            month = 1
            day = 30
        elif size_of_year % 10 > 4 and days > 59:  # long Heshvan
            month = (days - 1) * 2 // 59
            day = days - (month * 59 + 1) // 2
        elif size_of_year % 10 < 4 and days > 87:  # short kislev
            month = (days + 1) * 2 // 59
            day = days - (month * 59 + 1) // 2 + 2
        else:  # regular months
            month = days * 2 // 59
            day = days - (month * 59 + 1) // 2 + 1

        month = month + 1

    return HebrewDate(year, Months(month), day)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright (c) 2015, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. import logging from vispy.util import logger, use_log_level from vispy.testing import (assert_in, assert_not_in, run_tests_if_main, assert_equal) def test_logging(): """Test logging context manager""" ll = logger.level with use_log_level('warning', print_msg=False): assert_equal(logger.level, logging.WARN) assert_equal(logger.level, ll) with use_log_level('debug', print_msg=False): assert_equal(logger.level, logging.DEBUG) assert_equal(logger.level, ll) def test_debug_logging(): """Test advanced debugging logging""" with use_log_level('debug', 'Selected', True, False) as l: logger.debug('Selected foo') assert_equal(len(l), 1) assert_in('test_logging', l[0]) # can't really parse this location with use_log_level('debug', record=True, print_msg=False) as l: logger.debug('foo') assert_equal(len(l), 1) assert_in('test_logging', l[0]) with use_log_level('debug', 'foo', True, False) as l: logger.debug('bar') assert_equal(len(l), 0) with use_log_level('info', record=True, print_msg=False) as l: logger.debug('foo') logger.info('bar') assert_equal(len(l), 1) assert_not_in('unknown', l[0]) run_tests_if_main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ce_ip_interface version_added: "2.4" short_description: Manages L3 attributes for IPv4 and IPv6 interfaces on HUAWEI CloudEngine switches. description: - Manages Layer 3 attributes for IPv4 and IPv6 interfaces on HUAWEI CloudEngine switches. author: QijunPan (@CloudEngine-Ansible) notes: - Interface must already be a L3 port when using this module. - Logical interfaces (loopback, vlanif) must be created first. - C(mask) must be inserted in decimal format (i.e. 24) for both IPv6 and IPv4. - A single interface can have multiple IPv6 configured. options: interface: description: - Full name of interface, i.e. 40GE1/0/22, vlanif10. required: true addr: description: - IPv4 or IPv6 Address. required: false default: null mask: description: - Subnet mask for IPv4 or IPv6 Address in decimal format. required: false default: null version: description: - IP address version. required: false default: v4 choices: ['v4','v6'] ipv4_type: description: - Specifies an address type. The value is an enumerated type. main, primary IP address. sub, secondary IP address. required: false default: main choices: ['main','sub'] state: description: - Specify desired state of the resource. 
required: false default: present choices: ['present','absent'] ''' EXAMPLES = ''' - name: ip_interface module test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: Ensure ipv4 address is configured on 10GE1/0/22 ce_ip_interface: interface: 10GE1/0/22 version: v4 state: present addr: 20.20.20.20 mask: 24 provider: '{{ cli }}' - name: Ensure ipv4 secondary address is configured on 10GE1/0/22 ce_ip_interface: interface: 10GE1/0/22 version: v4 state: present addr: 30.30.30.30 mask: 24 ipv4_type: sub provider: '{{ cli }}' - name: Ensure ipv6 is enabled on 10GE1/0/22 ce_ip_interface: interface: 10GE1/0/22 version: v6 state: present provider: '{{ cli }}' - name: Ensure ipv6 address is configured on 10GE1/0/22 ce_ip_interface: interface: 10GE1/0/22 version: v6 state: present addr: 2001::db8:800:200c:cccb mask: 64 provider: '{{ cli }}' ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"addr": "20.20.20.20", "interface": "10GE1/0/22", "mask": "24"} existing: description: k/v pairs of existing IP attributes on the interface returned: always type: dict sample: {"ipv4": [{"ifIpAddr": "11.11.11.11", "subnetMask": "255.255.0.0", "addrType": "main"}], "interface": "10GE1/0/22"} end_state: description: k/v pairs of IP attributes after module execution returned: always type: dict sample: {"ipv4": [{"ifIpAddr": "20.20.20.20", "subnetMask": "255.255.255.0", "addrType": "main"}], "interface": "10GE1/0/22"} updates: description: commands sent to the device returned: always type: list sample: ["interface 10GE1/0/22", "ip address 20.20.20.20 24"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ce import 
get_nc_config, set_nc_config, ce_argument_spec CE_NC_GET_INTF = """ <filter type="subtree"> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <isL2SwitchPort></isL2SwitchPort> <ifmAm4> </ifmAm4> <ifmAm6> </ifmAm6> </interface> </interfaces> </ifm> </filter> """ CE_NC_ADD_IPV4 = """ <config> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <ifmAm4> <am4CfgAddrs> <am4CfgAddr operation="merge"> <ifIpAddr>%s</ifIpAddr> <subnetMask>%s</subnetMask> <addrType>%s</addrType> </am4CfgAddr> </am4CfgAddrs> </ifmAm4> </interface> </interfaces> </ifm> </config> """ CE_NC_MERGE_IPV4 = """ <config> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <ifmAm4> <am4CfgAddrs> <am4CfgAddr operation="delete"> <ifIpAddr>%s</ifIpAddr> <subnetMask>%s</subnetMask> <addrType>main</addrType> </am4CfgAddr> <am4CfgAddr operation="merge"> <ifIpAddr>%s</ifIpAddr> <subnetMask>%s</subnetMask> <addrType>main</addrType> </am4CfgAddr> </am4CfgAddrs> </ifmAm4> </interface> </interfaces> </ifm> </config> """ CE_NC_DEL_IPV4 = """ <config> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <ifmAm4> <am4CfgAddrs> <am4CfgAddr operation="delete"> <ifIpAddr>%s</ifIpAddr> <subnetMask>%s</subnetMask> <addrType>%s</addrType> </am4CfgAddr> </am4CfgAddrs> </ifmAm4> </interface> </interfaces> </ifm> </config> """ CE_NC_ADD_IPV6 = """ <config> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <ifmAm6> <am6CfgAddrs> <am6CfgAddr operation="merge"> <ifIp6Addr>%s</ifIp6Addr> <addrPrefixLen>%s</addrPrefixLen> <addrType6>global</addrType6> </am6CfgAddr> </am6CfgAddrs> </ifmAm6> </interface> 
</interfaces> </ifm> </config> """ CE_NC_DEL_IPV6 = """ <config> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <ifmAm6> <am6CfgAddrs> <am6CfgAddr operation="delete"> <ifIp6Addr>%s</ifIp6Addr> <addrPrefixLen>%s</addrPrefixLen> <addrType6>global</addrType6> </am6CfgAddr> </am6CfgAddrs> </ifmAm6> </interface> </interfaces> </ifm> </config> """ CE_NC_MERGE_IPV6_ENABLE = """ <config> <ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <interfaces> <interface> <ifName>%s</ifName> <ifmAm6 operation="merge"> <enableFlag>%s</enableFlag> </ifmAm6> </interface> </interfaces> </ifm> </config> """ def get_interface_type(interface): """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" if interface is None: return None iftype = None if interface.upper().startswith('GE'): iftype = 'ge' elif interface.upper().startswith('10GE'): iftype = '10ge' elif interface.upper().startswith('25GE'): iftype = '25ge' elif interface.upper().startswith('4X10GE'): iftype = '4x10ge' elif interface.upper().startswith('40GE'): iftype = '40ge' elif interface.upper().startswith('100GE'): iftype = '100ge' elif interface.upper().startswith('VLANIF'): iftype = 'vlanif' elif interface.upper().startswith('LOOPBACK'): iftype = 'loopback' elif interface.upper().startswith('METH'): iftype = 'meth' elif interface.upper().startswith('ETH-TRUNK'): iftype = 'eth-trunk' elif interface.upper().startswith('VBDIF'): iftype = 'vbdif' elif interface.upper().startswith('NVE'): iftype = 'nve' elif interface.upper().startswith('TUNNEL'): iftype = 'tunnel' elif interface.upper().startswith('ETHERNET'): iftype = 'ethernet' elif interface.upper().startswith('FCOE-PORT'): iftype = 'fcoe-port' elif interface.upper().startswith('FABRIC-PORT'): iftype = 'fabric-port' elif interface.upper().startswith('STACK-PORT'): iftype = 'stack-port' elif interface.upper().startswith('NULL'): iftype = 
'null' else: return None return iftype.lower() def is_valid_v4addr(addr): """check is ipv4 addr is valid""" if not addr: return False if addr.find('.') != -1: addr_list = addr.split('.') if len(addr_list) != 4: return False for each_num in addr_list: if not each_num.isdigit(): return False if int(each_num) > 255: return False return True return False class IpInterface(object): """ Manages L3 attributes for IPv4 and IPv6 interfaces. """ def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.__init_module__() # module input info] self.interface = self.module.params['interface'] self.addr = self.module.params['addr'] self.mask = self.module.params['mask'] self.version = self.module.params['version'] self.ipv4_type = self.module.params['ipv4_type'] self.state = self.module.params['state'] # state self.changed = False self.updates_cmd = list() self.results = dict() self.proposed = dict() self.existing = dict() self.end_state = dict() # interface info self.intf_info = dict() self.intf_type = None def __init_module__(self): """ init module """ required_if = [("version", "v4", ("addr", "mask"))] required_together = [("addr", "mask")] self.module = AnsibleModule( argument_spec=self.spec, required_if=required_if, required_together=required_together, supports_check_mode=True ) def netconf_set_config(self, xml_str, xml_name): """ netconf set config """ rcv_xml = set_nc_config(self.module, xml_str) if "<ok/>" not in rcv_xml: self.module.fail_json(msg='Error: %s failed.' 
% xml_name) def get_interface_dict(self, ifname): """ get one interface attributes dict.""" intf_info = dict() conf_str = CE_NC_GET_INTF % ifname rcv_xml = get_nc_config(self.module, conf_str) if "<data/>" in rcv_xml: return intf_info # get interface base info intf = re.findall( r'.*<ifName>(.*)</ifName>.*\s*' r'<isL2SwitchPort>(.*)</isL2SwitchPort>.*', rcv_xml) if intf: intf_info = dict(ifName=intf[0][0], isL2SwitchPort=intf[0][1]) # get interface ipv4 address info ipv4_info = re.findall( r'.*<ifIpAddr>(.*)</ifIpAddr>.*\s*<subnetMask>(.*)' r'</subnetMask>.*\s*<addrType>(.*)</addrType>.*', rcv_xml) intf_info["am4CfgAddr"] = list() for info in ipv4_info: intf_info["am4CfgAddr"].append( dict(ifIpAddr=info[0], subnetMask=info[1], addrType=info[2])) # get interface ipv6 address info ipv6_info = re.findall( r'.*<ifmAm6>.*\s*<enableFlag>(.*)</enableFlag>.*', rcv_xml) if not ipv6_info: self.module.fail_json(msg='Error: Fail to get interface %s IPv6 state.' % self.interface) else: intf_info["enableFlag"] = ipv6_info[0] # get interface ipv6 enable info ipv6_info = re.findall( r'.*<ifIp6Addr>(.*)</ifIp6Addr>.*\s*<addrPrefixLen>(.*)' r'</addrPrefixLen>.*\s*<addrType6>(.*)</addrType6>.*', rcv_xml) intf_info["am6CfgAddr"] = list() for info in ipv6_info: intf_info["am6CfgAddr"].append( dict(ifIp6Addr=info[0], addrPrefixLen=info[1], addrType6=info[2])) return intf_info def convert_len_to_mask(self, masklen): """convert mask length to ip address mask, i.e. 
24 to 255.255.255.0""" mask_int = ["0"] * 4 length = int(masklen) if length > 32: self.module.fail_json(msg='Error: IPv4 ipaddress mask length is invalid.') if length < 8: mask_int[0] = str(int((0xFF << (8 - length % 8)) & 0xFF)) if length >= 8: mask_int[0] = '255' mask_int[1] = str(int((0xFF << (16 - (length % 16))) & 0xFF)) if length >= 16: mask_int[1] = '255' mask_int[2] = str(int((0xFF << (24 - (length % 24))) & 0xFF)) if length >= 24: mask_int[2] = '255' mask_int[3] = str(int((0xFF << (32 - (length % 32))) & 0xFF)) if length == 32: mask_int[3] = '255' return '.'.join(mask_int) def is_ipv4_exist(self, addr, maskstr, ipv4_type): """"Check IPv4 address exist""" addrs = self.intf_info["am4CfgAddr"] if not addrs: return False for address in addrs: if address["ifIpAddr"] == addr: return address["subnetMask"] == maskstr and address["addrType"] == ipv4_type return False def get_ipv4_main_addr(self): """get IPv4 main address""" addrs = self.intf_info["am4CfgAddr"] if not addrs: return None for address in addrs: if address["addrType"] == "main": return address return None def is_ipv6_exist(self, addr, masklen): """Check IPv6 address exist""" addrs = self.intf_info["am6CfgAddr"] if not addrs: return False for address in addrs: if address["ifIp6Addr"] == addr.upper(): if address["addrPrefixLen"] == masklen and address["addrType6"] == "global": return True else: self.module.fail_json( msg="Error: Input IPv6 address or mask is invalid.") return False def set_ipv4_addr(self, ifname, addr, mask, ipv4_type): """Set interface IPv4 address""" if not addr or not mask or not type: return maskstr = self.convert_len_to_mask(mask) if self.state == "present": if not self.is_ipv4_exist(addr, maskstr, ipv4_type): # primary IP address if ipv4_type == "main": main_addr = self.get_ipv4_main_addr() if not main_addr: # no ipv4 main address in this interface xml_str = CE_NC_ADD_IPV4 % (ifname, addr, maskstr, ipv4_type) self.netconf_set_config(xml_str, "ADD_IPV4_ADDR") else: # remove old 
address and set new xml_str = CE_NC_MERGE_IPV4 % (ifname, main_addr["ifIpAddr"], main_addr["subnetMask"], addr, maskstr) self.netconf_set_config(xml_str, "MERGE_IPV4_ADDR") # secondary IP address else: xml_str = CE_NC_ADD_IPV4 % (ifname, addr, maskstr, ipv4_type) self.netconf_set_config(xml_str, "ADD_IPV4_ADDR") self.updates_cmd.append("interface %s" % ifname) if ipv4_type == "main": self.updates_cmd.append("ip address %s %s" % (addr, maskstr)) else: self.updates_cmd.append("ip address %s %s sub" % (addr, maskstr)) self.changed = True else: if self.is_ipv4_exist(addr, maskstr, ipv4_type): xml_str = CE_NC_DEL_IPV4 % (ifname, addr, maskstr, ipv4_type) self.netconf_set_config(xml_str, "DEL_IPV4_ADDR") self.updates_cmd.append("interface %s" % ifname) if ipv4_type == "main": self.updates_cmd.append("undo ip address %s %s" % (addr, maskstr)) else: self.updates_cmd.append("undo ip address %s %s sub" % (addr, maskstr)) self.changed = True def set_ipv6_addr(self, ifname, addr, mask): """Set interface IPv6 address""" if not addr or not mask: return if self.state == "present": self.updates_cmd.append("interface %s" % ifname) if self.intf_info["enableFlag"] == "false": xml_str = CE_NC_MERGE_IPV6_ENABLE % (ifname, "true") self.netconf_set_config(xml_str, "SET_IPV6_ENABLE") self.updates_cmd.append("ipv6 enable") self.changed = True if not self.is_ipv6_exist(addr, mask): xml_str = CE_NC_ADD_IPV6 % (ifname, addr, mask) self.netconf_set_config(xml_str, "ADD_IPV6_ADDR") self.updates_cmd.append("ipv6 address %s %s" % (addr, mask)) self.changed = True if not self.changed: self.updates_cmd.pop() else: if self.is_ipv6_exist(addr, mask): xml_str = CE_NC_DEL_IPV6 % (ifname, addr, mask) self.netconf_set_config(xml_str, "DEL_IPV6_ADDR") self.updates_cmd.append("interface %s" % ifname) self.updates_cmd.append( "undo ipv6 address %s %s" % (addr, mask)) self.changed = True def set_ipv6_enable(self, ifname): """Set interface IPv6 enable""" if self.state == "present": if 
self.intf_info["enableFlag"] == "false": xml_str = CE_NC_MERGE_IPV6_ENABLE % (ifname, "true") self.netconf_set_config(xml_str, "SET_IPV6_ENABLE") self.updates_cmd.append("interface %s" % ifname) self.updates_cmd.append("ipv6 enable") self.changed = True else: if self.intf_info["enableFlag"] == "true": xml_str = CE_NC_MERGE_IPV6_ENABLE % (ifname, "false") self.netconf_set_config(xml_str, "SET_IPV6_DISABLE") self.updates_cmd.append("interface %s" % ifname) self.updates_cmd.append("undo ipv6 enable") self.changed = True def check_params(self): """Check all input params""" # check interface type if self.interface: self.intf_type = get_interface_type(self.interface) if not self.intf_type: self.module.fail_json( msg='Error: Interface name of %s ' 'is error.' % self.interface) # ipv4 addr and mask check if self.version == "v4": if not is_valid_v4addr(self.addr): self.module.fail_json( msg='Error: The %s is not a valid address.' % self.addr) if not self.mask.isdigit(): self.module.fail_json(msg='Error: mask is invalid.') if int(self.mask) > 32 or int(self.mask) < 1: self.module.fail_json( msg='Error: mask must be an integer between 1 and 32.') # ipv6 mask check if self.version == "v6": if self.addr: if not self.mask.isdigit(): self.module.fail_json(msg='Error: mask is invalid.') if int(self.mask) > 128 or int(self.mask) < 1: self.module.fail_json( msg='Error: mask must be an integer between 1 and 128.') # interface and layer3 check self.intf_info = self.get_interface_dict(self.interface) if not self.intf_info: self.module.fail_json(msg='Error: interface %s does not exist.' % self.interface) if self.intf_info["isL2SwitchPort"] == "true": self.module.fail_json(msg='Error: interface %s is layer2.' 
% self.interface) def get_proposed(self): """get proposed info""" self.proposed["state"] = self.state self.proposed["addr"] = self.addr self.proposed["mask"] = self.mask self.proposed["ipv4_type"] = self.ipv4_type self.proposed["version"] = self.version self.proposed["interface"] = self.interface def get_existing(self): """get existing info""" self.existing["interface"] = self.interface self.existing["ipv4addr"] = self.intf_info["am4CfgAddr"] self.existing["ipv6addr"] = self.intf_info["am6CfgAddr"] self.existing["ipv6enalbe"] = self.intf_info["enableFlag"] def get_end_state(self): """get end state info""" intf_info = self.get_interface_dict(self.interface) self.end_state["interface"] = self.interface self.end_state["ipv4addr"] = intf_info["am4CfgAddr"] self.end_state["ipv6addr"] = intf_info["am6CfgAddr"] self.end_state["ipv6enalbe"] = intf_info["enableFlag"] def work(self): """worker""" self.check_params() self.get_existing() self.get_proposed() # deal present or absent if self.version == "v4": self.set_ipv4_addr(self.interface, self.addr, self.mask, self.ipv4_type) else: if not self.addr and not self.mask: self.set_ipv6_enable(self.interface) else: self.set_ipv6_addr(self.interface, self.addr, self.mask) self.get_end_state() self.results['changed'] = self.changed self.results['proposed'] = self.proposed self.results['existing'] = self.existing self.results['end_state'] = self.end_state if self.changed: self.results['updates'] = self.updates_cmd else: self.results['updates'] = list() self.module.exit_json(**self.results) def main(): """Module main""" argument_spec = dict( interface=dict(required=True), addr=dict(required=False), version=dict(required=False, choices=['v4', 'v6'], default='v4'), mask=dict(type='str', required=False), ipv4_type=dict(required=False, choices=['main', 'sub'], default='main'), state=dict(required=False, default='present', choices=['present', 'absent']) ) argument_spec.update(ce_argument_spec) module = IpInterface(argument_spec) 
module.work() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com> # Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- author: - Alexander Bulimov (@abulimov) module: lvg short_description: Configure LVM volume groups description: - This module creates, removes or resizes volume groups. version_added: "1.1" options: vg: description: - The name of the volume group. required: true pvs: description: - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group. - The module will take care of running pvcreate if needed. pesize: description: - The size of the physical extent. pesize must be a power of 2, or multiple of 128KiB. Since version 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. default: 4 pv_options: description: - Additional options to pass to C(pvcreate) when creating the volume group. version_added: "2.4" vg_options: description: - Additional options to pass to C(vgcreate) when creating the volume group. version_added: "1.6" state: description: - Control if the volume group exists. choices: [ absent, present ] default: present force: description: - If C(yes), allows to remove volume group with logical volumes. type: bool default: 'no' notes: - This module does not modify PE size for already present volume group. 
''' EXAMPLES = ''' - name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB lvg: vg: vg.services pvs: /dev/sda1 pesize: 32 - name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB lvg: vg: vg.services pvs: /dev/sdb pesize: 128K # If, for example, we already have VG vg.services on top of /dev/sdb1, # this VG will be extended by /dev/sdc5. Or if vg.services was created on # top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, # and then reduce by /dev/sda5. - name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. lvg: vg: vg.services pvs: /dev/sdb1,/dev/sdc5 - name: Remove a volume group with name vg.services lvg: vg: vg.services state: absent ''' import os from ansible.module_utils.basic import AnsibleModule def parse_vgs(data): vgs = [] for line in data.splitlines(): parts = line.strip().split(';') vgs.append({ 'name': parts[0], 'pv_count': int(parts[1]), 'lv_count': int(parts[2]), }) return vgs def find_mapper_device_name(module, dm_device): dmsetup_cmd = module.get_bin_path('dmsetup', True) mapper_prefix = '/dev/mapper/' rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) if rc != 0: module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) mapper_device = mapper_prefix + dm_name.rstrip() return mapper_device def parse_pvs(module, data): pvs = [] dm_prefix = '/dev/dm-' for line in data.splitlines(): parts = line.strip().split(';') if parts[0].startswith(dm_prefix): parts[0] = find_mapper_device_name(module, parts[0]) pvs.append({ 'name': parts[0], 'vg_name': parts[1], }) return pvs def main(): module = AnsibleModule( argument_spec=dict( vg=dict(type='str', required=True), pvs=dict(type='list'), pesize=dict(type='str', default=4), pv_options=dict(type='str', default=''), vg_options=dict(type='str', default=''), state=dict(type='str', default='present', choices=['absent', 'present']), force=dict(type='bool', 
default=False), ), supports_check_mode=True, ) vg = module.params['vg'] state = module.params['state'] force = module.boolean(module.params['force']) pesize = module.params['pesize'] pvoptions = module.params['pv_options'].split() vgoptions = module.params['vg_options'].split() dev_list = [] if module.params['pvs']: dev_list = module.params['pvs'] elif state == 'present': module.fail_json(msg="No physical volumes given.") # LVM always uses real paths not symlinks so replace symlinks with actual path for idx, dev in enumerate(dev_list): dev_list[idx] = os.path.realpath(dev) if state == 'present': # check given devices for test_dev in dev_list: if not os.path.exists(test_dev): module.fail_json(msg="Device %s not found." % test_dev) # get pv list pvs_cmd = module.get_bin_path('pvs', True) if dev_list: pvs_filter = ' || '. join(['pv_name = {0}'.format(x) for x in dev_list]) pvs_filter = "--select '%s'" % pvs_filter else: pvs_filter = '' rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) if rc != 0: module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) # check pv for devices pvs = parse_pvs(module, current_pvs) used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] if used_pvs: module.fail_json(msg="Device %s is already in %s volume group." 
% (used_pvs[0]['name'], used_pvs[0]['vg_name'])) vgs_cmd = module.get_bin_path('vgs', True) rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) if rc != 0: module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) changed = False vgs = parse_vgs(current_vgs) for test_vg in vgs: if test_vg['name'] == vg: this_vg = test_vg break else: this_vg = None if this_vg is None: if state == 'present': # create VG if module.check_mode: changed = True else: # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in dev_list: rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) vgcreate_cmd = module.get_bin_path('vgcreate') rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) if rc == 0: changed = True else: module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) else: if state == 'absent': if module.check_mode: module.exit_json(changed=True) else: if this_vg['lv_count'] == 0 or force: # remove VG vgremove_cmd = module.get_bin_path('vgremove', True) rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) if rc == 0: module.exit_json(changed=True) else: module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) else: module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg)) # resize VG current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] devs_to_remove = list(set(current_devs) - set(dev_list)) devs_to_add = list(set(dev_list) - set(current_devs)) if devs_to_add or devs_to_remove: if module.check_mode: changed = True else: if devs_to_add: devs_to_add_string = ' '.join(devs_to_add) # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in 
devs_to_add: rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) # add PV to our VG vgextend_cmd = module.get_bin_path('vgextend', True) rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) if rc == 0: changed = True else: module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) # remove some PV from our VG if devs_to_remove: devs_to_remove_string = ' '.join(devs_to_remove) vgreduce_cmd = module.get_bin_path('vgreduce', True) rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) if rc == 0: changed = True else: module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err) module.exit_json(changed=changed) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause %YAML 1.2 --- $id: http://devicetree.org/schemas/mfd/mps,mp2629.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: MP2629 Battery Charger PMIC from Monolithic Power System. maintainers: - Saravanan Sekar <sravanhome@gmail.com> description: | MP2629 is a PMIC providing battery charging and power supply for smartphones, wireless camera and portable devices. Chip is controlled over I2C. The battery charge management device handles battery charger controller and ADC IIO device for battery, system voltage properties: compatible: enum: - mps,mp2629 - mps,mp2733 reg: maxItems: 1 interrupts: maxItems: 1 interrupt-controller: true "#interrupt-cells": const: 2 description: The first cell is the IRQ number, the second cell is the trigger type. required: - compatible - reg - interrupts - interrupt-controller - "#interrupt-cells" additionalProperties: false examples: - | #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/input/linux-event-codes.h> i2c { #address-cells = <1>; #size-cells = <0>; pmic@4b { compatible = "mps,mp2629"; reg = <0x4b>; interrupt-controller; interrupt-parent = <&gpio2>; #interrupt-cells = <2>; interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; }; };
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
# GUI Application automation and testing library # Copyright (C) 2006-2018 Mark Mc Mahon and Contributors # https://github.com/pywinauto/pywinauto/graphs/contributors # http://pywinauto.readthedocs.io/en/latest/credits.html # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of pywinauto nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Wrap various UIA windows controls""" import locale import comtypes import six from .. import uia_element_info from .. import findbestmatch from .. import timings from .. import uia_defines as uia_defs from . import uiawrapper from . import win32_controls from . 
import common_controls from ..uia_element_info import UIAElementInfo from ..uia_defines import IUIA from ..uia_defines import NoPatternInterfaceError from ..uia_defines import toggle_state_on from ..uia_defines import get_elem_interface # ==================================================================== class ButtonWrapper(uiawrapper.UIAWrapper): """Wrap a UIA-compatible Button, CheckBox or RadioButton control""" _control_types = ['Button', 'CheckBox', 'RadioButton', ] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(ButtonWrapper, self).__init__(elem) # ----------------------------------------------------------- def toggle(self): """ An interface to Toggle method of the Toggle control pattern. Control supporting the Toggle pattern cycles through its toggle states in the following order: ToggleState_On, ToggleState_Off and, if supported, ToggleState_Indeterminate Usually applied for the check box control. The radio button control does not implement IToggleProvider, because it is not capable of cycling through its valid states. Toggle a state of a check box control. (Use 'select' method instead) Notice, a radio button control isn't supported by UIA. https://msdn.microsoft.com/en-us/library/windows/desktop/ee671290(v=vs.85).aspx """ name = self.element_info.name control_type = self.element_info.control_type self.iface_toggle.Toggle() if name and control_type: self.actions.log('Toggled ' + control_type.lower() + ' "' + name + '"') # Return itself so that action can be chained return self # ----------------------------------------------------------- def get_toggle_state(self): """ Get a toggle state of a check box control. 
The toggle state is represented by an integer 0 - unchecked 1 - checked 2 - indeterminate The following constants are defined in the uia_defines module toggle_state_off = 0 toggle_state_on = 1 toggle_state_inderteminate = 2 """ return self.iface_toggle.CurrentToggleState # ----------------------------------------------------------- def is_dialog(self): """Buttons are never dialogs so return False""" return False # ----------------------------------------------------------- def click(self): """Click the Button control by using Invoke or Select patterns""" try: self.invoke() except NoPatternInterfaceError: self.select() # Return itself so that action can be chained return self # ==================================================================== class ComboBoxWrapper(uiawrapper.UIAWrapper): """Wrap a UIA CoboBox control""" _control_types = ['ComboBox'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(ComboBoxWrapper, self).__init__(elem) # ----------------------------------------------------------- def expand(self): if self.is_expanded(): return self try: super(ComboBoxWrapper, self).expand() except NoPatternInterfaceError: # workaround for WinForms combo box using Open button open_buttons = self.children(title='Open', control_type='Button') if open_buttons: open_buttons[0].invoke() else: try: self.invoke() except NoPatternInterfaceError: raise NoPatternInterfaceError('There is no ExpandCollapsePattern and ' \ 'no "Open" button in .children(). 
Maybe only .click_input() would help to expand.') return self # ----------------------------------------------------------- def collapse(self): if not self.is_expanded(): return self try: super(ComboBoxWrapper, self).collapse() except NoPatternInterfaceError: # workaround for WinForms combo box using Open button close_buttons = self.children(title='Close', control_type='Button') if not close_buttons: if self.element_info.framework_id == 'WinForm': return self # simple WinForms combo box is always expanded else: raise RuntimeError('There is no ExpandCollapsePattern and no "Close" button for the combo box') if self.is_editable(): close_buttons[0].click_input() else: close_buttons[0].invoke() return self # ----------------------------------------------------------- def is_editable(self): edit_children = self.children(control_type="Edit") return len(edit_children) > 0 # ----------------------------------------------------------- def get_expand_state(self): try: return super(ComboBoxWrapper, self).get_expand_state() except NoPatternInterfaceError: # workaround for WinForms combo box children_list = self.children(control_type="List") if children_list and children_list[0].is_visible(): if self.element_info.framework_id == 'Qt': # TODO: find the way to get expand_collapse_state return uia_defs.expand_state_collapsed return uia_defs.expand_state_expanded else: return uia_defs.expand_state_collapsed # ----------------------------------------------------------- def texts(self): """Return the text of the items in the combobox""" texts = self._texts_from_item_container() if len(texts): # flatten the list return [ t for lst in texts for t in lst ] # ComboBox has to be expanded to populate a list of its children items try: super(ComboBoxWrapper, self).expand() for c in self.children(): texts.append(c.window_text()) except NoPatternInterfaceError: children_lists = self.children(control_type='List') if children_lists: # workaround for Qt5 and WinForms return 
children_lists[0].children_texts() elif self.handle: # workaround using "win32" backend win32_combo = win32_controls.ComboBoxWrapper(self.handle) texts.extend(win32_combo.item_texts()) else: # Make sure we collapse back super(ComboBoxWrapper, self).collapse() return texts # ----------------------------------------------------------- def select(self, item): """ Select the ComboBox item The item can be either a 0 based index of the item to select or it can be the string that you want to select """ # ComboBox has to be expanded to populate a list of its children items self.expand() try: self._select(item) except (IndexError, NoPatternInterfaceError): # Try to access the underlying ListBox explicitly children_lst = self.children(control_type='List') if len(children_lst) > 0: children_lst[0]._select(item) # do health check and apply workaround for Qt5 combo box if necessary if isinstance(item, six.string_types): item = children_lst[0].children(title=item)[0] if self.selected_text() != item: # workaround for WinForms combo box item.invoke() if self.selected_text() != item: # workaround for Qt5 combo box item.click_input() if self.selected_text() != item: item.click_input() elif self.selected_index() != item: items = children_lst[0].children(control_type='ListItem') if item < len(items): items[item].invoke() else: raise IndexError('Item number #{} is out of range ' \ '({} items in total)'.format(item, len(items))) else: raise IndexError("item '{0}' not found or can't be accessed".format(item)) finally: # Make sure we collapse back in any case self.collapse() return self # ----------------------------------------------------------- # TODO: add selected_texts for a combobox with a multi-select support def selected_text(self): """ Return the selected text or None Notice, that in case of multi-select it will be only the text from a first selected item """ try: selection = self.get_selection() if selection: return selection[0].name else: return None except 
NoPatternInterfaceError: # Try to fall back to Value interface pattern return self.iface_value.CurrentValue # ----------------------------------------------------------- # TODO: add selected_indices for a combobox with multi-select support def selected_index(self): """Return the selected index""" try: return self.selected_item_index() except NoPatternInterfaceError: # workaround for Qt5 and WinForms return self.texts().index(self.selected_text()) # ----------------------------------------------------------- def item_count(self): """ Return the number of items in the combobox The interface is kept mostly for a backward compatibility with the native ComboBox interface """ children_list = self.children(control_type="List") if children_list: return children_list[0].control_count() else: self.expand() try: children_list = self.children(control_type="List") if children_list: return children_list[0].control_count() else: return self.control_count() finally: self.collapse() # ==================================================================== class EditWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible Edit control""" # TODO: this class supports only 1-line textboxes so there is no point # TODO: in methods such as line_count(), line_length(), get_line(), etc _control_types = ['Edit'] has_title = False # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(EditWrapper, self).__init__(elem) # ----------------------------------------------------------- @property def writable_props(self): """Extend default properties list.""" props = super(EditWrapper, self).writable_props props.extend(['selection_indices']) return props # ----------------------------------------------------------- def line_count(self): """Return how many lines there are in the Edit""" return self.window_text().count("\n") + 1 # ----------------------------------------------------------- def line_length(self, line_index): """Return how 
many characters there are in the line""" # need to first get a character index of that line lines = self.window_text().splitlines() if line_index < len(lines): return len(lines[line_index]) elif line_index == self.line_count() - 1: return 0 else: raise IndexError("There are only {0} lines but given index is {1}".format(self.line_count(), line_index)) # ----------------------------------------------------------- def get_line(self, line_index): """Return the line specified""" lines = self.window_text().splitlines() if line_index < len(lines): return lines[line_index] elif line_index == self.line_count() - 1: return "" else: raise IndexError("There are only {0} lines but given index is {1}".format(self.line_count(), line_index)) # ----------------------------------------------------------- def get_value(self): """Return the current value of the element""" return self.iface_value.CurrentValue # ----------------------------------------------------------- def is_editable(self): """Return the edit possibility of the element""" return not self.iface_value.CurrentIsReadOnly # ----------------------------------------------------------- def texts(self): """Get the text of the edit control""" texts = [ self.get_line(i) for i in range(self.line_count()) ] return texts # ----------------------------------------------------------- def text_block(self): """Get the text of the edit control""" return self.window_text() # ----------------------------------------------------------- def selection_indices(self): """The start and end indices of the current selection""" selected_text = self.iface_text.GetSelection().GetElement(0).GetText(-1) start = self.window_text().find(selected_text) end = start + len(selected_text) return (start, end) # ----------------------------------------------------------- def set_window_text(self, text, append=False): """Override set_window_text for edit controls because it should not be used for Edit controls. 
Edit Controls should either use set_edit_text() or type_keys() to modify the contents of the edit control. """ self.verify_actionable() if append: text = self.window_text() + text self.set_focus() # Set text using IUIAutomationValuePattern self.iface_value.SetValue(text) raise UserWarning("set_window_text() should probably not be called for Edit Controls") # ----------------------------------------------------------- def set_edit_text(self, text, pos_start=None, pos_end=None): """Set the text of the edit control""" self.verify_actionable() # allow one or both of pos_start and pos_end to be None if pos_start is not None or pos_end is not None: # if only one has been specified - then set the other # to the current selection start or end start, end = self.selection_indices() if pos_start is None: pos_start = start if pos_end is None and not isinstance(start, six.string_types): pos_end = end else: pos_start = 0 pos_end = len(self.window_text()) if isinstance(text, six.text_type): if six.PY3: aligned_text = text else: aligned_text = text.encode(locale.getpreferredencoding()) elif isinstance(text, six.binary_type): if six.PY3: aligned_text = text.decode(locale.getpreferredencoding()) else: aligned_text = text else: # convert a non-string input if six.PY3: aligned_text = six.text_type(text) else: aligned_text = six.binary_type(text) # Calculate new text value current_text = self.window_text() new_text = current_text[:pos_start] + aligned_text + current_text[pos_end:] # Set text using IUIAutomationValuePattern self.iface_value.SetValue(new_text) #win32functions.WaitGuiThreadIdle(self) #time.sleep(Timings.after_editsetedittext_wait) if isinstance(aligned_text, six.text_type): self.actions.log('Set text to the edit box: ' + aligned_text) else: self.actions.log(b'Set text to the edit box: ' + aligned_text) # return this control so that actions can be chained. 
return self # set set_text as an alias to set_edit_text set_text = set_edit_text # ----------------------------------------------------------- def select(self, start=0, end=None): """Set the edit selection of the edit control""" self.verify_actionable() self.set_focus() # if we have been asked to select a string if isinstance(start, six.text_type): string_to_select = start elif isinstance(start, six.binary_type): string_to_select = start.decode(locale.getpreferredencoding()) elif isinstance(start, six.integer_types): if isinstance(end, six.integer_types) and start > end: start, end = end, start string_to_select = self.window_text()[start:end] if string_to_select: document_range = self.iface_text.DocumentRange search_range = document_range.FindText(string_to_select, False, False) try: search_range.Select() except ValueError: raise RuntimeError("Text '{0}' hasn't been found".format(string_to_select)) # return this control so that actions can be chained. return self # ==================================================================== class TabControlWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible Tab control""" _control_types = ['Tab'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(TabControlWrapper, self).__init__(elem) # ---------------------------------------------------------------- def get_selected_tab(self): """Return an index of a selected tab""" return self.selected_item_index() # ---------------------------------------------------------------- def tab_count(self): """Return a number of tabs""" return self.control_count() # ---------------------------------------------------------------- def select(self, item): """Select a tab by index or by name""" self._select(item) return self # ---------------------------------------------------------------- def texts(self): """Tabs texts""" return self.children_texts() # 
==================================================================== class SliderWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible Slider control""" _control_types = ['Slider'] has_title = False # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(SliderWrapper, self).__init__(elem) # ----------------------------------------------------------- def min_value(self): """Get the minimum value of the Slider""" return self.iface_range_value.CurrentMinimum # ----------------------------------------------------------- def max_value(self): """Get the maximum value of the Slider""" return self.iface_range_value.CurrentMaximum # ----------------------------------------------------------- def small_change(self): """ Get a small change of slider's thumb This change is achieved by pressing left and right arrows when slider's thumb has keyboard focus. """ return self.iface_range_value.CurrentSmallChange # ----------------------------------------------------------- def large_change(self): """ Get a large change of slider's thumb This change is achieved by pressing PgUp and PgDown keys when slider's thumb has keyboard focus. 
""" return self.iface_range_value.CurrentLargeChange # ----------------------------------------------------------- def value(self): """Get a current position of slider's thumb""" return self.iface_range_value.CurrentValue # ----------------------------------------------------------- def set_value(self, value): """Set position of slider's thumb""" if isinstance(value, float): value_to_set = value elif isinstance(value, six.integer_types): value_to_set = value elif isinstance(value, six.text_type): value_to_set = float(value) else: raise ValueError("value should be either string or number") min_value = self.min_value() max_value = self.max_value() if not (min_value <= value_to_set <= max_value): raise ValueError("value should be bigger than {0} and smaller than {1}".format(min_value, max_value)) self.iface_range_value.SetValue(value_to_set) # ==================================================================== class HeaderWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible Header control""" _control_types = ['Header'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(HeaderWrapper, self).__init__(elem) # ==================================================================== class HeaderItemWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible Header Item control""" _control_types = ['HeaderItem'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(HeaderItemWrapper, self).__init__(elem) # ==================================================================== class ListItemWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible ListViewItem control""" _control_types = ['DataItem', 'ListItem', ] # ----------------------------------------------------------- def __init__(self, elem, container=None): """Initialize the control""" super(ListItemWrapper, self).__init__(elem) # Init a pointer to the item's container wrapper. 
# It must be set by a container wrapper producing the item. # Notice that the self.parent property isn't the same # because it results in a different instance of a wrapper. self.container = container # ----------------------------------------------------------- def is_checked(self): """Return True if the ListItem is checked Only items supporting Toggle pattern should answer. Raise NoPatternInterfaceError if the pattern is not supported """ return self.iface_toggle.ToggleState_On == toggle_state_on def texts(self): """Return a list of item texts""" content = [ch.window_text() for ch in self.children(content_only=True)] if content: return content else: # For native list with small icons return super(ListItemWrapper, self).texts() # ==================================================================== class ListViewWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible ListView control""" _control_types = ['DataGrid', 'List', 'Table'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(ListViewWrapper, self).__init__(elem) # Check if control supports Grid pattern # Control is actually a DataGrid or a List with Grid pattern support try: if self.iface_grid: self.iface_grid_support = True except NoPatternInterfaceError: self.iface_grid_support = False self.is_table = not self.iface_grid_support and self.element_info.control_type == "Table" self.row_header = False self.col_header = False def __getitem__(self, key): return self.get_item(key) def __raise_not_implemented(self): raise NotImplementedError("This method not work properly for WinForms DataGrid, use cells()") def __update_row_header(self): try: self.row_header = all(isinstance(six.next(row.iter_children()), HeaderWrapper) for row in self.children()) except StopIteration: self.row_header = False def __update_col_header(self): try: self.col_header = all(isinstance(col, HeaderWrapper) for col in six.next(self.iter_children()).children()) except 
StopIteration: self.col_header = False def __resolve_row_index(self, ind): self.__update_col_header() return ind + 1 if self.col_header and self.is_table else ind def __resolve_col_index(self, ind): self.__update_row_header() return ind + 1 if self.row_header and self.is_table else ind def __resolve_row_count(self, cnt): self.__update_col_header() return cnt - 1 if self.col_header and self.is_table else cnt # ----------------------------------------------------------- def item_count(self): """A number of items in the ListView""" if self.iface_grid_support: return self.iface_grid.CurrentRowCount else: # TODO: This could be implemented by getting custom ItemCount Property using RegisterProperty # TODO: See https://msdn.microsoft.com/ru-ru/library/windows/desktop/ff486373%28v=vs.85%29.aspx for details # TODO: comtypes doesn't seem to support IUIAutomationRegistrar interface return self.__resolve_row_count(len(self.children())) # ----------------------------------------------------------- def column_count(self): """Return the number of columns""" if self.iface_grid_support: return self.iface_grid.CurrentColumnCount elif self.is_table: self.__raise_not_implemented() # ListBox doesn't have columns return 0 # ----------------------------------------------------------- def get_header_controls(self): """Return Header controls associated with the Table""" return [cell for row in self.children() for cell in row.children() if isinstance(cell, HeaderWrapper)] # ----------------------------------------------------------- def get_header_control(self): """Return Header control associated with the ListView""" try: # A data grid control may have no header hdr = self.children(control_type="Header")[0] except(IndexError, NoPatternInterfaceError): hdr = None return hdr # ----------------------------------------------------------- def get_column(self, col_index): """Get the information for a column of the ListView""" col = None try: col = self.columns()[col_index] except 
comtypes.COMError: raise IndexError return col # ----------------------------------------------------------- def columns(self): """Get the information on the columns of the ListView""" if self.iface_grid_support: arr = self.iface_table.GetCurrentColumnHeaders() cols = uia_element_info.elements_from_uia_array(arr) return [uiawrapper.UIAWrapper(e) for e in cols] elif self.is_table: self.__raise_not_implemented() else: return [] # ----------------------------------------------------------- def cells(self): """Return list of list of cells for any type of contol""" row_start_index = self.__resolve_row_index(0) col_start_index = self.__resolve_col_index(0) rows = self.children(content_only=True) return [row.children(content_only=True)[col_start_index:] for row in rows[row_start_index:]] # ----------------------------------------------------------- def cell(self, row, column): """Return a cell in the ListView control Only for controls with Grid pattern support * **row** is an index of a row in the list. * **column** is an index of a column in the specified row. The returned cell can be of different control types. 
        Mostly: TextBlock, ImageControl, EditControl, DataItem
        or even another layer of data items (Group, DataGrid)
        """
        # Both coordinates must be integer indices; names are not accepted here.
        if not isinstance(row, six.integer_types) or not isinstance(column, six.integer_types):
            raise TypeError("row and column must be numbers")

        if self.iface_grid_support:
            # Preferred path: the control exposes the UIA Grid pattern
            try:
                e = self.iface_grid.GetItem(row, column)
                elem_info = uia_element_info.UIAElementInfo(e)
                cell_elem = uiawrapper.UIAWrapper(elem_info)
            except (comtypes.COMError, ValueError):
                raise IndexError
        elif self.is_table:
            # Workaround for WinForms, DataGrid equals list of lists
            _row = self.get_item(row)
            cell_elem = _row.children()[self.__resolve_col_index(column)]
        else:
            # Neither Grid pattern nor table layout: no cell can be resolved
            return None

        return cell_elem

    # -----------------------------------------------------------
    def get_item(self, row):
        """Return an item of the ListView control

        * **row** can be either an index of the row or a string
          with the text of a cell in the row you want returned.
        """
        # Verify arguments
        if isinstance(row, six.string_types):
            # Try to find item using FindItemByProperty
            # That way we can get access to virtualized (unloaded) items
            try:
                com_elem = self.iface_item_container.FindItemByProperty(0, IUIA().UIA_dll.UIA_NamePropertyId, row)
                # Try to load element using VirtualizedItem pattern
                try:
                    get_elem_interface(com_elem, "VirtualizedItem").Realize()
                    itm = uiawrapper.UIAWrapper(uia_element_info.UIAElementInfo(com_elem))
                except NoPatternInterfaceError:
                    # Item doesn't support VirtualizedItem pattern - item is already on screen or com_elem is NULL
                    itm = uiawrapper.UIAWrapper(uia_element_info.UIAElementInfo(com_elem))
            except (NoPatternInterfaceError, ValueError):
                # com_elem is NULL pointer or item doesn't support ItemContainer pattern
                # Get DataGrid row
                try:
                    itm = self.descendants(title=row)[0]
                    # Applications like explorer.exe usually return ListItem
                    # directly while other apps can return only a cell.
                    # In this case we need to take its parent - the whole row.
                    if not isinstance(itm, ListItemWrapper):
                        itm = itm.parent()
                except IndexError:
                    raise ValueError("Element '{0}' not found".format(row))
        elif isinstance(row, six.integer_types):
            # Get the item by a row index
            try:
                # Walk the ItemContainer forward row-by-row; FindItemByProperty(prev, 0, vt_empty)
                # returns the item following 'prev', so this loop lands on the requested row.
                com_elem = 0
                for _ in range(0, self.__resolve_row_index(row) + 1):
                    com_elem = self.iface_item_container.FindItemByProperty(com_elem, 0, uia_defs.vt_empty)

                # Try to load element using VirtualizedItem pattern
                try:
                    get_elem_interface(com_elem, "VirtualizedItem").Realize()
                except NoPatternInterfaceError:
                    pass
                itm = uiawrapper.UIAWrapper(uia_element_info.UIAElementInfo(com_elem))
            except (NoPatternInterfaceError, ValueError, AttributeError):
                # Fallback: index directly into the (already realized) children
                list_items = self.children(content_only=True)
                itm = list_items[self.__resolve_row_index(row)]
        else:
            raise TypeError("String type or integer is expected")

        # Give to the item a pointer on its container
        itm.container = self
        return itm

    item = get_item  # this is an alias to be consistent with other content elements

    # -----------------------------------------------------------
    def get_items(self):
        """Return all items of the ListView control"""
        return self.children(content_only=True)

    items = get_items  # this is an alias to be consistent with other content elements

    # -----------------------------------------------------------
    def get_item_rect(self, item_index):
        """Return the bounding rectangle of the list view item

        The method is kept mostly for a backward compatibility
        with the native ListViewWrapper interface
        """
        itm = self.get_item(item_index)
        return itm.rectangle()

    # -----------------------------------------------------------
    def get_selected_count(self):
        """Return a number of selected items

        The call can be quite expensive as we retrieve all
        the selected items in order to count them
        """
        selection = self.get_selection()
        if selection:
            return len(selection)
        else:
            return 0

    # -----------------------------------------------------------
    def texts(self):
        """Return a list of item texts"""
        return [elem.texts() for elem in self.children(content_only=True)]

    # -----------------------------------------------------------
    @property
    def writable_props(self):
        """Extend default properties list."""
        props = super(ListViewWrapper, self).writable_props
        props.extend(['column_count',
                      'item_count',
                      'columns',
                      # 'items',
                      ])
        return props


# ====================================================================
class MenuItemWrapper(uiawrapper.UIAWrapper):

    """Wrap an UIA-compatible MenuItem control"""

    _control_types = ['MenuItem']

    # -----------------------------------------------------------
    def __init__(self, elem):
        """Initialize the control"""
        super(MenuItemWrapper, self).__init__(elem)

    # -----------------------------------------------------------
    def items(self):
        """Find all items of the menu item"""
        return self.children(control_type="MenuItem")

    # -----------------------------------------------------------
    def select(self):
        """Apply Select pattern

        Falls back to the Invoke pattern when SelectionItem is not
        supported; raises AttributeError when neither pattern works.
        """
        try:
            self.iface_selection_item.Select()
        except(NoPatternInterfaceError):
            try:
                self.iface_invoke.Invoke()
            except(NoPatternInterfaceError):
                raise AttributeError


# ====================================================================
class MenuWrapper(uiawrapper.UIAWrapper):

    """Wrap an UIA-compatible MenuBar or Menu control"""

    _control_types = ['MenuBar', 'Menu', ]

    # -----------------------------------------------------------
    def __init__(self, elem):
        """Initialize the control"""
        super(MenuWrapper, self).__init__(elem)

    # -----------------------------------------------------------
    def items(self):
        """Find all menu items"""
        return self.children(control_type="MenuItem")

    # -----------------------------------------------------------
    def item_by_index(self, idx):
        """Find a menu item specified by the index"""
        item = self.items()[idx]
        return item

    # -----------------------------------------------------------
    def _activate(self, item, is_last):
        """Activate the specified item"""
        if not item.is_active():
            item.set_focus()
        try:
            item.expand()
        except(NoPatternInterfaceError):
            # Item cannot be expanded (e.g. a leaf entry); WinForms menus need
            # an explicit select() on intermediate levels to open the submenu.
            if self.element_info.framework_id == 'WinForm' and not is_last:
                item.select()

    # -----------------------------------------------------------
    def _sub_item_by_text(self, menu, name, exact, is_last):
        """Find a menu sub-item by the specified text"""
        # NOTE(review): if 'menu' has items but none matches in exact mode,
        # sub_item stays None and _activate(None, ...) will raise — TODO confirm intended.
        sub_item = None
        items = menu.items()
        if items:
            if exact:
                for i in items:
                    if name == i.window_text():
                        sub_item = i
                        break
            else:
                # fuzzy lookup: pick the best-matching item text
                texts = []
                for i in items:
                    texts.append(i.window_text())
                sub_item = findbestmatch.find_best_match(name, texts, items)

        self._activate(sub_item, is_last)

        return sub_item

    # -----------------------------------------------------------
    def _sub_item_by_idx(self, menu, idx, is_last):
        """Find a menu sub-item by the specified index"""
        sub_item = None
        items = menu.items()
        if items:
            sub_item = items[idx]

        self._activate(sub_item, is_last)

        return sub_item

    # -----------------------------------------------------------
    def item_by_path(self, path, exact=False):
        """Find a menu item specified by the path

        The full path syntax is specified in:
        :py:meth:`.controls.menuwrapper.Menu.get_menu_path`

        Note: $ - specifier is not supported
        """
        # Get the path parts
        menu_items = [p.strip() for p in path.split("->")]
        items_cnt = len(menu_items)
        if items_cnt == 0:
            raise IndexError()
        for item in menu_items:
            if not item:
                raise IndexError("Empty item name between '->' separators")

        def next_level_menu(parent_menu, item_name, is_last):
            # '#N' addresses a sub-item by index, anything else by text
            if item_name.startswith("#"):
                return self._sub_item_by_idx(parent_menu, int(item_name[1:]), is_last)
            else:
                return self._sub_item_by_text(parent_menu, item_name, exact, is_last)

        # Find a top level menu item and select it. After selecting this item
        # a new Menu control is created and placed on the dialog. It can be
        # a direct child or a descendant.
        # Sometimes we need to re-discover Menu again
        try:
            menu = next_level_menu(self, menu_items[0], items_cnt == 1)
            if items_cnt == 1:
                return menu

            if not menu.items():
                # submenu not populated yet: activate and wait for the popup Menu control
                self._activate(menu, False)
                timings.wait_until(
                    timings.Timings.window_find_timeout,
                    timings.Timings.window_find_retry,
                    lambda: len(self.top_level_parent().descendants(control_type="Menu")) > 0)
                menu = self.top_level_parent().descendants(control_type="Menu")[0]

            for i in range(1, items_cnt):
                menu = next_level_menu(menu, menu_items[i], items_cnt == i + 1)
        except(AttributeError):
            raise IndexError()

        return menu


# ====================================================================
class TooltipWrapper(uiawrapper.UIAWrapper):

    """Wrap an UIA-compatible Tooltip control"""

    _control_types = ['ToolTip']

    # -----------------------------------------------------------
    def __init__(self, elem):
        """Initialize the control"""
        super(TooltipWrapper, self).__init__(elem)


# ====================================================================
class ToolbarWrapper(uiawrapper.UIAWrapper):

    """Wrap an UIA-compatible ToolBar control

    The control's children usually are: Buttons, SplitButton,
    MenuItems, ThumbControls, TextControls, Separators, CheckBoxes.
    Notice that ToolTip controls are children of the top window and
    not of the toolbar.
""" _control_types = ['ToolBar'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(ToolbarWrapper, self).__init__(elem) self.win32_wrapper = None if not self.children() and self.element_info.handle is not None: self.win32_wrapper = common_controls.ToolbarWrapper(self.element_info.handle) @property def writable_props(self): """Extend default properties list.""" props = super(ToolbarWrapper, self).writable_props props.extend(['button_count']) return props # ---------------------------------------------------------------- def texts(self): """Return texts of the Toolbar""" return [c.window_text() for c in self.buttons()] #---------------------------------------------------------------- def button_count(self): """Return a number of buttons on the ToolBar""" if self.win32_wrapper is not None: return self.win32_wrapper.button_count() else: return len(self.children()) # ---------------------------------------------------------------- def buttons(self): """Return all available buttons""" if self.win32_wrapper is not None: btn_count = self.win32_wrapper.button_count() cc = [] for btn_num in range(btn_count): relative_point = self.win32_wrapper.get_button_rect(btn_num).mid_point() button_coord_x, button_coord_y = self.client_to_screen(relative_point) btn_elem_info = UIAElementInfo.from_point(button_coord_x, button_coord_y) cc.append(uiawrapper.UIAWrapper(btn_elem_info)) else: cc = self.children() return cc # ---------------------------------------------------------------- def button(self, button_identifier, exact=True): """Return a button by the specified identifier * **button_identifier** can be either an index of a button or a string with the text of the button. * **exact** flag specifies if the exact match for the text look up has to be applied. 
""" cc = self.buttons() texts = [c.window_text() for c in cc] if isinstance(button_identifier, six.string_types): self.actions.log('Toolbar buttons: ' + str(texts)) if exact: try: button_index = texts.index(button_identifier) except ValueError: raise findbestmatch.MatchError(items=texts, tofind=button_identifier) else: # one of these will be returned for the matching text indices = [i for i in range(0, len(texts))] # find which index best matches that text button_index = findbestmatch.find_best_match(button_identifier, texts, indices) else: button_index = button_identifier return cc[button_index] # ---------------------------------------------------------------- def check_button(self, button_identifier, make_checked, exact=True): """Find where the button is and toggle it * **button_identifier** can be either an index of the button or a string with the text on the button. * **make_checked** specifies the required toggled state of the button. If the button is already in the specified state the state isn't changed. 
* **exact** flag specifies if the exact match for the text look up has to be applied """ self.actions.logSectionStart('Checking "' + self.window_text() + '" toolbar button "' + str(button_identifier) + '"') button = self.button(button_identifier, exact=exact) if make_checked: self.actions.log('Pressing down toolbar button "' + str(button_identifier) + '"') else: self.actions.log('Pressing up toolbar button "' + str(button_identifier) + '"') if not button.is_enabled(): self.actions.log('Toolbar button is not enabled!') raise RuntimeError("Toolbar button is not enabled!") res = (button.get_toggle_state() == toggle_state_on) if res != make_checked: button.toggle() self.actions.logSectionEnd() return button # ==================================================================== class TreeItemWrapper(uiawrapper.UIAWrapper): """Wrap an UIA-compatible TreeItem control In addition to the provided methods of the wrapper additional inherited methods can be especially helpful: select(), extend(), collapse(), is_extended(), is_collapsed(), click_input(), rectangle() and many others """ _control_types = ['TreeItem'] # ----------------------------------------------------------- def __init__(self, elem): """Initialize the control""" super(TreeItemWrapper, self).__init__(elem) # ----------------------------------------------------------- def is_checked(self): """Return True if the TreeItem is checked Only items supporting Toggle pattern should answer. Raise NoPatternInterfaceError if the pattern is not supported """ return (self.iface_toggle.ToggleState_On == toggle_state_on) # ----------------------------------------------------------- def ensure_visible(self): """Make sure that the TreeView item is visible""" self.iface_scroll_item.ScrollIntoView() # ----------------------------------------------------------- def get_child(self, child_spec, exact=False): """Return the child item of this item Accepts either a string or an index. 
        If a string is passed then it returns the child item
        with the best match for the string.
        """
        cc = self.children(control_type='TreeItem')
        if isinstance(child_spec, six.string_types):
            texts = [c.window_text() for c in cc]
            if exact:
                if child_spec in texts:
                    index = texts.index(child_spec)
                else:
                    raise IndexError('There is no child equal to "' + str(child_spec) + '" in ' + str(texts))
            else:
                # fuzzy lookup with a minimum similarity threshold
                indices = range(0, len(texts))
                index = findbestmatch.find_best_match(
                    child_spec, texts, indices, limit_ratio=.6)
        else:
            index = child_spec

        return cc[index]

    # -----------------------------------------------------------
    def _calc_click_coords(self):
        """Override the BaseWrapper helper method

        Try to get coordinates of a text box inside the item.
        If no text box found just set coordinates
        close to a left part of the item rectangle

        The returned coordinates are always absolute
        """
        tt = self.children(control_type="Text")
        if tt:
            point = tt[0].rectangle().mid_point()
            # convert from POINT to a simple tuple
            coords = (point.x, point.y)
        else:
            # no Text child: aim at 1/4 width, 1/2 height of the item rect
            rect = self.rectangle()
            coords = (rect.left + int(float(rect.width()) / 4.),
                      rect.top + int(float(rect.height()) / 2.))

        return coords

    # -----------------------------------------------------------
    def sub_elements(self):
        """Return a list of all visible sub-items of this control"""
        return self.descendants(control_type="TreeItem")


# ====================================================================
class TreeViewWrapper(uiawrapper.UIAWrapper):

    """Wrap an UIA-compatible Tree control"""

    _control_types = ['Tree']

    # -----------------------------------------------------------
    def __init__(self, elem):
        """Initialize the control"""
        super(TreeViewWrapper, self).__init__(elem)

    @property
    def writable_props(self):
        """Extend default properties list."""
        props = super(TreeViewWrapper, self).writable_props
        props.extend(['item_count'])
        return props

    # -----------------------------------------------------------
    def item_count(self):
        """Return a number of items in TreeView"""
        return len(self.descendants(control_type="TreeItem"))

    # -----------------------------------------------------------
    def roots(self):
        """Return root elements of TreeView"""
        return self.children(control_type="TreeItem")

    # -----------------------------------------------------------
    def get_item(self, path, exact=False):
        r"""Read a TreeView item

        * **path** a path to the item to return. This can be one of
          the following:

          * A string separated by \\ characters. The first character must
            be \\. This string is split on the \\ characters and each of
            these is used to find the specific child at each level. The
            \\ represents the root item - so you don't need to specify the
            root itself.
          * A list/tuple of strings - The first item should be the root
            element.
          * A list/tuple of integers - The first item the index which root
            to select. Indexing always starts from zero: get_item((0, 2, 3))

        * **exact** a flag to request exact match of strings in the path
          or apply a fuzzy logic of best_match thus allowing non-exact
          path specifiers
        """
        if not self.item_count():
            return None

        # Ensure the path is absolute
        if isinstance(path, six.string_types):
            if not path.startswith("\\"):
                raise RuntimeError(
                    "Only absolute paths allowed - "
                    "please start the path with \\")
            path = path.split("\\")[1:]

        current_elem = None

        # find the correct root elem
        if isinstance(path[0], int):
            current_elem = self.roots()[path[0]]
        else:
            roots = self.roots()
            texts = [r.window_text() for r in roots]
            if exact:
                if path[0] in texts:
                    current_elem = roots[texts.index(path[0])]
                else:
                    raise IndexError("There is no root element equal to '{0}'".format(path[0]))
            else:
                try:
                    current_elem = findbestmatch.find_best_match(
                        path[0], texts, roots, limit_ratio=.6)
                except IndexError:
                    raise IndexError("There is no root element similar to '{0}'".format(path[0]))

        # now for each of the lower levels
        # just index into it's children
        for child_spec in path[1:]:
            try:
                # ensure that the item is expanded as this is sometimes
                # required for loading tree view branches
                current_elem.expand()
                current_elem = current_elem.get_child(child_spec, exact)
            except IndexError:
                if isinstance(child_spec, six.string_types):
                    raise IndexError("Item '{0}' does not have a child '{1}'".format(
                        current_elem.window_text(), child_spec))
                else:
                    raise IndexError("Item '{0}' does not have {1} children".format(
                        current_elem.window_text(), child_spec + 1))
            except comtypes.COMError:
                raise IndexError("Item '{0}' does not have a child '{1}'".format(
                    current_elem.window_text(), child_spec))

        return current_elem

    # -----------------------------------------------------------
    def print_items(self):
        """Print all items with line indents"""
        # accumulate the indented text on the instance, then return it
        self.text = ""

        def _print_one_level(item, ident):
            """Get texts for the item and its children"""
            self.text += " " * ident + item.window_text() + "\n"
            for child in item.children(control_type="TreeItem"):
                _print_one_level(child, ident + 1)

        for root in self.roots():
            _print_one_level(root, 0)
        return self.text


# ====================================================================
class StaticWrapper(uiawrapper.UIAWrapper):

    """Wrap an UIA-compatible Text control"""

    _control_types = ['Text']
    can_be_label = True

    # -----------------------------------------------------------
    def __init__(self, elem):
        """Initialize the control"""
        super(StaticWrapper, self).__init__(elem)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python ## # Copyright 2013-2021 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
## """ Bootstrap script for EasyBuild Installs distribute with included (patched) distribute_setup.py script to obtain easy_install, and then performs a staged install of EasyBuild: * stage 0: install setuptools (which provides easy_install), unless already available * stage 1: install EasyBuild with easy_install to a temporary directory * stage 2: install EasyBuild with EasyBuild from stage 1 to specified install directory Authors: Kenneth Hoste (UGent), Stijn Deweirdt (UGent), Ward Poelmans (UGent) License: GPLv2 inspired by https://bitbucket.org/pdubroy/pip/raw/tip/getpip.py (via http://dubroy.com/blog/so-you-want-to-install-a-python-package/) """ import codecs import copy import glob import os import re import shutil import site import sys import tempfile import traceback from distutils.version import LooseVersion from hashlib import md5 from platform import python_version IS_PY3 = sys.version_info[0] == 3 if not IS_PY3: import urllib2 as std_urllib else: import urllib.request as std_urllib EB_BOOTSTRAP_VERSION = '20210618.01' # argparse preferrred, optparse deprecated >=2.7 HAVE_ARGPARSE = False try: import argparse HAVE_ARGPARSE = True except ImportError: import optparse PYPI_SOURCE_URL = 'https://pypi.python.org/packages/source' VSC_BASE = 'vsc-base' VSC_INSTALL = 'vsc-install' # Python 3 is not supported by the vsc-* packages EASYBUILD_PACKAGES = (([] if IS_PY3 else [VSC_INSTALL, VSC_BASE]) + ['easybuild-framework', 'easybuild-easyblocks', 'easybuild-easyconfigs']) STAGE1_SUBDIR = 'eb_stage1' # the EasyBuild bootstrap script is deprecated, and will only run if $EASYBUILD_BOOTSTRAP_DEPRECATED is defined EASYBUILD_BOOTSTRAP_DEPRECATED = os.environ.pop('EASYBUILD_BOOTSTRAP_DEPRECATED', None) # set print_debug to True for detailed progress info print_debug = os.environ.pop('EASYBUILD_BOOTSTRAP_DEBUG', False) # install with --force in stage2? 
forced_install = os.environ.pop('EASYBUILD_BOOTSTRAP_FORCED', False) # don't add user site directory to sys.path (equivalent to python -s), see https://www.python.org/dev/peps/pep-0370/ os.environ['PYTHONNOUSERSITE'] = '1' site.ENABLE_USER_SITE = False # clean PYTHONPATH to avoid finding readily installed stuff os.environ['PYTHONPATH'] = '' EASYBUILD_BOOTSTRAP_SOURCEPATH = os.environ.pop('EASYBUILD_BOOTSTRAP_SOURCEPATH', None) EASYBUILD_BOOTSTRAP_SKIP_STAGE0 = os.environ.pop('EASYBUILD_BOOTSTRAP_SKIP_STAGE0', False) EASYBUILD_BOOTSTRAP_FORCE_VERSION = os.environ.pop('EASYBUILD_BOOTSTRAP_FORCE_VERSION', None) # keep track of original environment (after clearing PYTHONPATH) orig_os_environ = copy.deepcopy(os.environ) # If the modules tool is specified, use it easybuild_modules_tool = os.environ.get('EASYBUILD_MODULES_TOOL', None) easybuild_module_syntax = os.environ.get('EASYBUILD_MODULE_SYNTAX', None) # If modules subdir specifications are defined, use them easybuild_installpath_modules = os.environ.get('EASYBUILD_INSTALLPATH_MODULES', None) easybuild_subdir_modules = os.environ.get('EASYBUILD_SUBDIR_MODULES', 'modules') easybuild_suffix_modules_path = os.environ.get('EASYBUILD_SUFFIX_MODULES_PATH', 'all') # # Utility functions # def debug(msg): """Print debug message.""" if print_debug: print("[[DEBUG]] " + msg) def info(msg): """Print info message.""" print("[[INFO]] " + msg) def error(msg, exit=True): """Print error message and exit.""" print("[[ERROR]] " + msg) sys.exit(1) def mock_stdout_stderr(): """Mock stdout/stderr channels""" try: from cStringIO import StringIO except ImportError: from io import StringIO orig_stdout, orig_stderr = sys.stdout, sys.stderr sys.stdout.flush() sys.stdout = StringIO() sys.stderr.flush() sys.stderr = StringIO() return orig_stdout, orig_stderr def restore_stdout_stderr(orig_stdout, orig_stderr): """Restore stdout/stderr channels after mocking""" # collect output sys.stdout.flush() stdout = sys.stdout.getvalue() sys.stderr.flush() 
    stderr = sys.stderr.getvalue()

    # restore original stdout/stderr
    sys.stdout = orig_stdout
    sys.stderr = orig_stderr

    return stdout, stderr


def det_lib_path(libdir):
    """Determine relative path of Python library dir."""
    if libdir is None:
        libdir = 'lib'
    # e.g. lib/python2.7/site-packages for Python 2.7
    pyver = '.'.join([str(x) for x in sys.version_info[:2]])
    return os.path.join(libdir, 'python%s' % pyver, 'site-packages')


def det_modules_path(install_path):
    """Determine modules path."""
    # $EASYBUILD_INSTALLPATH_MODULES takes precedence over <install_path>/<subdir>
    if easybuild_installpath_modules is not None:
        modules_path = os.path.join(easybuild_installpath_modules, easybuild_suffix_modules_path)
    else:
        modules_path = os.path.join(install_path, easybuild_subdir_modules, easybuild_suffix_modules_path)

    return modules_path


def find_egg_dir_for(path, pkg):
    """Find full path of egg dir for given package.

    Returns None when no matching .egg directory is found; when multiple
    matches exist, the first one (sorted order, 'lib' before 'lib64') wins.
    """
    res = None
    for libdir in ['lib', 'lib64']:
        full_libpath = os.path.join(path, det_lib_path(libdir))
        eggdir_regex = re.compile('%s-[0-9a-z.]+-py[0-9.]+.egg' % pkg.replace('-', '_'))
        subdirs = (os.path.exists(full_libpath) and sorted(os.listdir(full_libpath))) or []
        for subdir in subdirs:
            if eggdir_regex.match(subdir):
                eggdir = os.path.join(full_libpath, subdir)
                if res is None:
                    debug("Found egg dir for %s at %s" % (pkg, eggdir))
                    res = eggdir
                else:
                    debug("Found another egg dir for %s at %s (ignoring it)" % (pkg, eggdir))

    # no egg dir found
    if res is None:
        debug("Failed to determine egg dir path for %s in %s (subdirs: %s)" % (pkg, path, subdirs))

    return res


def prep(path):
    """Prepare for installing a Python package in the specified path.

    Resets the environment, prepends <path>/bin to $PATH, creates the
    expected site-packages dirs and sets $PYTHONPATH accordingly.
    """
    debug("Preparing for path %s" % path)

    # restore original environment first
    os.environ = copy.deepcopy(orig_os_environ)
    debug("os.environ['PYTHONPATH'] after reset: %s" % os.environ['PYTHONPATH'])

    # update PATH
    os.environ['PATH'] = os.pathsep.join([os.path.join(path, 'bin')] +
                                         [x for x in os.environ.get('PATH', '').split(os.pathsep) if len(x) > 0])
    debug("$PATH: %s" % os.environ['PATH'])

    # update actual Python search path
    sys.path.insert(0, path)

    # make sure directory exists (this is required by setuptools)
    # usually it's 'lib', but can be 'lib64' as well
    for libdir in ['lib', 'lib64']:
        full_libpath = os.path.join(path, det_lib_path(libdir))
        if not os.path.exists(full_libpath):
            os.makedirs(full_libpath)

    # PYTHONPATH needs to be set as well, otherwise setuptools will fail
    pythonpaths = [x for x in os.environ.get('PYTHONPATH', '').split(os.pathsep) if len(x) > 0]
    os.environ['PYTHONPATH'] = os.pathsep.join([full_libpath] + pythonpaths)
    debug("$PYTHONPATH: %s" % os.environ['PYTHONPATH'])

    os.environ['EASYBUILD_MODULES_TOOL'] = easybuild_modules_tool
    debug("$EASYBUILD_MODULES_TOOL set to %s" % os.environ['EASYBUILD_MODULES_TOOL'])

    if easybuild_module_syntax:
        # if module syntax is specified, use it
        os.environ['EASYBUILD_MODULE_SYNTAX'] = easybuild_module_syntax
        debug("Using specified module syntax: %s" % os.environ['EASYBUILD_MODULE_SYNTAX'])
    elif easybuild_modules_tool != 'Lmod':
        # Lua is the default module syntax, but that requires Lmod
        # if Lmod is not being used, use Tcl module syntax
        os.environ['EASYBUILD_MODULE_SYNTAX'] = 'Tcl'
        debug("$EASYBUILD_MODULE_SYNTAX set to %s" % os.environ['EASYBUILD_MODULE_SYNTAX'])


def check_module_command(tmpdir):
    """Check which module command is available, and prepare for using it."""
    global easybuild_modules_tool

    if easybuild_modules_tool is not None:
        info("Using modules tool specified by $EASYBUILD_MODULES_TOOL: %s" % easybuild_modules_tool)
        return easybuild_modules_tool

    def check_cmd_help(modcmd):
        """Check 'help' output for specified command."""
        modcmd_re = re.compile(r'module\s.*command')
        cmd = "%s python help" % modcmd
        os.system("%s > %s 2>&1" % (cmd, out))
        txt = open(out, 'r').read()
        debug("Output from %s: %s" % (cmd, txt))
        return modcmd_re.search(txt)

    def is_modulecmd_tcl_modulestcl():
        """Determine if modulecmd.tcl is EnvironmentModulesTcl."""
        modcmd_re = re.compile('Modules Release Tcl')
        cmd = "modulecmd.tcl python --version"
        os.system("%s > %s 2>&1" % (cmd, out))
        txt = open(out, 'r').read()
debug("Output from %s: %s" % (cmd, txt)) return modcmd_re.search(txt) # order matters, which is why we don't use a dict known_module_commands = [ ('lmod', 'Lmod'), ('modulecmd.tcl', 'EnvironmentModules'), ('modulecmd', 'EnvironmentModulesC'), ] out = os.path.join(tmpdir, 'module_command.out') modtool = None for modcmd, modtool in known_module_commands: if check_cmd_help(modcmd): # distinguish between EnvironmentModulesTcl and EnvironmentModules if modcmd == 'modulecmd.tcl' and is_modulecmd_tcl_modulestcl(): modtool = 'EnvironmentModulesTcl' easybuild_modules_tool = modtool info("Found module command '%s' (%s), so using it." % (modcmd, modtool)) break elif modcmd == 'lmod': # check value of $LMOD_CMD as fallback modcmd = os.environ.get('LMOD_CMD') if modcmd and check_cmd_help(modcmd): easybuild_modules_tool = modtool info("Found module command '%s' via $LMOD_CMD (%s), so using it." % (modcmd, modtool)) break elif modtool == 'EnvironmentModules': # check value of $MODULESHOME as fallback moduleshome = os.environ.get('MODULESHOME', 'MODULESHOME_NOT_DEFINED') modcmd = os.path.join(moduleshome, 'libexec', 'modulecmd.tcl') if os.path.exists(modcmd) and check_cmd_help(modcmd): easybuild_modules_tool = modtool info("Found module command '%s' via $MODULESHOME (%s), so using it." 
% (modcmd, modtool)) break if easybuild_modules_tool is None: mod_cmds = [m for (m, _) in known_module_commands] msg = [ "Could not find any module command, make sure one available in your $PATH.", "Known module commands are checked in order, and include: %s" % ', '.join(mod_cmds), "Check the output of 'type module' to determine the location of the module command you are using.", ] error('\n'.join(msg)) return modtool def check_setuptools(): """Check whether a suitable setuptools installation is already available.""" debug("Checking whether suitable setuptools installation is available...") res = None _, outfile = tempfile.mkstemp() # note: we need to be very careful here, because switching to a different setuptools installation (e.g. in stage0) # after the setuptools module was imported is very tricky... # So, we'll check things by running commands through os.system rather than importing setuptools directly. cmd_tmpl = "%s -c '%%s' > %s 2>&1" % (sys.executable, outfile) # check setuptools version try: os.system(cmd_tmpl % "import setuptools; print(setuptools.__version__)") setuptools_ver = LooseVersion(open(outfile).read().strip()) debug("Found setuptools version %s" % setuptools_ver) min_setuptools_ver = '0.6c11' if setuptools_ver < LooseVersion(min_setuptools_ver): debug("Minimal setuptools version %s not satisfied, found '%s'" % (min_setuptools_ver, setuptools_ver)) res = False except Exception as err: debug("Failed to check setuptools version: %s" % err) res = False os.system(cmd_tmpl % "from setuptools.command import easy_install; print(easy_install.__file__)") out = open(outfile).read().strip() debug("Location of setuptools' easy_install module: %s" % out) if 'setuptools/command/easy_install' not in out: debug("Module 'setuptools.command.easy_install not found") res = False if res is None: os.system(cmd_tmpl % "import setuptools; print(setuptools.__file__)") setuptools_loc = open(outfile).read().strip() res = os.path.dirname(os.path.dirname(setuptools_loc)) 
debug("Location of setuptools installation: %s" % res) try: os.remove(outfile) except Exception: pass return res def run_easy_install(args): """Run easy_install with specified list of arguments""" import setuptools debug("Active setuptools installation: %s" % setuptools.__file__) from setuptools.command import easy_install orig_stdout, orig_stderr = mock_stdout_stderr() try: easy_install.main(args) easy_install_stdout, easy_install_stderr = restore_stdout_stderr(orig_stdout, orig_stderr) except (Exception, SystemExit) as err: easy_install_stdout, easy_install_stderr = restore_stdout_stderr(orig_stdout, orig_stderr) error("Running 'easy_install %s' failed: %s\n%s" % (' '.join(args), err, traceback.format_exc())) debug("stdout for 'easy_install %s':\n%s" % (' '.join(args), easy_install_stdout)) debug("stderr for 'easy_install %s':\n%s" % (' '.join(args), easy_install_stderr)) def check_easy_install_cmd(): """Try to make sure available 'easy_install' command matches active 'setuptools' installation.""" debug("Checking whether available 'easy_install' command matches active 'setuptools' installation...") _, outfile = tempfile.mkstemp() import setuptools debug("Location of active setuptools installation: %s" % setuptools.__file__) easy_install_regex = re.compile('^(setuptools|distribute) %s' % setuptools.__version__) debug("Pattern for 'easy_install --version': %s" % easy_install_regex.pattern) pythonpath = os.getenv('PYTHONPATH', '') cmd = "PYTHONPATH='%s' %s -m easy_install --version" % (pythonpath, sys.executable) os.system("%s > %s 2>&1" % (cmd, outfile)) outtxt = open(outfile).read().strip() debug("Output of '%s':\n%s" % (cmd, outtxt)) res = bool(easy_install_regex.match(outtxt)) debug("Result: %s" % res) if res: debug("Found right 'easy_install' command") return error("Failed to find right 'easy_install' command!") # # Stage functions # def stage0(tmpdir): """STAGE 0: Prepare and install distribute via included (patched) distribute_setup.py script.""" print('\n') 
info("+++ STAGE 0: installing distribute via included (patched) distribute_setup.py...\n") txt = DISTRIBUTE_SETUP_PY if not print_debug: # silence distribute_setup.py by redirecting output to /dev/null txt = re.sub(r'([^\n]*)(return subprocess.call\(args)(\) == 0)', r"\1f = open(os.devnull, 'w'); \2, stdout=f, stderr=f\3", txt) # silence distribute_setup.py completely by setting high log level threshold txt = re.sub(r'([^\n]*)(# extracting the tarball[^\n]*)', r'\1log.set_verbosity(1000)\n\1\2', txt) # write distribute_setup.py to file (with correct header) distribute_setup = os.path.join(tmpdir, 'distribute_setup.py') f = open(distribute_setup, "w") f.write(txt) f.close() # create expected directories, set Python search path debug("preparing environment...") prep(tmpdir) import distribute_setup debug("distribute_setup.__file__: %s" % distribute_setup.__file__) # install easy_install to temporary directory from distribute_setup import main as distribute_setup_main orig_sys_argv = sys.argv[:] # make a copy sys.argv.append('--prefix=%s' % tmpdir) # We download a custom version of distribute: it uses a newer version of markerlib to avoid a bug (#1099) # It's is the source of distribute 0.6.49 with the file _markerlib/markers.py replaced by the 0.6 version of # markerlib which can be found at https://pypi.python.org/pypi/markerlib/0.6.0 sys.argv.append('--download-base=https://easybuilders.github.io/easybuild/files/') distribute_setup_main(version="0.6.49-patched1") sys.argv = orig_sys_argv # sanity check if os.path.exists(os.path.join(tmpdir, 'bin', 'easy_install')): debug("easy_install sanity check OK") else: error("Installing distribute which should deliver easy_install failed?") # prepend distribute egg dir to sys.path, so we know which setuptools we're using distribute_egg_dir = find_egg_dir_for(tmpdir, 'distribute') if distribute_egg_dir is None: error("Failed to determine egg dir path for distribute_egg_dir in %s" % tmpdir) else: sys.path.insert(0, 
distribute_egg_dir) # make sure we're getting the setuptools we expect import setuptools from setuptools.command import easy_install for mod, path in [('setuptools', setuptools.__file__), ('easy_install', easy_install.__file__)]: if tmpdir not in path: error("Found another %s module than expected: %s" % (mod, path)) else: debug("Found %s in expected path, good!" % mod) info("Installed setuptools version %s (%s)" % (setuptools.__version__, setuptools.__file__)) return distribute_egg_dir def stage1(tmpdir, sourcepath, distribute_egg_dir, forcedversion): """STAGE 1: temporary install EasyBuild using distribute's easy_install.""" print('\n') info("+++ STAGE 1: installing EasyBuild in temporary dir with easy_install...\n") # determine locations of source tarballs, if sources path is specified source_tarballs = {} if sourcepath is not None: info("Fetching sources from %s..." % sourcepath) for pkg in EASYBUILD_PACKAGES: pkg_tarball_glob = os.path.join(sourcepath, '%s*.tar.gz' % pkg) pkg_tarball_paths = glob.glob(pkg_tarball_glob) if len(pkg_tarball_paths) > 1: error("Multiple tarballs found for %s: %s" % (pkg, pkg_tarball_paths)) elif len(pkg_tarball_paths) == 0: if pkg not in [VSC_BASE, VSC_INSTALL]: # vsc-base package is not strictly required # it's only a dependency since EasyBuild v2.0; # with EasyBuild v2.0, it will be pulled in from PyPI when installing easybuild-framework; # vsc-install is an optional dependency, only required to run unit tests error("Missing source tarball: %s" % pkg_tarball_glob) else: info("Found %s for %s package" % (pkg_tarball_paths[0], pkg)) source_tarballs.update({pkg: pkg_tarball_paths[0]}) if print_debug: debug("$ easy_install --help") run_easy_install(['--help']) # prepare install dir targetdir_stage1 = os.path.join(tmpdir, STAGE1_SUBDIR) prep(targetdir_stage1) # set PATH, Python search path # install latest EasyBuild with easy_install from PyPi cmd = [ '--upgrade', # make sure the latest version is pulled from PyPi '--prefix=%s' % 
targetdir_stage1, ] post_vsc_base = [] if source_tarballs: # install provided source tarballs (order matters) cmd.extend([source_tarballs[pkg] for pkg in EASYBUILD_PACKAGES if pkg in source_tarballs]) # add vsc-base again at the end, to avoid that the one available on the system is used instead if VSC_BASE in source_tarballs: cmd.append(source_tarballs[VSC_BASE]) else: # install meta-package easybuild from PyPI if forcedversion: cmd.append('easybuild==%s' % forcedversion) elif IS_PY3: cmd.append('easybuild>=4.0') # Python 3 support added in EasyBuild 4 else: cmd.append('easybuild') if not IS_PY3: # install vsc-base again at the end, to avoid that the one available on the system is used instead post_vsc_base = cmd[:] post_vsc_base[-1] = VSC_BASE + '<2.9.0' if not print_debug: cmd.insert(0, '--quiet') # There is no support for Python3 in the older vsc-* packages and EasyBuild 4 includes working versions of vsc-* if not IS_PY3: # install vsc-install version prior to 0.11.4, where mock was introduced as a dependency # workaround for problem reported in https://github.com/easybuilders/easybuild-framework/issues/2712 # also stick to vsc-base < 2.9.0 to avoid requiring 'future' Python package as dependency for pkg in [VSC_INSTALL + '<0.11.4', VSC_BASE + '<2.9.0']: precmd = cmd[:-1] + [pkg] info("running pre-install command 'easy_install %s'" % (' '.join(precmd))) run_easy_install(precmd) info("installing EasyBuild with 'easy_install %s'\n" % (' '.join(cmd))) syntax_error_note = '\n'.join([ "Note: a 'SyntaxError' may be reported for the easybuild/tools/py2vs3/py%s.py module." 
% ('3', '2')[IS_PY3], "You can safely ignore this message, it will not affect the functionality of the EasyBuild installation.", '', ]) info(syntax_error_note) run_easy_install(cmd) if post_vsc_base: info("running post install command 'easy_install %s'" % (' '.join(post_vsc_base))) run_easy_install(post_vsc_base) pkg_egg_dir = find_egg_dir_for(targetdir_stage1, VSC_BASE) if pkg_egg_dir is None: # if vsc-base available on system is the same version as the one being installed, # the .egg directory may not get installed... # in that case, try to have it *copied* by also including --always-copy; # using --always-copy should be used as a last resort, since it can result in all kinds of problems info(".egg dir for vsc-base not found, trying again with --always-copy...") post_vsc_base.insert(0, '--always-copy') info("running post install command 'easy_install %s'" % (' '.join(post_vsc_base))) run_easy_install(post_vsc_base) # clear the Python search path, we only want the individual eggs dirs to be in the PYTHONPATH (see below) # this is needed to avoid easy-install.pth controlling what Python packages are actually used if distribute_egg_dir is not None: os.environ['PYTHONPATH'] = distribute_egg_dir else: del os.environ['PYTHONPATH'] # template string to inject in template easyconfig templates = {} for pkg in EASYBUILD_PACKAGES: templates.update({pkg: ''}) pkg_egg_dir = find_egg_dir_for(targetdir_stage1, pkg) if pkg_egg_dir is None: if pkg in [VSC_BASE, VSC_INSTALL]: # vsc-base is optional in older EasyBuild versions continue # prepend EasyBuild egg dirs to Python search path, so we know which EasyBuild we're using sys.path.insert(0, pkg_egg_dir) pythonpaths = [x for x in os.environ.get('PYTHONPATH', '').split(os.pathsep) if len(x) > 0] os.environ['PYTHONPATH'] = os.pathsep.join([pkg_egg_dir] + pythonpaths) debug("$PYTHONPATH: %s" % os.environ['PYTHONPATH']) if source_tarballs: if pkg in source_tarballs: templates.update({pkg: "'%s'," % 
os.path.basename(source_tarballs[pkg])}) else: # determine per-package versions based on egg dirs, to use them in easyconfig template version_regex = re.compile('%s-([0-9a-z.-]*)-py[0-9.]*.egg' % pkg.replace('-', '_')) pkg_egg_dirname = os.path.basename(pkg_egg_dir) res = version_regex.search(pkg_egg_dirname) if res is not None: pkg_version = res.group(1) debug("Found version for easybuild-%s: %s" % (pkg, pkg_version)) templates.update({pkg: "'%s-%s.tar.gz'," % (pkg, pkg_version)}) else: tup = (pkg, pkg_egg_dirname, version_regex.pattern) error("Failed to determine version for easybuild-%s package from %s with %s" % tup) # figure out EasyBuild version via eb command line # note: EasyBuild uses some magic to determine the EasyBuild version based on the versions of the individual pkgs ver_regex = {'ver': '[0-9.]*[a-z0-9]*'} pattern = r"This is EasyBuild (?P<version>%(ver)s) \(framework: %(ver)s, easyblocks: %(ver)s\)" % ver_regex version_re = re.compile(pattern) version_out_file = os.path.join(tmpdir, 'eb_version.out') eb_version_cmd = 'from easybuild.tools.version import this_is_easybuild; print(this_is_easybuild())' cmd = "%s -c '%s' > %s 2>&1" % (sys.executable, eb_version_cmd, version_out_file) debug("Determining EasyBuild version using command '%s'" % cmd) os.system(cmd) txt = open(version_out_file, "r").read() res = version_re.search(txt) if res: eb_version = res.group(1) debug("installing EasyBuild v%s" % eb_version) else: error("Stage 1 failed, could not determine EasyBuild version (txt: %s)." 
% txt) templates.update({'version': eb_version}) # clear PYTHONPATH before we go to stage2 # PYTHONPATH doesn't need to (and shouldn't) include the stage1 egg dirs os.environ['PYTHONPATH'] = '' # make sure we're getting the expected EasyBuild packages import easybuild.framework import easybuild.easyblocks pkgs_to_check = [easybuild.framework, easybuild.easyblocks] # vsc is part of EasyBuild 4 if LooseVersion(eb_version) < LooseVersion('4'): import vsc.utils.fancylogger pkgs_to_check.append(vsc.utils.fancylogger) for pkg in pkgs_to_check: if tmpdir not in pkg.__file__: error("Found another %s than expected: %s" % (pkg.__name__, pkg.__file__)) else: debug("Found %s in expected path, good!" % pkg.__name__) debug("templates: %s" % templates) return templates def stage2(tmpdir, templates, install_path, distribute_egg_dir, sourcepath): """STAGE 2: install EasyBuild to temporary dir with EasyBuild from stage 1.""" print('\n') info("+++ STAGE 2: installing EasyBuild in %s with EasyBuild from stage 1...\n" % install_path) preinstallopts = '' eb_looseversion = LooseVersion(templates['version']) # setuptools is no longer required for EasyBuild v4.0 & newer, so skip the setuptools stuff in that case if eb_looseversion < LooseVersion('4.0') and distribute_egg_dir is not None: # inject path to distribute installed in stage 0 into $PYTHONPATH via preinstallopts # other approaches are not reliable, since EasyBuildMeta easyblock unsets $PYTHONPATH; # this is required for the easy_install from stage 0 to work preinstallopts += "export PYTHONPATH=%s:$PYTHONPATH && " % distribute_egg_dir # ensure that (latest) setuptools is installed as well alongside EasyBuild, # since it is a required runtime dependency for recent vsc-base and EasyBuild versions # this is necessary since we provide our own distribute installation during the bootstrap (cfr. 
stage0) preinstallopts += "%s -m easy_install -U --prefix %%(installdir)s setuptools && " % sys.executable # vsc-install is no longer required for EasyBuild v4.0, so skip pre-installed vsc-install in that case if eb_looseversion < LooseVersion('4.0'): # vsc-install is a runtime dependency for the EasyBuild unit test suite, # and is easily picked up from stage1 rather than being actually installed, so force it vsc_install = "'%s<0.11.4'" % VSC_INSTALL if sourcepath: vsc_install_tarball_paths = glob.glob(os.path.join(sourcepath, 'vsc-install*.tar.gz')) if len(vsc_install_tarball_paths) == 1: vsc_install = vsc_install_tarball_paths[0] preinstallopts += "%s -m easy_install -U --prefix %%(installdir)s %s && " % (sys.executable, vsc_install) templates.update({ 'preinstallopts': preinstallopts, }) # determine PyPI URLs for individual packages pkg_urls = [] for pkg in EASYBUILD_PACKAGES: # vsc-base and vsc-install are not dependencies anymore for EasyBuild v4.0, # so skip them here for recent EasyBuild versions if eb_looseversion >= LooseVersion('4.0') and pkg in [VSC_INSTALL, VSC_BASE]: continue # format of pkg entries in templates: "'<pkg_filename>'," pkg_filename = templates[pkg][1:-2] # the lines below implement a simplified version of the 'pypi_source_urls' and 'derive_alt_pypi_url' functions, # which we can't leverage here, partially because of transitional changes in PyPI (#md5= -> #sha256=) # determine download URL via PyPI's 'simple' API pkg_simple = None try: pkg_simple = std_urllib.urlopen('https://pypi.python.org/simple/%s' % pkg, timeout=10).read() except (std_urllib.URLError, std_urllib.HTTPError) as err: # failing to figure out the package download URl may be OK when source tarballs are provided if sourcepath: info("Ignoring failed attempt to determine '%s' download URL since source tarballs are provided" % pkg) else: raise err if pkg_simple: if IS_PY3: pkg_simple = pkg_simple.decode('utf-8') pkg_url_part_regex = re.compile('/(packages/[^#]+)/%s#' % 
pkg_filename) res = pkg_url_part_regex.search(pkg_simple) if res: pkg_url = 'https://pypi.python.org/' + res.group(1) pkg_urls.append(pkg_url) elif sourcepath: info("Ignoring failure to determine source URL for '%s' (source tarballs are provided)" % pkg_filename) else: error_msg = "Failed to determine PyPI package URL for %s using pattern '%s': %s\n" error(error_msg % (pkg, pkg_url_part_regex.pattern, pkg_simple)) # vsc-base and vsc-install are no longer required for EasyBuild v4.0.0, # so only include them in 'sources' for older versions sources_tmpl = "%(easybuild-framework)s%(easybuild-easyblocks)s%(easybuild-easyconfigs)s" if eb_looseversion < LooseVersion('4.0'): sources_tmpl = "%(vsc-install)s%(vsc-base)s" + sources_tmpl templates.update({ 'source_urls': '\n'.join(["'%s'," % x for x in pkg_urls]), 'sources': sources_tmpl % templates, 'pythonpath': distribute_egg_dir, }) # create easyconfig file ebfile = os.path.join(tmpdir, 'EasyBuild-%s.eb' % templates['version']) handle = open(ebfile, 'w') ebfile_txt = EASYBUILD_EASYCONFIG_TEMPLATE % templates handle.write(ebfile_txt) handle.close() debug("Contents of generated easyconfig file:\n%s" % ebfile_txt) # set command line arguments for eb eb_args = ['eb', ebfile, '--allow-modules-tool-mismatch'] if print_debug: eb_args.extend(['--debug', '--logtostdout']) if forced_install: info("Performing FORCED installation, as requested...") eb_args.append('--force') # make sure we don't leave any stuff behind in default path $HOME/.local/easybuild # and set build and install path explicitely if LooseVersion(templates['version']) < LooseVersion('1.3.0'): os.environ['EASYBUILD_PREFIX'] = tmpdir os.environ['EASYBUILD_BUILDPATH'] = tmpdir if install_path is not None: os.environ['EASYBUILD_INSTALLPATH'] = install_path else: # only for v1.3 and up eb_args.append('--prefix=%s' % tmpdir) eb_args.append('--buildpath=%s' % tmpdir) if install_path is not None: eb_args.append('--installpath=%s' % install_path) if sourcepath is not None: 
eb_args.append('--sourcepath=%s' % sourcepath) # make sure EasyBuild can find EasyBuild-*.eb easyconfig file when it needs to; # (for example when HierarchicalMNS is used as module naming scheme, # see https://github.com/easybuilders/easybuild-framework/issues/2393) eb_args.append('--robot-paths=%s:' % tmpdir) # make sure parent modules path already exists (Lmod trips over a non-existing entry in $MODULEPATH) if install_path is not None: modules_path = det_modules_path(install_path) if not os.path.exists(modules_path): os.makedirs(modules_path) debug("Created path %s" % modules_path) debug("Running EasyBuild with arguments '%s'" % ' '.join(eb_args)) sys.argv = eb_args # location to 'eb' command (from stage 1) may be expected to be included in $PATH # it usually is there after stage1, unless 'prep' is called again with another location # (only when stage 0 is not skipped) # cfr. https://github.com/easybuilders/easybuild-framework/issues/2279 curr_path = [x for x in os.environ.get('PATH', '').split(os.pathsep) if len(x) > 0] os.environ['PATH'] = os.pathsep.join([os.path.join(tmpdir, STAGE1_SUBDIR, 'bin')] + curr_path) debug("$PATH: %s" % os.environ['PATH']) # install EasyBuild with EasyBuild from easybuild.main import main as easybuild_main easybuild_main() if print_debug: os.environ['EASYBUILD_DEBUG'] = '1' # make sure the EasyBuild module was actually installed # EasyBuild configuration options that are picked up from configuration files/environment may break the bootstrap, # for example by having $EASYBUILD_VERSION defined or via a configuration file specifies a value for 'stop'... 
from easybuild.tools.config import build_option, install_path, get_module_syntax from easybuild.framework.easyconfig.easyconfig import ActiveMNS eb_spec = { 'name': 'EasyBuild', 'hidden': False, 'toolchain': {'name': 'dummy', 'version': 'dummy'}, 'version': templates['version'], 'versionprefix': '', 'versionsuffix': '', 'moduleclass': 'tools', } mod_path = os.path.join(install_path('mod'), build_option('suffix_modules_path')) debug("EasyBuild module should have been installed to %s" % mod_path) eb_mod_name = ActiveMNS().det_full_module_name(eb_spec) debug("EasyBuild module name: %s" % eb_mod_name) eb_mod_path = os.path.join(mod_path, eb_mod_name) if get_module_syntax() == 'Lua': eb_mod_path += '.lua' if os.path.exists(eb_mod_path): info("EasyBuild module installed: %s" % eb_mod_path) else: error("EasyBuild module not found at %s, define $EASYBUILD_BOOTSTRAP_DEBUG to debug" % eb_mod_path) def main(): """Main script: bootstrap EasyBuild in stages.""" self_txt = open(__file__).read() if IS_PY3: self_txt = self_txt.encode('utf-8') url = 'https://docs.easybuild.io/en/latest/Installation.html' info("Use of the EasyBuild boostrap script is DEPRECATED (since June 2021).") info("It is strongly recommended to use one of the installation methods outlined at %s instead!\n" % url) if not EASYBUILD_BOOTSTRAP_DEPRECATED: error("The EasyBuild bootstrap script will only run if $EASYBUILD_BOOTSTRAP_DEPRECATED is defined.") else: msg = "You have opted to continue with the EasyBuild bootstrap script by defining " msg += "$EASYBUILD_BOOTSTRAP_DEPRECATED. 
Good luck!\n" info(msg) info("EasyBuild bootstrap script (version %s, MD5: %s)" % (EB_BOOTSTRAP_VERSION, md5(self_txt).hexdigest())) info("Found Python %s\n" % '; '.join(sys.version.split('\n'))) # disallow running as root, since stage 2 will fail if os.getuid() == 0: error("Don't run the EasyBuild bootstrap script as root, " "since stage 2 (installing EasyBuild with EasyBuild) will fail.") # general option/argument parser if HAVE_ARGPARSE: bs_argparser = argparse.ArgumentParser() bs_argparser.add_argument("prefix", help="Installation prefix directory", type=str) bs_args = bs_argparser.parse_args() # prefix specification install_path = os.path.abspath(bs_args.prefix) else: bs_argparser = optparse.OptionParser(usage="usage: %prog [options] prefix") (bs_opts, bs_args) = bs_argparser.parse_args() # poor method, but should prefer argparse module for better pos arg support. if len(bs_args) < 1: error("Too few arguments\n" + bs_argparser.get_usage()) elif len(bs_args) > 1: error("Too many arguments\n" + bs_argparser.get_usage()) # prefix specification install_path = os.path.abspath(str(bs_args[0])) info("Installation prefix %s" % install_path) sourcepath = EASYBUILD_BOOTSTRAP_SOURCEPATH if sourcepath is not None: info("Fetching sources from %s..." % sourcepath) forcedversion = EASYBUILD_BOOTSTRAP_FORCE_VERSION if forcedversion: info("Forcing specified version %s..." 
% forcedversion) if IS_PY3 and LooseVersion(forcedversion) < LooseVersion('4'): error('Python 3 support is only available with EasyBuild 4.x but you are trying to install EasyBuild %s' % forcedversion) # create temporary dir for temporary installations tmpdir = tempfile.mkdtemp() debug("Going to use %s as temporary directory" % tmpdir) os.chdir(tmpdir) # check whether a module command is available, we need that modtool = check_module_command(tmpdir) # clean sys.path, remove paths that may contain EasyBuild packages or stuff installed with easy_install orig_sys_path = sys.path[:] sys.path = [] for path in orig_sys_path: include_path = True # exclude path if it's potentially an EasyBuild/VSC package, providing the 'easybuild'/'vsc' namespace, resp. if any([os.path.exists(os.path.join(path, pkg, '__init__.py')) for pkg in ['easyblocks', 'easybuild', 'vsc']]): include_path = False # exclude any .egg paths if path.endswith('.egg'): include_path = False # exclude any path that contains an easy-install.pth file if os.path.exists(os.path.join(path, 'easy-install.pth')): include_path = False if include_path: sys.path.append(path) else: debug("Excluding %s from sys.path" % path) debug("sys.path after cleaning: %s" % sys.path) # install EasyBuild in stages # STAGE 0: install distribute, which delivers easy_install distribute_egg_dir = None if EASYBUILD_BOOTSTRAP_SKIP_STAGE0: info("Skipping stage 0, using local distribute/setuptools providing easy_install") else: setuptools_loc = check_setuptools() if setuptools_loc: info("Suitable setuptools installation already found, skipping stage 0...") sys.path.insert(0, setuptools_loc) else: info("No suitable setuptools installation found, proceeding with stage 0...") distribute_egg_dir = stage0(tmpdir) # STAGE 1: install EasyBuild using easy_install to tmp dir templates = stage1(tmpdir, sourcepath, distribute_egg_dir, forcedversion) # add location to easy_install provided through stage0 to $PATH # this must be done *after* stage1, 
since $PATH is reset during stage1 if distribute_egg_dir: prep(tmpdir) # make sure the active 'easy_install' is the right one (i.e. it matches the active setuptools installation) check_easy_install_cmd() # STAGE 2: install EasyBuild using EasyBuild (to final target installation dir) stage2(tmpdir, templates, install_path, distribute_egg_dir, sourcepath) # clean up the mess debug("Cleaning up %s..." % tmpdir) shutil.rmtree(tmpdir) print('') info('Bootstrapping EasyBuild completed!\n') if install_path is not None: info('EasyBuild v%s was installed to %s, so make sure your $MODULEPATH includes %s' % (templates['version'], install_path, det_modules_path(install_path))) else: info('EasyBuild v%s was installed to configured install path, make sure your $MODULEPATH is set correctly.' % templates['version']) info('(default config => add "$HOME/.local/easybuild/modules/all" in $MODULEPATH)') print('') info("Run 'module load EasyBuild', and run 'eb --help' to get help on using EasyBuild.") info("Set $EASYBUILD_MODULES_TOOL to '%s' to use the same modules tool as was used now." % modtool) print('') info("By default, EasyBuild will install software to $HOME/.local/easybuild.") info("To install software with EasyBuild to %s, set $EASYBUILD_INSTALLPATH accordingly." 
% install_path) info("See http://easybuild.readthedocs.org/en/latest/Configuration.html for details on configuring EasyBuild.") # template easyconfig file for EasyBuild EASYBUILD_EASYCONFIG_TEMPLATE = """ easyblock = 'EB_EasyBuildMeta' name = 'EasyBuild' version = '%(version)s' homepage = 'http://easybuilders.github.com/easybuild/' description = \"\"\"EasyBuild is a software build and installation framework written in Python that allows you to install software in a structured, repeatable and robust way.\"\"\" toolchain = {'name': 'dummy', 'version': 'dummy'} source_urls = [%(source_urls)s] sources = [%(sources)s] # EasyBuild is a (set of) Python packages, so it depends on Python # usually, we want to use the system Python, so no actual Python dependency is listed allow_system_deps = [('Python', SYS_PYTHON_VERSION)] preinstallopts = "%(preinstallopts)s" sanity_check_paths = { 'files': ['bin/eb'], 'dirs': ['lib'], } moduleclass = 'tools' """ # check Python version loose_pyver = LooseVersion(python_version()) min_pyver2 = LooseVersion('2.6') min_pyver3 = LooseVersion('3.5') if loose_pyver < min_pyver2 or (loose_pyver >= LooseVersion('3') and loose_pyver < min_pyver3): sys.stderr.write("ERROR: Incompatible Python version: %s (should be Python 2 >= %s or Python 3 >= %s)\n" % (python_version(), min_pyver2, min_pyver3)) sys.exit(1) # distribute_setup.py script (https://pypi.python.org/pypi/distribute) # # A compressed copy of a patched distribute_setup.py (version 0.6.49), generated like so: # >>> import base64 # >>> import zlib # >>> base64.b64encode(zlib.compress(open("distribute_setup.py").read())) # compressed copy below is for setuptools 0.6c11, after applying patch: # # --- distribute_setup.py.orig 2013-07-05 03:50:13.000000000 +0200 # +++ distribute_setup.py 2015-11-27 12:20:12.040032041 +0100 # @@ -528,6 +528,8 @@ # log.warn("--user requires Python 2.6 or later") # raise SystemExit(1) # install_args.append('--user') # + if options.prefix_install: # + 
install_args.append('--prefix=%s' % options.prefix_install) # return install_args # # def _parse_args(): # @@ -539,6 +541,8 @@ # '--user', dest='user_install', action='store_true', default=False, # help='install in user site package (requires Python 2.6 or later)') # parser.add_option( # + '--prefix', dest='prefix_install', metavar="PATH", help='install in prefix') # + parser.add_option( # '--download-base', dest='download_base', metavar="URL", # default=DEFAULT_URL, # help='alternative URL from where to download the distribute package') # @@ -549,7 +553,7 @@ # def main(version=DEFAULT_VERSION): # """Install or upgrade setuptools and EasyInstall""" # options = _parse_args() # - tarball = download_setuptools(download_base=options.download_base) # + tarball = download_setuptools(version=version, download_base=options.download_base) # return _install(tarball, _build_install_args(options)) # # if __name__ == '__main__': # DISTRIBUTE_SETUP_PY = """ eJztPGtz2ziS3/UrcHK5SGVlxs7Mze6lTlOVmTizrs0mqdjZ/ZC4ZIiEJI75Gj6saH/9dTcAAiAh 2bmZ/XBV592JJaLRaPS7G6BP/qPat9uymEyn05/Ksm3amlcsSeF3uupawdKiaXmW8TYFoMnVmu3L ju140bK2ZF0jWCParmrLMmsAFkdrVvH4nm9E0MjBqNrP2a9d0wJAnHWJYO02bSbrNEP08AWQ8FzA qrWI27Les13ablnazhkvEsaThCbgggjblhUr13Iljf/ly8mEwc+6LnOL+iWNszSvyrpFapeGWoJ3 H4Wz0Q5r8VsHZDHOmkrE6TqN2YOoG2AG0mCmzvEzQCXlrshKnkzytK7Les7KmrjEC8azVtQFB55q ILPjOS0aA1RSsqZkqz1ruqrK9mmxmeCmeVXVZVWnOL2sUBjEj7u74Q7u7qLJ5AbZRfyNaWHEKFjd wecGtxLXaUXbU9IlKqtNzRNbnhEqxUQxr2z0p2bbtWnWf9v3A22aC/15XeS8jbf9kMgrpKf/zmv7 K+yo4nUjJpNegLhGoyWXlZvJpK33L42QmxSVUw5/ur78uLy+urmciK+xgJ1d0fNLlICc0kOwBXtX FsLCpvfRrYDDsWgaqUmJWLOltI1lnCfhM15vmpmcgj/4FZCFsP9IfBVx1/JVJuYz9ica6uFqYGdd WOijGBgeEja2WLDzyUGiT8AOQDYgORBywtYgJEkQexF994cSecJ+68oWdA0fd7koWmD9GpYvQFUN GDxCTBV4AyAmR/IDgPnuRWCW1GQhQoHbnLljCk8A/wPbh/HxsMW2YHraTAN2ioAjOAUzHFKb/mwo INbBB7ViczuUTtlETcXBKEP49GH5z1dXN3M2YBp7Zsvs9eWbV5/e3iz/cfnx+ur9O1hveh79EH3/ X9N+6NPHt/h427bVy+fPq32VRlJUUVlvnisf2TxvwI/F4nny3Lit59PJ9eXNpw8379+/vV6+efW3 
y9eDheKLi+nEBvrwt1+WV+/evMfx6fTL5O+i5Qlv+dk/pLd6yS6i88k7cLMvLQuf9KOnzeS6y3MO VsG+ws/kr2UuziqgkL5PXnVAeW1/PhM5TzP55G0ai6JRoK+F9C+EFx8AQSDBQzuaTCakxsoVheAa VvB7rn3TEtm+CGczbRTiK8SomLSVwoGEp8E2r8ClAg+0v4ny+wQ/g2fHcfAj0Y7XRRhcGiSgFKdN MFeTJWCZJctdAohAHzaijXeJwtC7DYICY97CnNCeSlCciJBOLiorUehtGZil2ofaszM5irOyERiO jIVuSkUsbroPHD0AOBi5dSCq4u02+hXgFWFzfJiBelm0fj6/nY03IrGYAcOwd+WO7cr63uaYhrao VELDsDXGcdUPste9sgcz278UZet4tEBHeFguUMjh4zNbOQYexqx3DToMmQMstwOXxnZ1CR+Trtaq Y+c2kUXHAIsQBC3QM7McXDhYBeOr8kEMJ6Fypm1cQhjVrvuFA6D8jXy4TgtY3adPUv0Mbhlyozpv ayF6bVOGs+pSABebTQj/zVlvP225RLD/N5v/A2ZDMqT4WzCQogfRTz2EZTsIawQh5W04dciMzn7D f1cYb1Bt6NEZfoN/agvRH6Kkcn9S77Xu1aLpstZVLSBEAisnoOUBBtW0DQ0bGmqeQk599Z6SpTD4 ueyyhGYRH6W1bjZoncpGEtiSSrpDlb/P+zR8ueKN0Nu2Hici43u1KjJ6qCQKPjCB++y0Oav2p0kE /0fOelMb+DllhgpMM9QXiIHrEjTM8/DidvZ09iijAYL7vVglwoH9H6J18HOAS0brHnFHBEhZJJIP HljUbXg+Z0b+Kh93CzWrKlnpMlWnDEsqCBZkOFLeg6JI7XgxSNoGHFhYiZuXG3IHCzSArvYwYXHx n3MQznLN78Xipu5E73tzTg6g6aCS3FE6TzrKV02ZoRkjLyZmCUvTAAR/hzb3drxZSi4J9LVBdb9Z gklRHtlQWo38zcuky0SDpd2XfjeB4coQcOynnS+WaJz1Jg7ECbs27YDz6M8rquAhwoqapVR6diml 0yzEgaoWa1Hj92EcBR/ZtGmLzKHkvdjbbk8JNHImjcjFH4fWSFXz4dSw4ccFkjmdjWZKl+U8VoVa 6CLt6QKK3pXtm7IrEr81ufNU7v1zWayzNG49dVLFmwF/lfVvecPbtnYJAU+0TKy0ylt34Wylov7C i4bcnsiIM9L7mnK1hzhUybo8/V3u+LB2HhC1YcmPiykUvmq9mYew4T6OystdXIAhyoIxJocdzsBl D0VnG+6Y+zgdcg1IM6NdnULpOoKY3lDwpI0ldh/K6teFPy5OoUoFY0NR8weo0ah8/VJMx/jQBmNe BC1mqsqbAubdFltH1HNTrSL4VHcFBvCIfcgECMiLT7eTOMvLGkmNMe/WhK7Tumnn4P0AjXd6AIj3 2quzs0/WvoLIA/+l+FKEP3d1Datke4mYndYzQO6EWBFhmYCp21iZpdDSNnzhjoms8VgI6CAbGJ3l Qj8PXPEtTsHkh5p95f2hDsa32MOTwvQhm/Hqtc9//W6zfZRMH4mjXPOYw1rGtYC4MvRXS9wcWWCf xlJKcCQT+r15Af14kgOVEyhHPJ1OX5u+b2+vlLv03WUwvqyMqSClyKk4n7aynVvwXMiAcKdov4O0 m7LfFfaoH3iWOti16RVdvoIQ3G452XLvFggX9oSp7KBQ2zenQQ1oimB3Dj/uGHa2QnAS8VavLopE pibYuOcseB7MInYneXKHKzrVFzgYUQudpvelsl5FJDKw3xEH++lqD9R2j8sioQZ4xTGZWok1+hvs dcdtxzPThqf9tVjLtpEWw78lKXPCEQm1q7MsXVEgEnj2oQ4h6gwL24lljt5waaF44Zvbbv61RHXA 
hp9TgERYCG/+hR5QSZ/gYa5dD6AgIRhqLDLH5g8CWHqgzNGgKoOvY0QH+1INdctcB7WJRDt7iTx/ 9VCmqNUVmm7Sk2NazKOo3heIvfWQk2+mc9yS67MlUYpL4Wj4hH0UPHlOwZVhhIEEGEhnKzC5+zke u+xQCTEcSt8CmhyXdQ3ugqxvgAx2S1qt7SXFAzBIdBFeq7D+wVYsZgh1DPrAk0FSJdlIREtmzdl0 N8xJAUjlBYjMjI2cppIErOXJMYAA3SQZwAN+T7yDRW140y8nCcNeMrIEJWNdboPDbsB9rcqv4bor YnRnygvSsD1OXfU5e/bsfjc7UnrIMx9TBKrp2jJea9dyLZ8fS5qHsJg3l1niS5iR2oe0lB3C0emK /UOp+vDhcKUI1wFJj5/3izwBRw8LmA7Pq/D4jepDrESfkNqYCW84ADw5px/OOyxDpTxaIxzRP6rO apkx3ccZ5BWBN63zQ9pab+ut1nQia4neIURDmIPDAB9QtCbo/5OiG4/vu4r8xVrm1qKQW4LYrkPS WjsBxDRTEQJcKDi8pUILEGvbg6wd6wQ2jScsNEmGdSfg/RDJXnPV02985ULYnW7FDqMmZtYHhMYq IdITINHEqLEkN2E25uwVLHAX+MKocniap/4kce3zUKT0E0s8sJ7tdqyR3mvhJQTNN680f7eAFHFH hKRpGXFNUlCInY76+BQieBC9f/s6Om3wkBQP4CP8Z9S3/4joZODErIn6xXKHGqNq4GPhjF9D36gi Xz80tOblAyTiYHRL+0glrDIeiy2YkdDHEIMMIW2wkTwGc4n/VNxDjHVOayCnU5uw5h5W0DXWNI5n w0xXX0Cxuvx+UoBqdZ8hUr9DnDu322nPsOF7hoXHMJDoxUd+eAV6cm+zZe2WXoYDpsFN1YF9ScNe d8CAyUAJ/l4+oAqITMgT/rJrMZtGp7Tje2OyuoIaZoIWc+ZMWo4ifiBPNd/igwkSyvCA6KUDK63J iUyjSdLNqhlz5jn+VocWip+jmGF4AXbAhx5uzlxqvKp0Aph3LBNt0DBU+96nD/nZq5niN2paaPdf 54POrTqdwYst+NHinsZwTB4KxjlMHR4SKJBBHW27mhGWca7g00rwyfLUBdiqiVWcOHAGElh94sRq QASDnbju/JCnGTr3Q3C9q+drSNT7KwdIgOKKFQDX1LRWzaj+0KE3EsN8LIQAA554gkfPc6BiWa5+ Dfvz6lmkTyoqKDahopOR8bEGhuPbFOHfMsdxuTa9oJSeOu0xN+wRt+aO7lmMXRCxav9ATAKjU4Eq fPKhlyw2zUZVKLfsSJ25NcYPwgJfHK0LD909mUvavsHtDUgZeUDNy7EL9PoeCX7E8wx9+M8ofxnI R7Pc3KnPVXp/2SdZKqW/LB7SuizQ2AZ5vV10Yzm8hfKOVaLO06bRVw9lGX2KHdD7tKqApumRbYzp U7md14kTwOE8T4qr7RO7w+Ky9CSCCcEwLxqws91aUjUcVI8fS1MdMqxG4FBnHt/fY2Y+8HdPcieT mVuz9BHYZJcnKq5BOIzvUatT/OarFQ7aiszmL3/55QwlifEMpCo/f4OtpKNs/GjQPxSgnhrkjZ5a gf5A4QLE5/c939xHR1kxe8TLPMq5Yxb9h5mWxQqnhFLa4q+i1KC5kUTt2D68jkLrT7JdezC2es4g FWbqSS/x2Dj9GuqoYwJdH3EpTKnzbV5vHl4O/JFJxtVtUQ34WX+AkJ2Ir1YMh1rr4uVtn9XR8Fzf KhVFl4uay/umdo8IQeV9bZn/nZ3VsFt55UZuY1guwDYARwSr1m2D/XTU4wUGNYnH04Bqy0oJh2ZW WdqGAa6zCGafzwbnkBYTNAPtxRSu4WGYpAoLVUmF93A5A+WUl2R/lPzx98occpvPBIncHdP5rbQa 
KoG9XSPqgE5RzO1vdSyKHWtvb8q3Sj/bYx1SK0fqOlT465jT4al1xqrv//oM2HvPguRwpBd3wnYi AKuKgQRP0mHl26CS1l2KpBSSKY0QOQZ2+yh43Pahrzs0Gbd4UZfckA3jDVnhCDNlmLxrIswi3TNu F+NHedCN6UlEbwcMqqdaUKjH8QW5bVdnnVTnZl+JEb+wGWudlb36cPXvJNhQZ4rU4TKgnq5q2hcJ 7Tdt7JJJtg38gjphiWiFurCJ8RzvIPRBEfU2GWbY/fnjYkhepIeG2m2/AWQVZXJp0HXdrPGsonhB pynSN6J6km9MvxKxVV0+pIlI5MtA6VrB268dNVZrJFF3PfO5Oc3RjVa7HnJDiY8yvxQUNmDofEgw IJKHQL5bzmPfJl+AAFq2UB0drqCczAxsVMqxsW+Kqi15NhGJIlFBhC4kHthUcSYvkhrhOXSgtR/u 8nmlqicr0gCHz996mj7WXePDJB1KojykHM+pFInhoZQUdc7tlnxDWnowLz3SWx/wxp037q7butGf WaJZ08G69JB0yG15/0IMResWBL9XnmN6ISblFRAprEOBjHdFvDWZnXkyDJ4f1YizcYx2siyll/Qk BPUk9FtcEqpKK5bz+h5vMpSM0/EzNxNW3UYVT3lygS8NBWex+6YBJBJ4rSgTZ6o1cSa+0nt8kECf 5byAGJ4Etz2SF4eQ1CIuaw2pXqSihPPld7eYsEgKwJX4Bl5YoVQPn9/2fRBsF9rJ7Gf31aVbSK70 tInGQXeevG+l9SKxbto3IlvLE4PFNII6Pxd4F6NZYLQyR13q7QA6W1cQ8uCW7lXUIEIpL/way2tb +nK83p+5I4LeHt+pLHcFVvJ5meDblzKy4BkHAdg9CXMIqpGkoN7U6QNFSpqI3eEGgv6uDd5HBaRr QYSM3g0AQhUH4CNMVjsK0MfLJJhnRESOr7bS/Ru8tSp061PjwbMFZR8QnVZ7thGtwhXO3EspKvGL y2pvf4eqD2oLdc9SMlS+I6EBFOPNZUybA6APt33GocUyyjT0AEb9bD3p6xxYiTwtxCoF4pTpalh1 /gce70QT5tCj7gg1fC1QqmJwU8JARryCcjcJ1Rquk9R0LYhbEf5zFBDv6GGv6Pvv/yLTojjNQX5U nwGB538+P7eyv2wdKdlrpFL7+3Tlo0B3IFiD7Ldptu3bbmay/2bhizn73mIRGhnOF3UIGC7miOfF zFuXxHlFMBG2JCRgZM7lhnxDpKHE7AeoJfWh79RlhOhe7BdaAyO8QQH6izQHSEAww0ScsMkr75pB kBTS5RnA1Ztwb7aouyQPzBKHzBtonTXuEEnnn94mipa4y6DRdQSScrwF4oyMFd7ZGLCjc9UnAMZb 2NkRQN18tUz2f3GVWKooTs6A+xkU3BfjYpsuag8Ked/VD0K2TFabEJRwqnwLvhOKl8jE4EUz+827 UL2KbkIA/f6pfwvHvE4M3jRQby/rmMX6bk2pX8/qizFVHrve0VpZejXFDEVFhBW/ps/xUgeM8YdD GdpUdg/0XeuGfdDvXf+AIZreZxhcz5KX4q/3TSvyS4ytF1bSZBGunZpuUMyGm5CFhGcbfiQSfCHP Vfw4nL6FjabvB4P1SnkOBPkBR2S4ludaEMwKYTW1GgecENFFSfU+f/SeoAhNrbyNBIp4kiwlDlNS a57g3dmmXQS2POEhp2tDi6BpsbvYgrchyDXvMtUBMNdztyKrFjoDQzdC8qQ/GqBUi4XHpDsLnkKt 6uBpel22B5gmtfyB14vph1c3f4W0aUSVbgE+YS19z/AMr272SzoXOu0VP318OzXs0FzyXmWWVOk/ T4E5Gl7wpTxDXdQtzS1Hv52qHSilmOtEVO3IVjCdl5cgC5VC9T6CY1N4U4B0E1tltaqRtuYc/PyB 
i9tGe6+O/V0LCkGXvNkrKK2++u9qLFyTkO2sp7xSt/Bfil9os3SeOlY5fvv9mLcFj5zSNUqsRZfU 7lwukTHLpfpLDH2GT+yCCf8D2cp1xw== """ if IS_PY3: DISTRIBUTE_SETUP_PY = DISTRIBUTE_SETUP_PY.encode('ascii') DISTRIBUTE_SETUP_PY = codecs.decode(codecs.decode(DISTRIBUTE_SETUP_PY, "base64"), "zlib") # run main function as body of script main()
unknown
codeparrot/codeparrot-clean
// Copyright 2024 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //nolint:unparam package validate import ( "fmt" "math/rand/v2" "testing" "time" "github.com/anishathalye/porcupine" "go.uber.org/zap" "go.uber.org/zap/zaptest" "go.etcd.io/etcd/tests/v3/robustness/model" ) func TestValidateSerializableOperations(t *testing.T) { tcs := []struct { name string persistedRequests []model.EtcdRequest operations []porcupine.Operation expectError string }{ { name: "Success", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 1, 0), Output: rangeResponse(0), }, { Input: rangeRequest("a", "z", 2, 0), Output: rangeResponse(1, keyValueRevision("a", "1", 2)), }, { Input: rangeRequest("a", "z", 3, 0), Output: rangeResponse(2, keyValueRevision("a", "1", 2), keyValueRevision("b", "2", 3), ), }, { Input: rangeRequest("a", "z", 4, 0), Output: rangeResponse(3, keyValueRevision("a", "1", 2), keyValueRevision("b", "2", 3), keyValueRevision("c", "3", 4), ), }, { Input: rangeRequest("a", "z", 4, 3), Output: rangeResponse(3, keyValueRevision("a", "1", 2), keyValueRevision("b", "2", 3), keyValueRevision("c", "3", 4), ), }, { Input: rangeRequest("a", "z", 4, 4), Output: rangeResponse(3, keyValueRevision("a", "1", 2), keyValueRevision("b", "2", 3), keyValueRevision("c", "3", 4), ), }, { Input: rangeRequest("a", "z", 4, 2), Output: rangeResponse(3, 
keyValueRevision("a", "1", 2), keyValueRevision("b", "2", 3), ), }, { Input: rangeRequest("b\x00", "z", 4, 2), Output: rangeResponse(1, keyValueRevision("c", "3", 4), ), }, { Input: rangeRequest("b", "", 4, 0), Output: rangeResponse(1, keyValueRevision("b", "2", 3), ), }, { Input: rangeRequest("b", "", 2, 0), Output: rangeResponse(0), }, }, }, { name: "Invalid order", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 4, 0), Output: rangeResponse(3, keyValueRevision("c", "3", 4), keyValueRevision("b", "2", 3), keyValueRevision("a", "1", 2), ), }, }, expectError: errRespNotMatched.Error(), }, { name: "Invalid count", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 1, 0), Output: rangeResponse(1), }, }, expectError: errRespNotMatched.Error(), }, { name: "Invalid keys", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 2, 0), Output: rangeResponse(3, keyValueRevision("b", "2", 3), ), }, }, expectError: errRespNotMatched.Error(), }, { name: "Invalid revision", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 2, 0), Output: rangeResponse(3, keyValueRevision("a", "1", 2), keyValueRevision("b", "2", 3), ), }, }, expectError: errRespNotMatched.Error(), }, { name: "Error", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 2, 0), Output: errorResponse(model.ErrEtcdFutureRev), }, { Input: rangeRequest("a", "z", 2, 0), Output: 
errorResponse(fmt.Errorf("timeout")), }, }, }, { name: "Future rev returned", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 6, 0), Output: errorResponse(model.ErrEtcdFutureRev), }, }, }, { name: "Future rev success", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 6, 0), Output: rangeResponse(0), }, }, expectError: errFutureRevRespRequested.Error(), }, { name: "Future rev failure", persistedRequests: []model.EtcdRequest{ putRequest("a", "1"), putRequest("b", "2"), putRequest("c", "3"), }, operations: []porcupine.Operation{ { Input: rangeRequest("a", "z", 6, 0), Output: errorResponse(fmt.Errorf("timeout")), }, }, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { replay := model.NewReplay(tc.persistedRequests) result := validateSerializableOperations(zaptest.NewLogger(t), tc.operations, replay) if result.Message != tc.expectError { t.Errorf("validateSerializableOperations(...), got: %q, want: %q", result.Message, tc.expectError) } }) } } func rangeRequest(start, end string, rev, limit int64) model.EtcdRequest { return model.EtcdRequest{ Type: model.Range, Range: &model.RangeRequest{ RangeOptions: model.RangeOptions{ Start: start, End: end, Limit: limit, }, Revision: rev, }, } } func rangeResponse(count int64, kvs ...model.KeyValue) model.MaybeEtcdResponse { if kvs == nil { kvs = []model.KeyValue{} } return model.MaybeEtcdResponse{ EtcdResponse: model.EtcdResponse{ Range: &model.RangeResponse{ KVs: kvs, Count: count, }, }, } } func errorResponse(err error) model.MaybeEtcdResponse { return model.MaybeEtcdResponse{ Error: err.Error(), } } func keyValueRevision(key, value string, rev int64) model.KeyValue { return model.KeyValue{ Key: key, ValueRevision: model.ValueRevision{ Value: 
model.ToValueOrHash(value), ModRevision: rev, Version: 1, }, } } func BenchmarkValidateLinearizableOperations(b *testing.B) { lg := zap.NewNop() b.Run("Successes", func(b *testing.B) { history := allPutSuccesses(1000) shuffles := shuffleHistory(history, b.N) b.ResetTimer() validateShuffles(b, lg, shuffles, time.Second) }) b.Run("AllFailures", func(b *testing.B) { history := allPutFailures(10) shuffles := shuffleHistory(history, b.N) b.ResetTimer() validateShuffles(b, lg, shuffles, time.Second) }) b.Run("PutFailuresWithRead", func(b *testing.B) { history := putFailuresWithRead(b, 8) shuffles := shuffleHistory(history, b.N) b.ResetTimer() validateShuffles(b, lg, shuffles, time.Second) }) } func allPutSuccesses(concurrencyCount int) []porcupine.Operation { ops := []porcupine.Operation{} for i := 0; i < concurrencyCount; i++ { ops = append(ops, porcupine.Operation{ ClientId: i, Input: putRequest("key", "value"), Output: txnResponse(int64(i)+2, model.EtcdOperationResult{}), Call: int64(i), Return: int64(i) + int64(concurrencyCount), }) } return ops } func putFailuresWithRead(b *testing.B, concurrencyCount int) []porcupine.Operation { ops := []porcupine.Operation{} for i := 0; i < concurrencyCount; i++ { ops = append(ops, porcupine.Operation{ ClientId: i, Input: putRequest(fmt.Sprintf("key%d", i), "value"), Output: errorResponse(fmt.Errorf("timeout")), Call: int64(i), Return: int64(i) + int64(concurrencyCount), }) } requests := []model.EtcdRequest{} for _, op := range ops { requests = append(requests, op.Input.(model.EtcdRequest)) } replay := model.NewReplay(requests) state, err := replay.StateForRevision(int64(concurrencyCount) + 1) if err != nil { b.Fatal(err) } request := rangeRequest("key", "kez", 0, 0) _, resp := state.Step(request) ops = append(ops, porcupine.Operation{ ClientId: 0, Input: request, Output: resp, Call: int64(concurrencyCount) + 1, Return: int64(concurrencyCount) + 2, }) return ops } func allPutFailures(concurrencyCount int) []porcupine.Operation { 
ops := []porcupine.Operation{} for i := 0; i < concurrencyCount; i++ { ops = append(ops, porcupine.Operation{ ClientId: i, Input: putRequest("key", "value"), Output: errorResponse(fmt.Errorf("timeout")), Call: int64(i), Return: int64(i) + int64(concurrencyCount), }) } return ops } func shuffleHistory(history []porcupine.Operation, shuffleCount int) [][]porcupine.Operation { shuffles := make([][]porcupine.Operation, shuffleCount) for i := 0; i < shuffleCount; i++ { historyCopy := make([]porcupine.Operation, len(history)) copy(historyCopy, history) rand.Shuffle(len(historyCopy), func(i, j int) { historyCopy[i], historyCopy[j] = historyCopy[j], historyCopy[i] }) shuffles[i] = historyCopy } return shuffles } func validateShuffles(b *testing.B, lg *zap.Logger, shuffles [][]porcupine.Operation, duration time.Duration) { for i := 0; i < len(shuffles); i++ { result := validateLinearizableOperationsAndVisualize(lg, shuffles[i], duration) if err := result.Error(); err != nil { b.Fatalf("Not linearizable: %v", err) } } }
go
github
https://github.com/etcd-io/etcd
tests/robustness/validate/operations_test.go
# -*- coding: utf-8 -*- # # Test links: # https://www.oboom.com/B7CYZIEB/10Mio.dat import re from module.common.json_layer import json_loads from module.plugins.internal.Hoster import Hoster from module.plugins.captcha.ReCaptcha import ReCaptcha class OboomCom(Hoster): __name__ = "OboomCom" __type__ = "hoster" __version__ = "0.37" __status__ = "testing" __pattern__ = r'https?://(?:www\.)?oboom\.com/(?:#(?:id=|/)?)?(?P<ID>\w{8})' __description__ = """Oboom.com hoster plugin""" __license__ = "GPLv3" __authors__ = [("stanley", "stanley.foerster@gmail.com")] RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX" def setup(self): self.chunk_limit = 1 self.multiDL = self.resume_download = self.premium def process(self, pyfile): self.pyfile.url.replace(".com/#id=", ".com/#") self.pyfile.url.replace(".com/#/", ".com/#") self.html = self.load(pyfile.url) self.get_file_id(self.pyfile.url) self.get_session_token() self.get_fileInfo(self.session_token, self.file_id) self.pyfile.name = self.file_name self.pyfile.size = self.file_size if not self.premium: self.solve_captcha() self.get_download_ticket() self.download("http://%s/1.0/dlh" % self.download_domain, get={'ticket': self.download_ticket, 'http_errors': 0}) def load_url(self, url, get=None): if get is None: get = {} return json_loads(self.load(url, get)) def get_file_id(self, url): self.file_id = re.match(OboomCom.__pattern__, url).group('ID') def get_session_token(self): if self.premium: accountInfo = self.account.get_data(self.user, True) if "session" in accountInfo: self.session_token = accountInfo['session'] else: self.fail(_("Could not retrieve premium session")) else: apiUrl = "http://www.oboom.com/1.0/guestsession" result = self.load_url(apiUrl) if result[0] == 200: self.session_token = result[1] else: self.fail(_("Could not retrieve token for guest session. 
Error code: %s") % result[0]) def solve_captcha(self): recaptcha = ReCaptcha(self) for _i in xrange(5): response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY) apiUrl = "http://www.oboom.com/1.0/download/ticket" params = {'recaptcha_challenge_field': challenge, 'recaptcha_response_field': response, 'download_id': self.file_id, 'token': self.session_token} result = self.load_url(apiUrl, params) if result[0] == 200: self.download_token = result[1] self.download_auth = result[2] self.captcha.correct() self.wait(30) break elif result[0] == 400: if result[1] == "incorrect-captcha-sol": self.captcha.invalid() elif result[1] == "captcha-timeout": self.captcha.invalid() elif result[1] == "forbidden": self.retry(5, 15 * 60, _("Service unavailable")) elif result[0] == 403: if result[1] == -1: #: Another download is running self.set_wait(15 * 60) else: self.set_wait(result[1], True) self.wait() self.retry(5) else: self.captcha.invalid() self.fail(_("Received invalid captcha 5 times")) def get_fileInfo(self, token, fileId): apiUrl = "http://api.oboom.com/1.0/info" params = {'token': token, 'items': fileId, 'http_errors': 0} result = self.load_url(apiUrl, params) if result[0] == 200: item = result[1][0] if item['state'] == "online": self.file_size = item['size'] self.file_name = item['name'] else: self.offline() else: self.fail(_("Could not retrieve file info. Error code %s: %s") % (result[0], result[1])) def get_download_ticket(self): apiUrl = "http://api.oboom.com/1/dl" params = {'item': self.file_id, 'http_errors': 0} if self.premium: params['token'] = self.session_token else: params['token'] = self.download_token params['auth'] = self.download_auth result = self.load_url(apiUrl, params) if result[0] == 200: self.download_domain = result[1] self.download_ticket = result[2] elif result[0] == 421: self.retry(wait_time=result[2] + 60, msg=_("Connection limit exceeded")) else: self.fail(_("Could not retrieve download ticket. Error code: %s") % result[0])
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains definitions for the original form of Residual Networks. The 'v1' residual networks (ResNets) implemented in this module were proposed by: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 Other variants were introduced in: [2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 The networks defined in this module utilize the bottleneck building block of [1] with projection shortcuts only for increasing depths. They employ batch normalization *after* every weight layer. This is the architecture used by MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1' architecture and the alternative 'v2' architecture of [2] which uses batch normalization *before* every weight layer in the so-called full pre-activation units. 
Typical use: from tensorflow.contrib.slim.nets import resnet_v1 ResNet-101 for image classification into 1000 classes: # inputs has shape [batch, 224, 224, 3] with slim.arg_scope(resnet_v1.resnet_arg_scope()): net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False) ResNet-101 for semantic segmentation into 21 classes: # inputs has shape [batch, 513, 513, 3] with slim.arg_scope(resnet_v1.resnet_arg_scope()): net, end_points = resnet_v1.resnet_v1_101(inputs, 21, is_training=False, global_pool=False, output_stride=16) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nets import resnet_utils resnet_arg_scope = resnet_utils.resnet_arg_scope slim = tf.contrib.slim @slim.add_arg_scope def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1, outputs_collections=None, scope=None): """Bottleneck residual unit variant with BN after convolutions. This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for its definition. Note that we use here the bottleneck variant which has an extra bottleneck layer. When putting together two consecutive ResNet blocks that use this unit, one should use stride = 2 in the last unit of the first block. Args: inputs: A tensor of size [batch, height, width, channels]. depth: The depth of the ResNet unit output. depth_bottleneck: The depth of the bottleneck layers. stride: The ResNet unit's stride. Determines the amount of downsampling of the units output compared to its input. rate: An integer, rate for atrous convolution. outputs_collections: Collection to add the ResNet unit output. scope: Optional variable_scope. Returns: The ResNet unit's output. 
""" with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc: depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4) if depth == depth_in: shortcut = resnet_utils.subsample(inputs, stride, 'shortcut') else: shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride, activation_fn=None, scope='shortcut') residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1, scope='conv1') residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2') residual = slim.conv2d(residual, depth, [1, 1], stride=1, activation_fn=None, scope='conv3') output = tf.nn.relu(shortcut + residual) return slim.utils.collect_named_outputs(outputs_collections, sc.original_name_scope, output) def resnet_v1(inputs, blocks, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope=None): """Generator for v1 ResNet models. This function generates a family of ResNet v1 models. See the resnet_v1_*() methods for specific model instantiations, obtained by selecting different block instantiations that produce ResNets of various depths. Training for image classification on Imagenet is usually done with [224, 224] inputs, resulting in [7, 7] feature maps at the output of the last ResNet block for the ResNets defined in [1] that have nominal stride equal to 32. However, for dense prediction tasks we advise that one uses inputs with spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In this case the feature maps at the ResNet output will have spatial shape [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1] and corners exactly aligned with the input image corners, which greatly facilitates alignment of the features to the image. Using as input [225, 225] images results in [8, 8] feature maps at the output of the last ResNet block. 
For dense prediction tasks, the ResNet needs to run in fully-convolutional (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all have nominal stride equal to 32 and a good choice in FCN mode is to use output_stride=16 in order to increase the density of the computed features at small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915. Args: inputs: A tensor of size [batch, height_in, width_in, channels]. blocks: A list of length equal to the number of ResNet blocks. Each element is a resnet_utils.Block object describing the units in the block. num_classes: Number of predicted classes for classification tasks. If None we return the features before the logit layer. is_training: whether is training or not. global_pool: If True, we perform global average pooling before computing the logits. Set to True for image classification, False for dense prediction. output_stride: If None, then the output will be computed at the nominal network stride. If output_stride is not None, it specifies the requested ratio of input to output spatial resolution. include_root_block: If True, include the initial convolution followed by max-pooling, if False excludes it. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. Returns: net: A rank-4 tensor of size [batch, height_out, width_out, channels_out]. If global_pool is False, then height_out and width_out are reduced by a factor of output_stride compared to the respective height_in and width_in, else both height_out and width_out equal one. If num_classes is None, then net is the output of the last ResNet block, potentially after global average pooling. If num_classes is not None, net contains the pre-softmax activations. end_points: A dictionary from components of the network to the corresponding activation. Raises: ValueError: If the target output_stride is not valid. 
""" with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc: end_points_collection = sc.name + '_end_points' with slim.arg_scope([slim.conv2d, bottleneck, resnet_utils.stack_blocks_dense], outputs_collections=end_points_collection): with slim.arg_scope([slim.batch_norm], is_training=is_training): net = inputs if include_root_block: if output_stride is not None: if output_stride % 4 != 0: raise ValueError('The output_stride needs to be a multiple of 4.') output_stride /= 4 net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1') net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') net = resnet_utils.stack_blocks_dense(net, blocks, output_stride) if global_pool: # Global average pooling. net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True) if num_classes is not None: net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='logits') # Convert end_points_collection into a dictionary of end_points. end_points = slim.utils.convert_collection_to_dict(end_points_collection) if num_classes is not None: end_points['predictions'] = slim.softmax(net, scope='predictions') return net, end_points resnet_v1.default_image_size = 224 def resnet_v1_50(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_50'): """ResNet-50 model of [1]. 
See resnet_v1() for arg and return description.""" blocks = [ resnet_utils.Block( 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]), resnet_utils.Block( 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]), resnet_utils.Block( 'block3', bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]), resnet_utils.Block( 'block4', bottleneck, [(2048, 512, 1)] * 3) ] return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope) def resnet_v1_101(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_101'): """ResNet-101 model of [1]. See resnet_v1() for arg and return description.""" blocks = [ resnet_utils.Block( 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]), resnet_utils.Block( 'block2', bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]), resnet_utils.Block( 'block3', bottleneck, [(1024, 256, 1)] * 22 + [(1024, 256, 2)]), resnet_utils.Block( 'block4', bottleneck, [(2048, 512, 1)] * 3) ] return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope) def resnet_v1_152(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_152'): """ResNet-152 model of [1]. 
See resnet_v1() for arg and return description.""" blocks = [ resnet_utils.Block( 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]), resnet_utils.Block( 'block2', bottleneck, [(512, 128, 1)] * 7 + [(512, 128, 2)]), resnet_utils.Block( 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]), resnet_utils.Block( 'block4', bottleneck, [(2048, 512, 1)] * 3)] return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope) def resnet_v1_200(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_200'): """ResNet-200 model of [2]. See resnet_v1() for arg and return description.""" blocks = [ resnet_utils.Block( 'block1', bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]), resnet_utils.Block( 'block2', bottleneck, [(512, 128, 1)] * 23 + [(512, 128, 2)]), resnet_utils.Block( 'block3', bottleneck, [(1024, 256, 1)] * 35 + [(1024, 256, 2)]), resnet_utils.Block( 'block4', bottleneck, [(2048, 512, 1)] * 3)] return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope)
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true require 'test/unit' require 'tempfile' class TestTempfile < Test::Unit::TestCase LIB_TEMPFILE_RB_PATH = File.expand_path(__dir__ + "/../lib/tempfile.rb") def initialize(*) super @tempfile = nil end def tempfile(*args, **kw, &block) t = Tempfile.new(*args, **kw, &block) @tempfile = (t unless block) end def teardown if @tempfile @tempfile.close! end end def test_leakchecker assert_instance_of(Tempfile, Tempfile.allocate) end def test_basic t = tempfile("foo") path = t.path t.write("hello world") t.close assert_equal "hello world", File.read(path) end def test_saves_in_given_directory subdir = File.join(Dir.tmpdir, "tempfile-test-#{rand}") Dir.mkdir(subdir) begin tempfile = Tempfile.new("foo", subdir) tempfile.close begin assert_equal subdir, File.dirname(tempfile.path) ensure tempfile.unlink end ensure Dir.rmdir(subdir) end end def test_basename t = tempfile("foo") assert_match(/^foo/, File.basename(t.path)) end def test_default_basename t = tempfile assert_file.exist?(t.path) end def test_basename_with_suffix t = tempfile(["foo", ".txt"]) assert_match(/^foo/, File.basename(t.path)) assert_match(/\.txt$/, File.basename(t.path)) end def test_dup t = tempfile t2 = t.dup t2.close assert_equal true, t2.closed? assert_equal false, t.closed? end def test_clone t = tempfile t2 = t.clone t2.close assert_equal true, t2.closed? assert_equal false, t.closed? 
end def test_unlink t = tempfile("foo") path = t.path t.close assert_file.exist?(path) t.unlink assert_file.not_exist?(path) assert_nil t.path end def test_unlink_silently_fails_on_windows tempfile = tempfile("foo") path = tempfile.path begin assert_nothing_raised do tempfile.unlink end ensure tempfile.close File.unlink(path) if File.exist?(path) end end def test_unlink_before_close_works_on_posix_systems tempfile = tempfile("foo") begin path = tempfile.path tempfile.unlink assert_file.not_exist?(path) tempfile.write("hello ") tempfile.write("world\n") tempfile.rewind assert_equal "hello world\n", tempfile.read ensure tempfile.close tempfile.unlink end end unless /mswin|mingw/ =~ RUBY_PLATFORM def test_close_and_close_p t = tempfile("foo") assert_not_predicate(t, :closed?) t.close assert_predicate(t, :closed?) end def test_close_with_unlink_now_true_works t = tempfile("foo") path = t.path t.close(true) assert_predicate(t, :closed?) assert_nil t.path assert_file.not_exist?(path) end def test_close_with_unlink_now_true_does_not_unlink_if_already_unlinked t = tempfile("foo") path = t.path t.unlink File.open(path, "w").close begin t.close(true) assert_file.exist?(path) ensure File.unlink(path) rescue nil end end unless /mswin|mingw/ =~ RUBY_PLATFORM def test_close_bang_works t = tempfile("foo") path = t.path t.close! assert_predicate(t, :closed?) assert_nil t.path assert_file.not_exist?(path) end def test_close_bang_does_not_unlink_if_already_unlinked t = tempfile("foo") path = t.path t.unlink File.open(path, "w").close begin t.close! 
assert_file.exist?(path) ensure File.unlink(path) rescue nil end end unless /mswin|mingw/ =~ RUBY_PLATFORM def test_finalizer_removes_file assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<~RUBY) do |(filename,*), (error,*)| file = Tempfile.new("foo") puts file.path RUBY assert_file.not_exist?(filename) assert_nil error end end def test_finalizer_removes_file_when_dup assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<~RUBY) do |(filename,*), (error,*)| file = Tempfile.new("foo") file.dup puts file.path RUBY assert_file.not_exist?(filename) assert_nil error end end def test_finalizer_removes_file_when_clone assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<~RUBY) do |(filename,*), (error,*)| file = Tempfile.new("foo") file.clone puts file.path RUBY assert_file.not_exist?(filename) assert_nil error end end def test_finalizer_does_not_unlink_if_already_unlinked assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<-'EOS') do |(filename,*), (error,*)| file = Tempfile.new('foo') path = file.path puts path file.close! File.open(path, "w").close EOS assert_file.exist?(filename) File.unlink(filename) assert_nil error end assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<-'EOS') do |(filename,*), (error,*)| file = Tempfile.new('foo') path = file.path file.unlink puts path File.open(path, "w").close EOS if !filename.empty? 
# POSIX unlink semantics supported, continue with test assert_file.exist?(filename) File.unlink(filename) end assert_nil error end end unless /mswin|mingw/ =~ RUBY_PLATFORM def test_close_does_not_make_path_nil t = tempfile("foo") t.close assert_not_nil t.path end def test_close_flushes_buffer t = tempfile("foo") t.write("hello") t.close assert_equal 5, File.size(t.path) end def test_tempfile_is_unlinked_when_ruby_exits assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<-'EOS') do |(filename), (error)| puts Tempfile.new('foo').path EOS assert_file.for("tempfile must not be exist after GC.").not_exist?(filename) assert_nil(error) end end def test_tempfile_finalizer_does_not_run_if_unlinked bug8768 = '[ruby-core:56521] [Bug #8768]' assert_in_out_err("-r#{LIB_TEMPFILE_RB_PATH}", <<-'EOS') do |(filename), (error)| tmp = Tempfile.new('foo') puts tmp.path tmp.close tmp.unlink $DEBUG = true EOS assert_file.not_exist?(filename) assert_nil(error, "#{bug8768} we used to get a confusing 'removing ...done' here") end end def test_size_flushes_buffer_before_determining_file_size t = tempfile("foo") t.write("hello") assert_equal 0, File.size(t.path) assert_equal 5, t.size assert_equal 5, File.size(t.path) end def test_size_works_if_file_is_closed t = tempfile("foo") t.write("hello") t.close assert_equal 5, t.size end def test_size_on_empty_file t = tempfile("foo") t.write("") t.close assert_equal 0, t.size end def test_concurrency threads = [] tempfiles = [] lock = Thread::Mutex.new cond = Thread::ConditionVariable.new start = false 4.times do threads << Thread.new do lock.synchronize do while !start cond.wait(lock) end end result = [] 30.times do result << Tempfile.new('foo') end Thread.current[:result] = result end end lock.synchronize do start = true cond.broadcast end threads.each do |thread| thread.join tempfiles |= thread[:result] end filenames = tempfiles.map { |f| f.path } begin assert_equal filenames.size, filenames.uniq.size ensure tempfiles.each do |tempfile| 
tempfile.close! end end end module M end def test_extend o = tempfile("foo") o.extend M assert(M === o, "[ruby-dev:32932]") end def test_tempfile_encoding_nooption default_external=Encoding.default_external t = tempfile("TEST") t.write("\xE6\x9D\xBE\xE6\xB1\x9F") t.rewind assert_equal(default_external,t.read.encoding) end def test_tempfile_encoding_ascii8bit t = tempfile("TEST",:encoding=>"ascii-8bit") t.write("\xE6\x9D\xBE\xE6\xB1\x9F") t.rewind assert_equal(Encoding::ASCII_8BIT,t.read.encoding) end def test_tempfile_encoding_ascii8bit2 t = tempfile("TEST",Dir::tmpdir,:encoding=>"ascii-8bit") t.write("\xE6\x9D\xBE\xE6\xB1\x9F") t.rewind assert_equal(Encoding::ASCII_8BIT,t.read.encoding) end def test_binmode t = tempfile("TEST", mode: IO::BINARY) if IO::BINARY.nonzero? assert(t.binmode?) t.open assert(t.binmode?, 'binmode after reopen') else assert_equal(0600, t.stat.mode & 0777) end end def test_create_with_block path = nil Tempfile.create("tempfile-create") {|f| path = f.path assert_file.exist?(path) } assert_file.not_exist?(path) Tempfile.create("tempfile-create") {|f| path = f.path f.close File.unlink(f.path) } assert_file.not_exist?(path) end def test_create_without_block path = nil f = Tempfile.create("tempfile-create") path = f.path assert_file.exist?(path) f.close assert_file.exist?(path) ensure f&.close File.unlink path if path end def test_create_default_basename path = nil Tempfile.create {|f| path = f.path assert_file.exist?(path) } assert_file.not_exist?(path) end def test_open Tempfile.open {|f| file = f.open assert_kind_of File, file assert_equal f.to_i, file.to_i } end def test_open_traversal_dir assert_mktmpdir_traversal do |traversal_path| t = Tempfile.open([traversal_path, 'foo']) t.path ensure t&.close! end end def test_new_traversal_dir assert_mktmpdir_traversal do |traversal_path| t = Tempfile.new(traversal_path + 'foo') t.path ensure t&.close! 
end end def test_create_traversal_dir assert_mktmpdir_traversal do |traversal_path| t = Tempfile.create(traversal_path + 'foo') t.path ensure if t t.close File.unlink(t.path) end end end def assert_mktmpdir_traversal Dir.mktmpdir do |target| target = target.chomp('/') + '/' traversal_path = target.sub(/\A\w:/, '') # for DOSISH traversal_path = Array.new(target.count('/')-2, '..').join('/') + traversal_path actual = yield traversal_path assert_not_send([File.absolute_path(actual), :start_with?, target]) end end def test_create_anonymous_without_block t = Tempfile.create(anonymous: true) assert_equal(File, t.class) assert_equal(0600, t.stat.mode & 0777) unless /mswin|mingw/ =~ RUBY_PLATFORM t.puts "foo" t.rewind assert_equal("foo\n", t.read) t.close ensure t.close if t end def test_create_anonymous_with_block result = Tempfile.create(anonymous: true) {|t| assert_equal(File, t.class) assert_equal(0600, t.stat.mode & 0777) unless /mswin|mingw/ =~ RUBY_PLATFORM t.puts "foo" t.rewind assert_equal("foo\n", t.read) :result } assert_equal(:result, result) end def test_create_anonymous_removes_file Dir.mktmpdir {|d| t = Tempfile.create("", d, anonymous: true) t.close assert_equal([], Dir.children(d)) } end def test_create_anonymous_path Dir.mktmpdir {|d| begin t = Tempfile.create("", d, anonymous: true) assert_equal(File.join(d, ""), t.path) ensure t.close if t end } end def test_create_anonymous_autoclose Tempfile.create(anonymous: true) {|t| assert_equal(true, t.autoclose?) } end end
ruby
github
https://github.com/ruby/ruby
test/test_tempfile.rb
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = """ --- module: postgresql_privs version_added: "1.2" short_description: Grant or revoke privileges on PostgreSQL database objects. description: - Grant or revoke privileges on PostgreSQL database objects. - This module is basically a wrapper around most of the functionality of PostgreSQL's GRANT and REVOKE statements with detection of changes (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)) options: database: description: - Name of database to connect to. - 'Alias: I(db)' required: yes state: description: - If C(present), the specified privileges are granted, if C(absent) they are revoked. required: no default: present choices: [present, absent] privs: description: - Comma separated list of privileges to grant/revoke. - 'Alias: I(priv)' required: no type: description: - Type of database object to set privileges on. required: no default: table choices: [table, sequence, function, database, schema, language, tablespace, group] objs: description: - Comma separated list of database objects to set privileges on. - If I(type) is C(table) or C(sequence), the special value C(ALL_IN_SCHEMA) can be provided instead to specify all database objects of type I(type) in the schema specified via I(schema). (This also works with PostgreSQL < 9.0.) 
- If I(type) is C(database), this parameter can be omitted, in which case privileges are set for the database specified via I(database). - 'If I(type) is I(function), colons (":") in object names will be replaced with commas (needed to specify function signatures, see examples)' - 'Alias: I(obj)' required: no schema: description: - Schema that contains the database objects specified via I(objs). - May only be provided if I(type) is C(table), C(sequence) or C(function). Defaults to C(public) in these cases. required: no roles: description: - Comma separated list of role (user/group) names to set permissions for. - The special value C(PUBLIC) can be provided instead to set permissions for the implicitly defined PUBLIC group. - 'Alias: I(role)' required: yes grant_option: description: - Whether C(role) may grant/revoke the specified privileges/group memberships to others. - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes. - I(grant_option) only has an effect if I(state) is C(present). - 'Alias: I(admin_option)' required: no choices: ['yes', 'no'] host: description: - Database host address. If unspecified, connect via Unix socket. - 'Alias: I(login_host)' default: null required: no port: description: - Database port to connect to. required: no default: 5432 unix_socket: description: - Path to a Unix domain socket for local connections. - 'Alias: I(login_unix_socket)' required: false default: null login: description: - The username to authenticate with. - 'Alias: I(login_user)' default: postgres password: description: - The password to authenticate with. - 'Alias: I(login_password))' default: null required: no notes: - Default authentication assumes that postgresql_privs is run by the C(postgres) user on the remote host. (Ansible's C(user) or C(sudo-user)). - This module requires Python package I(psycopg2) to be installed on the remote host. 
In the default case of the remote host also being the PostgreSQL server, PostgreSQL has to be installed there as well, obviously. For Debian/Ubuntu-based systems, install packages I(postgresql) and I(python-psycopg2). - Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) have singular alias names (I(priv), I(obj), I(role)). - To revoke only C(GRANT OPTION) for a specific object, set I(state) to C(present) and I(grant_option) to C(no) (see examples). - Note that when revoking privileges from a role R, this role may still have access via privileges granted to any role R is a member of including C(PUBLIC). - Note that when revoking privileges from a role R, you do so as the user specified via I(login). If R has been granted the same privileges by another user also, R can still access database objects via these privileges. - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). requirements: [psycopg2] author: "Bernhard Weitzhofer (@b6d)" """ EXAMPLES = """ # On database "library": # GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors # TO librarian, reader WITH GRANT OPTION - postgresql_privs: > database=library state=present privs=SELECT,INSERT,UPDATE type=table objs=books,authors schema=public roles=librarian,reader grant_option=yes # Same as above leveraging default values: - postgresql_privs: > db=library privs=SELECT,INSERT,UPDATE objs=books,authors roles=librarian,reader grant_option=yes # REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader # Note that role "reader" will be *granted* INSERT privilege itself if this # isn't already the case (since state=present). - postgresql_privs: > db=library state=present priv=INSERT obj=books role=reader grant_option=no # REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader # "public" is the default schema. This also works for PostgreSQL 8.x. 
- postgresql_privs: > db=library state=absent privs=INSERT,UPDATE objs=ALL_IN_SCHEMA role=reader # GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian - postgresql_privs: > db=library privs=ALL type=schema objs=public,math role=librarian # GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader # Note the separation of arguments with colons. - postgresql_privs: > db=library privs=ALL type=function obj=add(int:int) schema=math roles=librarian,reader # GRANT librarian, reader TO alice, bob WITH ADMIN OPTION # Note that group role memberships apply cluster-wide and therefore are not # restricted to database "library" here. - postgresql_privs: > db=library type=group objs=librarian,reader roles=alice,bob admin_option=yes # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # Note that here "db=postgres" specifies the database to connect to, not the # database to grant privileges on (which is specified via the "objs" param) - postgresql_privs: > db=postgres privs=ALL type=database obj=library role=librarian # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # If objs is omitted for type "database", it defaults to the database # to which the connection is established - postgresql_privs: > db=library privs=ALL type=database role=librarian """ try: import psycopg2 import psycopg2.extensions except ImportError: psycopg2 = None VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'USAGE')) class Error(Exception): pass # We don't have functools.partial in Python < 2.5 def partial(f, *args, **kwargs): """Partial function application""" def g(*g_args, **g_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(g_kwargs) return f(*(args + g_args), **g_kwargs) g.f = f g.args = args g.kwargs = kwargs return g class Connection(object): """Wrapper around a psycopg2 connection with some convenience methods""" def __init__(self, params): 
self.database = params.database # To use defaults values, keyword arguments must be absent, so # check which values are empty and don't include in the **kw # dictionary params_map = { "host":"host", "login":"user", "password":"password", "port":"port", "database": "database", } kw = dict( (params_map[k], getattr(params, k)) for k in params_map if getattr(params, k) != '' ) # If a unix_socket is specified, incorporate it here. is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" if is_localhost and params.unix_socket != "": kw["host"] = params.unix_socket self.connection = psycopg2.connect(**kw) self.cursor = self.connection.cursor() def commit(self): self.connection.commit() def rollback(self): self.connection.rollback() @property def encoding(self): """Connection encoding in Python-compatible form""" return psycopg2.extensions.encodings[self.connection.encoding] ### Methods for querying database objects # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like # phrases in GRANT or REVOKE statements, therefore alternative methods are # provided here. def schema_exists(self, schema): query = """SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = %s""" self.cursor.execute(query, (schema,)) return self.cursor.fetchone()[0] > 0 def get_all_tables_in_schema(self, schema): if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' % schema) query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] def get_all_sequences_in_schema(self, schema): if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' 
% schema) query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S'""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] ### Methods for getting access control lists and group membership info # To determine whether anything has changed after granting/revoking # privileges, we compare the access control lists of the specified database # objects before and afterwards. Python's list/string comparison should # suffice for change detection, we should not actually have to parse ACLs. # The same should apply to group membership information. def get_table_acls(self, schema, tables): query = """SELECT relacl FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'r' AND relname = ANY (%s) ORDER BY relname""" self.cursor.execute(query, (schema, tables)) return [t[0] for t in self.cursor.fetchall()] def get_sequence_acls(self, schema, sequences): query = """SELECT relacl FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) ORDER BY relname""" self.cursor.execute(query, (schema, sequences)) return [t[0] for t in self.cursor.fetchall()] def get_function_acls(self, schema, function_signatures): funcnames = [f.split('(', 1)[0] for f in function_signatures] query = """SELECT proacl FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE nspname = %s AND proname = ANY (%s) ORDER BY proname, proargtypes""" self.cursor.execute(query, (schema, funcnames)) return [t[0] for t in self.cursor.fetchall()] def get_schema_acls(self, schemas): query = """SELECT nspacl FROM pg_catalog.pg_namespace WHERE nspname = ANY (%s) ORDER BY nspname""" self.cursor.execute(query, (schemas,)) return [t[0] for t in self.cursor.fetchall()] def get_language_acls(self, languages): query = """SELECT lanacl 
FROM pg_catalog.pg_language WHERE lanname = ANY (%s) ORDER BY lanname""" self.cursor.execute(query, (languages,)) return [t[0] for t in self.cursor.fetchall()] def get_tablespace_acls(self, tablespaces): query = """SELECT spcacl FROM pg_catalog.pg_tablespace WHERE spcname = ANY (%s) ORDER BY spcname""" self.cursor.execute(query, (tablespaces,)) return [t[0] for t in self.cursor.fetchall()] def get_database_acls(self, databases): query = """SELECT datacl FROM pg_catalog.pg_database WHERE datname = ANY (%s) ORDER BY datname""" self.cursor.execute(query, (databases,)) return [t[0] for t in self.cursor.fetchall()] def get_group_memberships(self, groups): query = """SELECT roleid, grantor, member, admin_option FROM pg_catalog.pg_auth_members am JOIN pg_catalog.pg_roles r ON r.oid = am.roleid WHERE r.rolname = ANY(%s) ORDER BY roleid, grantor, member""" self.cursor.execute(query, (groups,)) return self.cursor.fetchall() ### Manipulating privileges def manipulate_privs(self, obj_type, privs, objs, roles, state, grant_option, schema_qualifier=None): """Manipulate database object privileges. :param obj_type: Type of database object to grant/revoke privileges for. :param privs: Either a list of privileges to grant/revoke or None if type is "group". :param objs: List of database objects to grant/revoke privileges for. :param roles: Either a list of role names or "PUBLIC" for the implicitly defined "PUBLIC" group :param state: "present" to grant privileges, "absent" to revoke. :param grant_option: Only for state "present": If True, set grant/admin option. If False, revoke it. If None, don't change grant option. :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", "FUNCTION") must be qualified by schema. Ignored for other Types. 
""" # get_status: function to get current status if obj_type == 'table': get_status = partial(self.get_table_acls, schema_qualifier) elif obj_type == 'sequence': get_status = partial(self.get_sequence_acls, schema_qualifier) elif obj_type == 'function': get_status = partial(self.get_function_acls, schema_qualifier) elif obj_type == 'schema': get_status = self.get_schema_acls elif obj_type == 'language': get_status = self.get_language_acls elif obj_type == 'tablespace': get_status = self.get_tablespace_acls elif obj_type == 'database': get_status = self.get_database_acls elif obj_type == 'group': get_status = self.get_group_memberships else: raise Error('Unsupported database object type "%s".' % obj_type) # Return False (nothing has changed) if there are no objs to work on. if not objs: return False # obj_ids: quoted db object identifiers (sometimes schema-qualified) if obj_type == 'function': obj_ids = [] for obj in objs: try: f, args = obj.split('(', 1) except: raise Error('Illegal function signature: "%s".' 
% obj) obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) elif obj_type in ['table', 'sequence']: obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] else: obj_ids = ['"%s"' % o for o in objs] # set_what: SQL-fragment specifying what to set for the target roles: # Either group membership or privileges on objects of a certain type if obj_type == 'group': set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: # function types are already quoted above if obj_type != 'function': obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed set_what = '%s ON %s %s' % (','.join(privs), obj_type, ','.join(obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': for_whom = 'PUBLIC' else: for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles) status_before = get_status(objs) if state == 'present': if grant_option: if obj_type == 'group': query = 'GRANT %s TO %s WITH ADMIN OPTION' else: query = 'GRANT %s TO %s WITH GRANT OPTION' else: query = 'GRANT %s TO %s' self.cursor.execute(query % (set_what, for_whom)) # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. 
if grant_option == False: if obj_type == 'group': query = 'REVOKE ADMIN OPTION FOR %s FROM %s' else: query = 'REVOKE GRANT OPTION FOR %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) else: query = 'REVOKE %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) status_after = get_status(objs) return status_before != status_after def main(): module = AnsibleModule( argument_spec = dict( database=dict(required=True, aliases=['db']), state=dict(default='present', choices=['present', 'absent']), privs=dict(required=False, aliases=['priv']), type=dict(default='table', choices=['table', 'sequence', 'function', 'database', 'schema', 'language', 'tablespace', 'group']), objs=dict(required=False, aliases=['obj']), schema=dict(required=False), roles=dict(required=True, aliases=['role']), grant_option=dict(required=False, type='bool', aliases=['admin_option']), host=dict(default='', aliases=['login_host']), port=dict(type='int', default=5432), unix_socket=dict(default='', aliases=['login_unix_socket']), login=dict(default='postgres', aliases=['login_user']), password=dict(default='', aliases=['login_password']) ), supports_check_mode = True ) # Create type object as namespace for module params p = type('Params', (), module.params) # param "schema": default, allowed depends on param "type" if p.type in ['table', 'sequence', 'function']: p.schema = p.schema or 'public' elif p.schema: module.fail_json(msg='Argument "schema" is not allowed ' 'for type "%s".' % p.type) # param "objs": default, required depends on param "type" if p.type == 'database': p.objs = p.objs or p.database elif not p.objs: module.fail_json(msg='Argument "objs" is required ' 'for type "%s".' % p.type) # param "privs": allowed, required depends on param "type" if p.type == 'group': if p.privs: module.fail_json(msg='Argument "privs" is not allowed ' 'for type "group".') elif not p.privs: module.fail_json(msg='Argument "privs" is required ' 'for type "%s".' 
% p.type) # Connect to Database if not psycopg2: module.fail_json(msg='Python module "psycopg2" must be installed.') try: conn = Connection(p) except psycopg2.Error: e = get_exception() module.fail_json(msg='Could not connect to database: %s' % e) try: # privs if p.privs: privs = frozenset(pr.upper() for pr in p.privs.split(',')) if not privs.issubset(VALID_PRIVS): module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) else: privs = None # objs: if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': objs = conn.get_all_tables_in_schema(p.schema) elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': objs = conn.get_all_sequences_in_schema(p.schema) else: objs = p.objs.split(',') # function signatures are encoded using ':' to separate args if p.type == 'function': objs = [obj.replace(':', ',') for obj in objs] # roles if p.roles == 'PUBLIC': roles = 'PUBLIC' else: roles = p.roles.split(',') changed = conn.manipulate_privs( obj_type = p.type, privs = privs, objs = objs, roles = roles, state = p.state, grant_option = p.grant_option, schema_qualifier=p.schema ) except Error: e = get_exception() conn.rollback() module.fail_json(msg=e.message) except psycopg2.Error: e = get_exception() conn.rollback() # psycopg2 errors come in connection encoding, reencode msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(), 'replace') module.fail_json(msg=msg) if module.check_mode: conn.rollback() else: conn.commit() module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.database import * if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# "Assembunny" interpreter (Advent-of-Code day-23 style, with the custom
# tgl/mul/nop extensions).  Loads the program from input2.txt, runs it with
# register a = 12, then prints the final value of register a.

pc = 0
regs = {'a': 12, 'b': 0, 'c': 0, 'd': 0}
program = [line.split() for line in open('input2.txt', 'r')]

# How `tgl` rewrites each opcode: one-argument ops flip to inc/dec,
# two-argument ops flip between cpy and jnz; mul/nop toggle to themselves.
TOGGLE = {'inc': 'dec', 'dec': 'inc', 'tgl': 'inc',
          'cpy': 'jnz', 'jnz': 'cpy', 'mul': 'mul', 'nop': 'nop'}


def val(token):
    # A token is either a register name or an integer literal.
    return regs[token] if token in regs else int(token)


def cpy(src, dst):
    # Copy src into dst; silently ignored when dst is not a register
    # (which can happen after tgl rewrites a jnz into a cpy).
    if dst in regs:
        regs[dst] = val(src)


def inc(reg):
    regs[reg] += 1


def dec(reg):
    regs[reg] -= 1


def jnz(cond, offset):
    # Relative jump when cond is non-zero; otherwise fall through.
    global pc
    if cond != '0' and (cond not in regs or regs[cond] != 0):
        pc += val(offset)
    else:
        pc += 1


def tgl(offset):
    # Toggle the opcode of the instruction `offset` away from here;
    # out-of-range targets are ignored.
    target = pc + val(offset)
    if 0 <= target < len(program):
        program[target][0] = TOGGLE[program[target][0]]


def mul(x, y, dst):
    # Non-standard extension: dst = x * y (hand-optimized hot loop).
    regs[dst] = val(x) * val(y)


# Every opcode except jnz advances the program counter by one after it
# executes; jnz manages pc itself, and unknown opcodes (e.g. nop) fall
# through as no-ops.
HANDLERS = {'cpy': cpy, 'inc': inc, 'dec': dec, 'tgl': tgl, 'mul': mul}

while pc < len(program):
    op, *args = program[pc]
    if op == 'jnz':
        jnz(*args)
    else:
        handler = HANDLERS.get(op)
        if handler is not None:
            handler(*args)
        pc += 1

print(regs['a'])
input()  # keep the console window open until the user presses Enter
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. '''This utility cleans up the html files as emitted by doxygen so that they are suitable for publication on a Google documentation site. ''' import optparse import os import re import shutil import string import sys try: from BeautifulSoup import BeautifulSoup, Tag except (ImportError, NotImplementedError): print ("This tool requires the BeautifulSoup package " "(see http://www.crummy.com/software/BeautifulSoup/).\n" "Make sure that the file BeautifulSoup.py is either in this directory " "or is available in your PYTHON_PATH") raise class HTMLFixer(object): '''This class cleans up the html strings as produced by Doxygen ''' def __init__(self, html): self.soup = BeautifulSoup(html) def FixTableHeadings(self): '''Fixes the doxygen table headings. This includes: - Using bare <h2> title row instead of row embedded in <tr><td> in table - Putting the "name" attribute into the "id" attribute of the <tr> tag. - Splitting up tables into multiple separate tables if a table heading appears in the middle of a table. For example, this html: <table> <tr><td colspan="2"><h2><a name="pub-attribs"></a> Data Fields List</h2></td></tr> ... </table> would be converted to this: <h2>Data Fields List</h2> <table> ... </table> ''' table_headers = [] for tag in self.soup.findAll('tr'): if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']: #tag['id'] = tag.td.h2.a['name'] tag.string = tag.td.h2.a.next tag.name = 'h2' table_headers.append(tag) # reverse the list so that earlier tags don't delete later tags table_headers.reverse() # Split up tables that have multiple table header (th) rows for tag in table_headers: print "Header tag: %s is %s" % (tag.name, tag.string.strip()) # Is this a heading in the middle of a table? 
if tag.findPreviousSibling('tr') and tag.parent.name == 'table': print "Splitting Table named %s" % tag.string.strip() table = tag.parent table_parent = table.parent table_index = table_parent.contents.index(table) new_table = Tag(self.soup, name='table', attrs=table.attrs) table_parent.insert(table_index + 1, new_table) tag_index = table.contents.index(tag) for index, row in enumerate(table.contents[tag_index:]): new_table.insert(index, row) # Now move the <h2> tag to be in front of the <table> tag assert tag.parent.name == 'table' table = tag.parent table_parent = table.parent table_index = table_parent.contents.index(table) table_parent.insert(table_index, tag) def RemoveTopHeadings(self): '''Removes <div> sections with a header, tabs, or navpath class attribute''' header_tags = self.soup.findAll( name='div', attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')}) [tag.extract() for tag in header_tags] def FixAll(self): self.FixTableHeadings() self.RemoveTopHeadings() def __str__(self): return str(self.soup) def main(): '''Main entry for the doxy_cleanup utility doxy_cleanup takes a list of html files and modifies them in place.''' parser = optparse.OptionParser(usage='Usage: %prog [options] files...') parser.add_option('-m', '--move', dest='move', action='store_true', default=False, help='move html files to "original_html"') options, files = parser.parse_args() if not files: parser.print_usage() return 1 for filename in files: try: with open(filename, 'r') as file: html = file.read() print "Processing %s" % filename fixer = HTMLFixer(html) fixer.FixAll() with open(filename, 'w') as file: file.write(str(fixer)) if options.move: new_directory = os.path.join( os.path.dirname(os.path.dirname(filename)), 'original_html') if not os.path.exists(new_directory): os.mkdir(new_directory) shutil.move(filename, new_directory) except: print "Error while processing %s" % filename raise return 0 if __name__ == '__main__': sys.exit(main())
unknown
codeparrot/codeparrot-clean
// Forwarding header: the Float8_e5m2fnuz inline definitions live under
// torch/headeronly, and this file's sole content is that include —
// presumably retained so the legacy c10/util include path keeps working
// for existing consumers (NOTE(review): confirm against PyTorch history).
#include <torch/headeronly/util/Float8_e5m2fnuz.h>
c
github
https://github.com/pytorch/pytorch
c10/util/Float8_e5m2fnuz-inl.h
# -*- coding: utf-8 -*- #!/usr/bin/python # # Author Yann Bayle # E-mail bayle.yann@live.fr # License MIT # Created 01/12/2016 # Updated 01/12/2016 # Version 1.0.0 # """ Description of bayle.py ====================== 0 Input the local extracted features from YAAFE 13 MFCC per frame 186 musical pieces as train set 1 Computes delta and double delta (39 features per frame) 2 Gather global mean (39 features per musical pieces) 3 train on mfcc & deltas (39 feat/frame) to output global predictions 4 Use global preds to compute song and instru n-grams and histogramm which add 70 feat/track lead to a total of 109 feat/track 5 Fit on 109x186 6 predict (or predict_proba) on 41491 track :Example: source activate py27 ipython run bayle.py -d /media/sf_github/yann/train/ ..todo:: """ import multiprocessing import webbrowser import utils import numpy as np from sklearn.svm import SVC from sklearn import linear_model import sys from functools import partial import time from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score import time import numpy as np import matplotlib.pyplot as plt import math import re import os import sys import csv import time import utils import argparse from datetime import date from collections import Counter from matplotlib.cm import ScalarMappable from matplotlib.colors import Normalize from matplotlib.colorbar import ColorbarBase import matplotlib.pyplot as plt import numpy as np import joblib from sklearn.ensemble import RandomForestClassifier import librosa import os import sys import json import math import utils import random import joblib from pprint import pprint import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import precision_score, recall_score, f1_score from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.neural_network import MLPClassifier from sklearn.gaussian_process import GaussianProcessClassifier from 
sklearn.gaussian_process.kernels import RBF from sklearn import datasets from sklearn import svm from sklearn.ensemble import RandomForestClassifier from sklearn.cross_validation import KFold, cross_val_score from statistics import mean, stdev from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score from sklearn import linear_model from sklearn.tree import DecisionTreeClassifier import classify # import reproduce def arr2str(data, separator=","): return separator.join(str(x) for x in data) def str2arr(data): return np.array(data).astype(np.float) def read_gts(filename, separator="\t"): track_gts = {} with open(filename, "r") as filep: for line in filep: line = line.split(separator) track_gts[line[0]] = line[1][:-1] return track_gts def match_feat_with_song_gt(dir_feat, dir_gts): """Description of match_feat_gt Use groundtruth created by http://www.mathieuramona.com/wp/data/jamendo/ associate to local features csv 7041 lines yaafe lab 326.973 sec ramona Definition of YAAFE from http://yaafe.sourceforge.net/features.html """ utils.print_success("Matching local feat to song/instru groundtruths") dir_feat = utils.abs_path_dir(dir_feat) dir_gts = utils.abs_path_dir(dir_gts) block_size = 1024. step_size = 512. fech = 22050. 
frame_size_ms = block_size / fech filenames = [fn for fn in os.listdir(dir_gts)] for index, filename in enumerate(filenames): utils.print_progress_start(str(index) + "/" + str(len(filenames)) + " " + filename) # gather groundtruths groundtruths = [] with open(dir_gts + filename, "r") as filep: for row in filep: line = row.split(" ") end = float(line[1]) if "no" in line[2]: tag = ",i\n" else: tag = ",s\n" groundtruths.append([end, tag]) gt_len = len(groundtruths) overflow = False gt_index = 0 cpt = 0 # Write features & groundtruths to file str_to_write = "" feat_fn = filename.split(".")[0] feat_fn += ".wav.mfcc.csv" with open(dir_feat + feat_fn, "r") as filep: for index, line in enumerate(filep): # todo cleanup if gt_index < gt_len: if frame_size_ms * index > groundtruths[gt_index][0]: gt_index += 1 if gt_index < gt_len: str_to_write += line[:-1] + groundtruths[gt_index][1] with open(dir_feat + feat_fn, "w") as filep: filep.write(str_to_write) utils.print_progress_end() def match_feat_with_instru_gt(indir, outdir): """Description of match_feat_gt Apply instru groundtruth to CCmixter and MedleyDB """ utils.print_success("Matching local features to instrumental groundtruths") indir = utils.abs_path_dir(indir) + "/" outdir = utils.abs_path_dir(outdir) + "/" filenames = [fn for fn in os.listdir(indir)] for filename in filenames: outfile = open(outdir + filename, "w") with open(indir + filename, "r") as filep: for line in filep: outfile.write(line[:-1] + " i\n") outfile.close() def process_local_feat(indir, file_gts_track, outdir_local, out_feat_global, train): """Description of process_local_feat Add delta and double delta to MFCCs """ utils.print_success("Processing local features") # Preprocess arg indir = utils.abs_path_dir(indir) file_gts_track = utils.abs_path_file(file_gts_track) filelist = os.listdir(indir) outdir_local = utils.abs_path_dir(outdir_local) track_gts = {} with open(file_gts_track, "r") as filep: for line in filep: line = line.split(",") if train: 
index = line[0] else: index = line[0] + ".wav.mfcc.csv" track_gts[index] = line[1][:-1] for index, filename in enumerate(filelist): utils.print_progress_start(str(index) + "/" + str(len(filelist)) + " " + filename) if filename in track_gts: mfccs = [] groundtruths = [] with open(indir + filename, "r") as filep: next(filep) next(filep) next(filep) next(filep) next(filep) for line in filep: line = line.split(",") mfccs.append(str2arr(line[:-1])) if train: groundtruths.append(line[-1][:-1]) mfccs = np.array(mfccs) delta_mfcc = librosa.feature.delta(mfccs) delta2_mfcc = librosa.feature.delta(mfccs, order=2) # Write local features in outdir_local with open(outdir_local + filename, "w") as filep: gt_to_write = "" if "i" in track_gts[filename]: gt_to_write = ",i" elif "s" in track_gts[filename]: # postpone frame groundtruth annotationa to another function later in the code gt_to_write = "" else: utils.print_warning("bayle.py line 231 local frame groundtruth undefined") if train: for a, b, c, d in zip(mfccs, delta_mfcc, delta2_mfcc, groundtruths): filep.write(arr2str(a) + "," + arr2str(b) + "," + arr2str(c) + "," + d + "\n") else: for a, b, c in zip(mfccs, delta_mfcc, delta2_mfcc): filep.write(arr2str(a) + "," + arr2str(b) + "," + arr2str(c) + gt_to_write + "\n") # # Write global features in out_feat_global # with open(out_feat_global, "a") as filep: # filep.write(filename + "," + # arr2str(np.mean(mfccs, axis=0)) + "," + # arr2str(np.mean(delta_mfcc, axis=0)) + "," + # arr2str(np.mean(delta2_mfcc, axis=0)) + "," + # track_gts[filename] + "\n") utils.print_progress_end() utils.print_success("Adding local groundtruths to Songs in Jamendo thanks to Ramona annotations") match_feat_with_song_gt(dir_feat=outdir_local, dir_gts="groundtruths/frame_annot_jamendo_ramona/") utils.print_success("Done") def column(matrix, i): return [row[i] for row in matrix] def ngram_proba(local_pred, threshold=0.5, above_threshold=True): """ n-gram creation """ cpt_ngram = 0 nb_ngram = 30 ngrams = 
[0,] * nb_ngram for pred in local_pred: if above_threshold: condition = pred > threshold else: condition = pred <= threshold if condition: cpt_ngram += 1 else: if cpt_ngram < nb_ngram: ngrams[cpt_ngram] += 1 else: ngrams[nb_ngram-1] += 1 cpt_ngram = 0 nb_tag_sing = float(sum(ngrams)) if nb_tag_sing > 0.: ngrams = [float(x) / nb_tag_sing for x in ngrams] # utils.print_error(ngrams) return ','.join(str(x) for x in ngrams) def ngram(preds, tag): """Description of ngram """ cpt_ngram = 0 nb_ngram = 30 ngrams = [0,] * nb_ngram for pred in preds: if tag in pred: cpt_ngram += 1 else: if cpt_ngram < nb_ngram: ngrams[cpt_ngram] += 1 else: ngrams[nb_ngram-1] += 1 cpt_ngram = 0 nb_tag = float(sum(ngrams)) if nb_tag > 0.: ngrams = [float(x) / nb_tag for x in ngrams] return ','.join(str(x) for x in ngrams) def create_track_feat_testset(folder, infile, outfile, model_file, train=False): """Description of create_track_feat_testset Need to read each test file compute deltas on mfcc in the ram predict and predict_proba generate song and instru ngrams and histograms Add the mean of mfcc+deltas append 109 features vector in feat_track/feat_test.csv """ utils.print_success("Create track feat testset") folder = utils.abs_path_dir(folder) infile = utils.abs_path_file(infile) clf = joblib.load(model_file) track_gts = read_gts(infile, separator=",") for index, filename in enumerate(track_gts): utils.print_progress_start(str(index+1) + "/" + str(len(track_gts)) + " " + filename) mfccs = [] mfccs_1 = [] extension = "" if train: extension = "" else: extension += "_audio_full_mono_22k" extension += ".wav.mfcc.csv" with open(folder + filename + extension, "r") as filep: if train: next(filep) next(filep) next(filep) next(filep) next(filep) for line in filep: if train: line = line.split(",") else: line = line.split(" ") mfccs_1.append(str2arr(line[:-1])) # if train: # mfccs.append(str2arr(line[:-1])) # else: # mfccs.append(str2arr(line[0:])) mfccs = np.array(mfccs_1) delta_mfcc = 
librosa.feature.delta(mfccs) delta2_mfcc = librosa.feature.delta(mfccs, order=2) tmp = np.append(mfccs, delta_mfcc, axis=1) features = np.append(tmp, delta2_mfcc, axis=1) preds_proba = clf.predict_proba(features) # Histogramm nb_hist_class = 10 numbers = column(preds_proba, 0) hist_pred = np.histogram(numbers, nb_hist_class) hist_pred_norm = hist_pred[0] / float(sum(hist_pred[0])) ngram_threshold = 0.5 song_ngram_proba = ngram_proba(local_pred=numbers, threshold=ngram_threshold, above_threshold=True) instru_ngram_proba = ngram_proba(local_pred=numbers, threshold=ngram_threshold, above_threshold=False) preds = clf.predict(features) song_ngram = ngram(preds, "s") instru_ngram = ngram(preds, "i") with open(outfile, "a") as filep: filep.write(filename[:12] + "," + arr2str(np.mean(mfccs, axis=0)) + "," + arr2str(np.mean(delta_mfcc, axis=0)) + "," + arr2str(np.mean(delta2_mfcc, axis=0)) + "," + arr2str(hist_pred_norm) + "," + song_ngram_proba + "," + instru_ngram_proba + "," + song_ngram + "," + instru_ngram + "," + track_gts[filename] + "\n") utils.print_progress_end() def figures1bd(indir, file_gts_track): """Description of figures1bd infile is formated like: /media/sf_github/yann/train/01 - 01 Les Jardins Japonais.wav.mfcc.csv feat1 feat2 ... featn tag1 feat1 feat2 ... featn tag2 ... feat1 feat2 ... 
featn tag2 0 Input the local extracted features from YAAFE 13 MFCC per frame 186 musical pieces as train set 1 Computes delta and double delta (39 features per frame) 2 Gather global mean (39 features per musical pieces) 3 train on mfcc & deltas (39 feat/frame) to output global predictions 4 Use global preds to compute song and instru n-grams and histogramm which add 70 feat/track lead to a total of 109 feat/track 5 Fit on 109x186 6 predict (or predict_proba) on 41491 track """ # Preprocess arg indir = utils.abs_path_dir(indir) file_gts_track = utils.abs_path_file(file_gts_track) feat_frame_train = "feat_frame_train/" utils.create_dir(feat_frame_train) feat_frame_test = "feat_frame_test/" utils.create_dir(feat_frame_test) outdir_global = "feat_track/" utils.create_dir(outdir_global) feat_train = outdir_global + "train.csv" feat_test = outdir_global + "test.csv" models_dir = "models/" utils.create_dir(models_dir) loc_feat_testset_dirpath = "/media/sf_DATA/Datasets/Simbals/yaafe/results/processed/" filelist_test = "filelist_test.tsv" filelist_train = "filelist_train.tsv" models_global = "models_track/" utils.create_dir(models_global) # process_local_feat(indir, file_gts_track, feat_frame_train, feat_train, train=True) # classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=",") # create_track_feat_testset(indir, filelist_train, feat_train, train=True) # 15h28m44s to 19h08m28s Done in 13184117ms # create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test) # classify.create_models(outdir=models_global, train_file=feat_train) # classify.test_models_parallel( # models_dir=models_global, # out_dir="results/", # test_file=feat_test) # Display results reproduce.plot_results("results/") def figure1a(file_gts_track): """Description of figure1a """ outdir_global = "feat_track/" utils.create_dir(outdir_global) feat_train = outdir_global + "train.csv" # process_local_feat(indir, file_gts_track, feat_frame_train, feat_train, 
train=True) classify.cross_validation(feat_train, n_folds=5) def figure2(indir, file_gts_track): """Description of figure2 Method to maintain 100 percent of precision and to maximize recall. """ pass def read_file_bayle(filename): """Description of read_file train/test example line: filename,feat1,feat2,...,featn,tag """ filename = utils.abs_path_file(filename) filenames = [] groundtruths = [] features = [] with open(filename, "r") as filep: for row in filep: line = row.split(",") filenames.append(line[0]) features.append([float(i) for i in line[1:-1]]) gt = line[-1] while "\n" in gt or "\r" in gt: gt = gt [:-1] groundtruths.append(gt) return filenames, features, groundtruths def column(matrix, i): return [row[i] for row in matrix] def process_results(train, test): train_fn, train_features, train_groundtruths = read_file_bayle(train) test_fn, test_features, test_groundtruths = read_file_bayle(test) step = 0.1 # for weight in np.arange(0.0, 1.0, step): # inside_clf = RandomForestClassifier(random_state=2) inside_clf = DecisionTreeClassifier(random_state=2) # class_weight={"i":weight, "s":1-weight}) clf = AdaBoostClassifier( random_state=2,#with 4 98%precision song class base_estimator=inside_clf) clf.fit(train_features, train_groundtruths) predictions = clf.predict(test_features) print("Accuracy " + str(accuracy_score(test_groundtruths, predictions))) print("F-Measure " + str(f1_score(test_groundtruths, predictions, average="weighted"))) print("Precision " + str(precision_score(test_groundtruths, predictions, average=None))) print("Recall " + str(recall_score(test_groundtruths, predictions, average=None))) print("F-Measure " + str(f1_score(test_groundtruths, predictions, average=None))) # predictions = [1.0 if i=="s" else 0.0 for i in predictions] predictions = column(clf.predict_proba(test_features), 0) outdir = "predictions/" with open(outdir + "Bayle.csv", "w") as filep: for name, pred in zip(test_fn, predictions): filep.write(name + "," + str(1.0 - float(pred)) 
+ "\n") def new_algo_final(indir, file_gts_track): utils.print_success("Approx. time ~6 hours.") # Preprocess arg indir = utils.abs_path_dir(indir) file_gts_track = utils.abs_path_file(file_gts_track) dir_tmp = utils.create_dir(utils.create_dir("src/tmp") + "bayle") feat_frame_train = utils.create_dir(dir_tmp + "feat_frame_train") feat_frame_test = utils.create_dir(dir_tmp + "feat_frame_test") outdir_global = utils.create_dir(dir_tmp + "feat_track") feat_train = outdir_global + "train.csv" feat_test = outdir_global + "test.csv" models_dir = utils.create_dir(dir_tmp + "models") loc_feat_testset_dirpath = "features/database2/" filelist_train = "groundtruths/database1.csv" filelist_test = "groundtruths/database2.csv" models_global = utils.create_dir(dir_tmp + "models_track") process_local_feat(indir, file_gts_track, outdir_local=feat_frame_train, out_feat_global=feat_train, train=False) classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=",", classifiers="RandomForest") """ Create features at track scale for the train set Features: MFCC + Delta + Double Delta + ngrams + hist """ model_file = "src/tmp/bayle/models/RandomForest/RandomForest.pkl" model_file = "/media/sf_DATA/ReproducibleResearchIEEE2017/src/tmp/bayle/models/RandomForest/RandomForest.pkl" create_track_feat_testset(indir, filelist_train, feat_train, model_file, train=True) # # 15h28m44s to 19h08m28s Done in 13184117ms create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test, model_file) classify.create_models(outdir=models_global, train_file=feat_train, classifiers="RandomForest") process_results(feat_train, feat_test) def main(): begin = int(round(time.time() * 1000)) PARSER = argparse.ArgumentParser(description="Bayle et al. 
(2017) algorithm") PARSER.add_argument( "-d", "--indir", help="input dir containing all local features extracted by YAAFE", type=str, default="/media/sf_github/yann/train/", metavar="indir") PARSER.add_argument( "-i", "--gts", help="input file containing all track groundtruths", type=str, default="filelist_train.tsv") indir = "features/database1/" file_gts_track = "groundtruths/database1.csv" new_algo_final(indir, file_gts_track) # figure1a(PARSER.parse_args().gts) # figures1bd(PARSER.parse_args().indir, PARSER.parse_args().gts) # figure2(PARSER.parse_args().indir, PARSER.parse_args().gts) # Local feat processing # Global feat processing # bayle_fig3() utils.print_success("Done in " + str(int(round(time.time() * 1000)) - begin) + "ms") if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
{ "name": "@next/bundle-analyzer-ui", "version": "16.0.2-canary.16", "private": true, "scripts": { "build": "cross-env NEXT_TEST_NATIVE_DIR=no next build --no-mangling", "dev": "cross-env NEXT_TEST_NATIVE_DIR=no next dev", "lint": "eslint .", "start": "cross-env NEXT_TEST_NATIVE_DIR=no next start" }, "dependencies": { "@radix-ui/react-dialog": "1.1.4", "@radix-ui/react-popover": "1.1.4", "@radix-ui/react-select": "2.2.6", "@radix-ui/react-slot": "1.1.1", "@radix-ui/react-toggle-group": "1.1.1", "@radix-ui/react-tooltip": "1.1.4", "autoprefixer": "^10.4.20", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "cmdk": "1.0.4", "lucide-react": "^0.554.0", "next": "16.0.8", "next-themes": "^0.4.6", "polished": "^4.3.1", "react": "19.2.0", "react-dom": "19.2.0", "swr": "^2.2.4", "tailwind-merge": "^2.5.5", "tailwindcss-animate": "^1.0.7" }, "devDependencies": { "@tailwindcss/postcss": "^4.1.9", "@types/node": "^22", "@types/react": "^18", "@types/react-dom": "^18", "cross-env": "10.1.0", "inliner": "1.13.1", "postcss": "^8.5", "tailwindcss": "^4.1.9", "tw-animate-css": "1.3.3", "typescript": "^5" } }
json
github
https://github.com/vercel/next.js
apps/bundle-analyzer/package.json
""" mbed CMSIS-DAP debugger Copyright (c) 2015 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import logging from ..target.semihost import SemihostIOHandler # Open mode flags O_RDONLY = 0x0 O_WRONLY = 0x1 O_RDWR = 0x2 O_APPEND = 0x8 O_CREAT = 0x200 O_TRUNC = 0x400 O_EXCL = 0x800 # Offset added to file descriptor numbers returned from gdb. This offset is to make # sure we don't overlap with the standard I/O file descriptors 1, 2, and 3 (fds must be # non-zero for semihosting). FD_OFFSET = 4 ## # @brief Semihosting file I/O handler that performs GDB syscalls. class GDBSyscallIOHandler(SemihostIOHandler): def __init__(self, server): super(GDBSyscallIOHandler, self).__init__() self._server = server def open(self, fnptr, fnlen, mode): # Handle standard I/O. fd, _ = self._std_open(fnptr, fnlen, mode) if fd is not None: return fd # Convert mode string to flags. 
modeval = 0 hasplus = '+' in mode if 'r' in mode: if hasplus: modeval |= O_RDWR else: modeval |= O_RDONLY elif 'w' in mode: if hasplus: modeval |= O_RDWR | O_CREAT | O_TRUNC else: modeval |= O_WRONLY | O_CREAT | O_TRUNC elif 'a' in mode: if hasplus: modeval |= O_RDWR | O_APPEND | O_CREAT else: modeval |= O_WRONLY | O_APPEND | O_CREAT result, self._errno = self._server.syscall('open,%x/%x,%x,%x' % (fnptr, fnlen+1, modeval, 0777)) if result != -1: result += FD_OFFSET return result def close(self, fd): fd -= FD_OFFSET result, self._errno = self._server.syscall('close,%x' % (fd)) return result # syscall return: number of bytes written # semihost return: 0 is success, or number of bytes not written def write(self, fd, ptr, length): fd -= FD_OFFSET result, self._errno = self._server.syscall('write,%x,%x,%x' % (fd, ptr, length)) return length - result # syscall return: number of bytes read # semihost return: 0 is success, length is EOF, number of bytes not read def read(self, fd, ptr, length): fd -= FD_OFFSET result, self._errno = self._server.syscall('read,%x,%x,%x' % (fd, ptr, length)) return length - result def readc(self): ptr = self.agent.target.readCoreRegister('sp') - 4 result, self._errno = self._server.syscall('read,0,%x,1' % (ptr)) if result != -1: result = self.agent.target.read8(ptr) return result def istty(self, fd): fd -= FD_OFFSET result, self._errno = self._server.syscall('isatty,%x' % (fd)) return result def seek(self, fd, pos): fd -= FD_OFFSET result, self._errno = self._server.syscall('lseek,%x,%x,0' % (fd, pos)) return 0 if result is not -1 else -1 def flen(self, fd): fd -= FD_OFFSET ptr = self.agent.target.readCoreRegister('sp') - 64 result, self._errno = self._server.syscall('fstat,%x,%x' % (fd, ptr)) if result != -1: # Fields in stat struct are big endian as written by gdb. 
size = self.agent.target.readBlockMemoryUnaligned8(ptr, 8) result = (size[0] << 56) \ | (size[1] << 48) \ | (size[2] << 40) \ | (size[3] << 32) \ | (size[4] << 24) \ | (size[5] << 16) \ | (size[6] << 8) \ | (size[7]) return result
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true # :markup: markdown require "delegate" require "io/console/size" module ActionDispatch module Routing class RouteWrapper < SimpleDelegator # :nodoc: def matches_filter?(filter, value) return __getobj__.path.match(value) if filter == :exact_path_match value.match?(public_send(filter)) end def endpoint case when app.dispatcher? "#{controller}##{action}" when rack_app.is_a?(Proc) "Inline handler (Proc/Lambda)" else rack_app.inspect end end def constraints requirements.except(:controller, :action) end def rack_app app.rack_app end def path super.spec.to_s end def name super.to_s end def reqs @reqs ||= begin reqs = endpoint reqs += " #{constraints}" unless constraints.empty? reqs end end def controller parts.include?(:controller) ? ":controller" : requirements[:controller] end def action parts.include?(:action) ? ":action" : requirements[:action] end def internal? internal end def action_source_location file, line = action_source_file_and_line return unless file "#{file}:#{line}" end def action_source_file_and_line return unless app.dispatcher? return unless controller && action controller_name = controller.to_s action_name = action.to_s return if controller_name.start_with?(":") || action_name.start_with?(":") begin controller_class = "#{controller_name.camelize}Controller".constantize method = controller_class.instance_method(action_name.to_sym) method.source_location rescue NameError, TypeError nil end end def engine? app.engine? end def to_h file, line = action_source_file_and_line { name: name, verb: verb, path: path, reqs: reqs, source_location: source_location, action_source_location: action_source_location, action_source_file: file, action_source_line: line } end end ## # This class is just used for displaying route information when someone # executes `bin/rails routes` or looks at the RoutingError page. People should # not use this class. 
class RoutesInspector # :nodoc: def initialize(routes) @routes = wrap_routes(routes) @engines = load_engines_routes end def format(formatter, filter = {}) all_routes = { nil => @routes }.merge(@engines) all_routes.each do |engine_name, routes| format_routes(formatter, filter, engine_name, routes) end formatter.result end private def format_routes(formatter, filter, engine_name, routes) routes = filter_routes(routes, normalize_filter(filter)).map(&:to_h) formatter.section_title "Routes for #{engine_name || "application"}" if @engines.any? if routes.any? formatter.header routes formatter.section routes else formatter.no_routes engine_name, routes, filter end formatter.footer routes end def wrap_routes(routes) routes.routes.map { |route| RouteWrapper.new(route) }.reject(&:internal?) end def load_engines_routes engine_routes = @routes.select(&:engine?) engines = engine_routes.to_h do |engine_route| engine_app_routes = engine_route.rack_app.routes engine_app_routes = engine_app_routes.routes if engine_app_routes.is_a?(ActionDispatch::Routing::RouteSet) [engine_route.endpoint, wrap_routes(engine_app_routes)] end engines end def normalize_filter(filter) if filter[:controller] { controller: /#{filter[:controller].underscore.sub(/_?controller\z/, "")}/ } elsif filter[:grep] grep_pattern = Regexp.new(filter[:grep]) path = URI::RFC2396_PARSER.escape(filter[:grep]) normalized_path = ("/" + path).squeeze("/") { controller: grep_pattern, action: grep_pattern, verb: grep_pattern, name: grep_pattern, path: grep_pattern, exact_path_match: normalized_path, } end end def filter_routes(routes, filter) if filter routes.select do |route| filter.any? 
{ |filter_type, value| route.matches_filter?(filter_type, value) } end else routes end end end module ConsoleFormatter class Base def initialize @buffer = [] end def result @buffer.join("\n") end def section_title(title) end def section(routes) end def header(routes) end def footer(routes) end def no_routes(engine, routes, filter) @buffer << if filter.key?(:controller) "No routes were found for this controller." elsif filter.key?(:grep) "No routes were found for this grep pattern." elsif routes.none? if engine "No routes defined." else <<~MESSAGE You don't have any routes defined! Please add some routes in config/routes.rb. MESSAGE end end unless engine @buffer << "For more information about routes, see the Rails guide: https://guides.rubyonrails.org/routing.html." end end end class Sheet < Base def section_title(title) @buffer << "#{title}:" end def section(routes) @buffer << draw_section(routes) end def header(routes) @buffer << draw_header(routes) end def footer(routes) @buffer << "" end private def draw_section(routes) header_lengths = ["Prefix", "Verb", "URI Pattern"].map(&:length) name_width, verb_width, path_width = widths(routes).zip(header_lengths).map(&:max) routes.map do |r| "#{r[:name].rjust(name_width)} #{r[:verb].ljust(verb_width)} #{r[:path].ljust(path_width)} #{r[:reqs]}" end end def draw_header(routes) name_width, verb_width, path_width = widths(routes) "#{"Prefix".rjust(name_width)} #{"Verb".ljust(verb_width)} #{"URI Pattern".ljust(path_width)} Controller#Action" end def widths(routes) [routes.map { |r| r[:name].length }.max || 0, routes.map { |r| r[:verb].length }.max || 0, routes.map { |r| r[:path].length }.max || 0] end end class Expanded < Base def initialize(width: IO.console_size[1]) @width = width super() end def section_title(title) @buffer << "#{"[ #{title} ]"}" end def section(routes) @buffer << draw_expanded_section(routes) end def footer(routes) @buffer << "" end private def draw_expanded_section(routes) routes.map.each_with_index do 
|r, i| route_rows = <<~MESSAGE.chomp #{route_header(index: i + 1)} Prefix | #{r[:name]} Verb | #{r[:verb]} URI | #{r[:path]} Controller#Action | #{r[:reqs]} MESSAGE route_rows += "\nSource Location | #{r[:source_location]}" if r[:source_location].present? route_rows += "\nAction Location | #{r[:action_source_location]}" if r[:action_source_location].present? route_rows end end def route_header(index:) "--[ Route #{index} ]".ljust(@width, "-") end end class Unused < Sheet def header(routes) @buffer << <<~MSG Found #{routes.count} unused #{"route".pluralize(routes.count)}: MSG super end def no_routes(engine, routes, filter) @buffer << if filter.none? "No unused routes found." elsif filter.key?(:controller) "No unused routes found for this controller." elsif filter.key?(:grep) "No unused routes found for this grep pattern." end end end end class HtmlTableFormatter def initialize(view) @view = view @buffer = [] end def section_title(title) @buffer << %(<tr><th colspan="5">#{title}</th></tr>) end def section(routes) @buffer << @view.render(partial: "routes/route", collection: routes) end # The header is part of the HTML page, so we don't construct it here. def header(routes) end def footer(routes) end def no_routes(*) @buffer << <<~MESSAGE <p>You don't have any routes defined!</p> <ul> <li>Please add some routes in <tt>config/routes.rb</tt>.</li> <li> For more information about routes, please see the Rails guide <a href="https://guides.rubyonrails.org/routing.html">Rails Routing from the Outside In</a>. </li> </ul> MESSAGE end def result @view.raw @view.render(layout: "routes/table") { @view.raw @buffer.join("\n") } end end end end
ruby
github
https://github.com/rails/rails
actionpack/lib/action_dispatch/routing/inspector.rb
#========================================================================= # ParcProcFL_test #========================================================================= from __future__ import print_function import pytest import pisa import struct from pymtl import * from pclib.test import TestSource, TestSink from pclib.ifcs import MemMsg from ParcProcFL import ParcProcFL from GenericXcelFL import GenericXcelFL from pclib.test.TestMemoryFuture import TestMemory from pisa.pisa_inst_test_utils import asm_test class TestHarness (Model): #----------------------------------------------------------------------- # constructor #----------------------------------------------------------------------- def __init__( s ): # Instantiate models s.src = TestSource ( 32, [], 0 ) s.sink = TestSink ( 32, [], 0 ) s.proc = ParcProcFL () s.mem = TestMemory ( MemMsg(32,32), 3 ) s.xcel = GenericXcelFL () #----------------------------------------------------------------------- # elaborate #----------------------------------------------------------------------- def elaborate_logic( s ): # Processor <-> Proc/Mngr s.connect( s.proc.mngr2proc, s.src.out ) s.connect( s.proc.proc2mngr, s.sink.in_ ) # Processor <-> Memory s.connect( s.proc.imemreq, s.mem.reqs[0] ) s.connect( s.proc.imemresp, s.mem.resps[0] ) s.connect( s.proc.dmemreq, s.mem.reqs[1] ) s.connect( s.proc.dmemresp, s.mem.resps[1] ) # Processor <-> Accelerator s.connect( s.proc.xcelreq, s.xcel.xcelreq ) s.connect( s.proc.xcelresp, s.xcel.xcelresp ) # Accelerator <-> Memory # s.connect( s.mvmult.memreq, s.mem.reqs[2] ) # s.connect( s.mvmult.memresp, s.mem.resps[2] ) #----------------------------------------------------------------------- # load #----------------------------------------------------------------------- def load( self, mem_image ): # Iterate over the sections sections = mem_image.get_sections() for section in sections: # For .mngr2proc sections, copy section into mngr2proc src if section.name == ".mngr2proc": for i in 
xrange(0,len(section.data),4): bits = struct.unpack_from("<I",buffer(section.data,i,4))[0] self.src.src.msgs.append( Bits(32,bits) ) # For .proc2mngr sections, copy section into proc2mngr_ref src elif section.name == ".proc2mngr": for i in xrange(0,len(section.data),4): bits = struct.unpack_from("<I",buffer(section.data,i,4))[0] self.sink.sink.msgs.append( Bits(32,bits) ) # For all other sections, simply copy them into the memory else: start_addr = section.addr stop_addr = section.addr + len(section.data) self.mem.mem[start_addr:stop_addr] = section.data #----------------------------------------------------------------------- # cleanup #----------------------------------------------------------------------- def cleanup( s ): del s.mem.mem[:] #----------------------------------------------------------------------- # done #----------------------------------------------------------------------- def done( s ): return s.src.done and s.sink.done #----------------------------------------------------------------------- # line_trace #----------------------------------------------------------------------- def line_trace( s ): return s.src.line_trace() + " > " + \ s.proc.line_trace() + "|" + \ s.xcel.line_trace() + " " + \ s.mem.line_trace() + " > " + \ s.sink.line_trace() #------------------------------------------------------------------------- # run_test #------------------------------------------------------------------------- def run_test( gen_test ): # Instantiate and elaborate the model model = TestHarness() model.elaborate() # Assemble the test program mem_image = pisa.pisa_encoding.assemble( gen_test() ) # Load the program into the model model.load( mem_image ) # Create a simulator using the simulation tool sim = SimulationTool( model ) # Run the simulation print() sim.reset() while not model.done(): sim.print_line_trace() sim.cycle() # Add a couple extra ticks so that the VCD dump is nicer sim.cycle() sim.cycle() sim.cycle() model.cleanup() 
#------------------------------------------------------------------------- # mngr #------------------------------------------------------------------------- import pisa.pisa_inst_mngr_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_mngr_test.gen_basic_test ), asm_test( pisa.pisa_inst_mngr_test.gen_bypass_test ), asm_test( pisa.pisa_inst_mngr_test.gen_value_test ), ]) def test_mngr( name, test ): run_test( test ) #------------------------------------------------------------------------- # addu #------------------------------------------------------------------------- import pisa.pisa_inst_addu_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_addu_test.gen_basic_test ), asm_test( pisa.pisa_inst_addu_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_addu_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_addu_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_addu_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_addu_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_addu_test.gen_value_test ), asm_test( pisa.pisa_inst_addu_test.gen_random_test ), ]) def test_addu( name, test ): run_test( test ) #------------------------------------------------------------------------- # subu #------------------------------------------------------------------------- import pisa.pisa_inst_subu_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_subu_test.gen_basic_test ), asm_test( pisa.pisa_inst_subu_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_subu_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_subu_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_subu_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_subu_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_subu_test.gen_value_test ), asm_test( pisa.pisa_inst_subu_test.gen_random_test ), ]) def test_subu( name, test ): run_test( test ) #------------------------------------------------------------------------- # and 
#------------------------------------------------------------------------- import pisa.pisa_inst_and_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_and_test.gen_basic_test ), asm_test( pisa.pisa_inst_and_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_and_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_and_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_and_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_and_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_and_test.gen_value_test ), asm_test( pisa.pisa_inst_and_test.gen_random_test ), ]) def test_and( name, test ): run_test( test ) #------------------------------------------------------------------------- # or #------------------------------------------------------------------------- import pisa.pisa_inst_or_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_or_test.gen_basic_test ), asm_test( pisa.pisa_inst_or_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_or_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_or_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_or_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_or_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_or_test.gen_value_test ), asm_test( pisa.pisa_inst_or_test.gen_random_test ), ]) def test_or( name, test ): run_test( test ) #------------------------------------------------------------------------- # xor #------------------------------------------------------------------------- import pisa.pisa_inst_xor_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_xor_test.gen_basic_test ), asm_test( pisa.pisa_inst_xor_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_xor_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_xor_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_xor_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_xor_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_xor_test.gen_value_test ), asm_test( pisa.pisa_inst_xor_test.gen_random_test ), ]) def 
test_xor( name, test ): run_test( test ) #------------------------------------------------------------------------- # nor #------------------------------------------------------------------------- import pisa.pisa_inst_nor_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_nor_test.gen_basic_test ), asm_test( pisa.pisa_inst_nor_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_nor_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_nor_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_nor_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_nor_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_nor_test.gen_value_test ), asm_test( pisa.pisa_inst_nor_test.gen_random_test ), ]) def test_nor( name, test ): run_test( test ) #------------------------------------------------------------------------- # slt #------------------------------------------------------------------------- import pisa.pisa_inst_slt_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_slt_test.gen_basic_test ), asm_test( pisa.pisa_inst_slt_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_slt_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_slt_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_slt_test.gen_srcs_byp_test ), asm_test( pisa.pisa_inst_slt_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_slt_test.gen_value_test ), asm_test( pisa.pisa_inst_slt_test.gen_random_test ), ]) def test_slt( name, test ): run_test( test ) #------------------------------------------------------------------------- # sltu #------------------------------------------------------------------------- import pisa.pisa_inst_sltu_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_sltu_test.gen_basic_test ), asm_test( pisa.pisa_inst_sltu_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_sltu_test.gen_src0_byp_test ), asm_test( pisa.pisa_inst_sltu_test.gen_src1_byp_test ), asm_test( pisa.pisa_inst_sltu_test.gen_srcs_byp_test ), asm_test( 
pisa.pisa_inst_sltu_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_sltu_test.gen_value_test ), asm_test( pisa.pisa_inst_sltu_test.gen_random_test ), ]) def test_sltu( name, test ): run_test( test ) #------------------------------------------------------------------------- # addiu #------------------------------------------------------------------------- import pisa.pisa_inst_addiu_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_addiu_test.gen_basic_test ), asm_test( pisa.pisa_inst_addiu_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_addiu_test.gen_src_byp_test ), asm_test( pisa.pisa_inst_addiu_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_addiu_test.gen_value_test ), asm_test( pisa.pisa_inst_addiu_test.gen_random_test ), ]) def test_addiu( name, test ): run_test( test ) #------------------------------------------------------------------------- # andi #------------------------------------------------------------------------- import pisa.pisa_inst_andi_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_andi_test.gen_basic_test ), asm_test( pisa.pisa_inst_andi_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_andi_test.gen_src_byp_test ), asm_test( pisa.pisa_inst_andi_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_andi_test.gen_value_test ), asm_test( pisa.pisa_inst_andi_test.gen_random_test ), ]) def test_andi( name, test ): run_test( test ) #------------------------------------------------------------------------- # ori #------------------------------------------------------------------------- import pisa.pisa_inst_ori_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_ori_test.gen_basic_test ), asm_test( pisa.pisa_inst_ori_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_ori_test.gen_src_byp_test ), asm_test( pisa.pisa_inst_ori_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_ori_test.gen_value_test ), asm_test( pisa.pisa_inst_ori_test.gen_random_test ), ]) def test_ori( name, 
test ): run_test( test ) #------------------------------------------------------------------------- # xori #------------------------------------------------------------------------- import pisa.pisa_inst_xori_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_xori_test.gen_basic_test ), asm_test( pisa.pisa_inst_xori_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_xori_test.gen_src_byp_test ), asm_test( pisa.pisa_inst_xori_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_xori_test.gen_value_test ), asm_test( pisa.pisa_inst_xori_test.gen_random_test ), ]) def test_xori( name, test ): run_test( test ) #------------------------------------------------------------------------- # slti #------------------------------------------------------------------------- import pisa.pisa_inst_slti_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_slti_test.gen_basic_test ), asm_test( pisa.pisa_inst_slti_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_slti_test.gen_src_byp_test ), asm_test( pisa.pisa_inst_slti_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_slti_test.gen_value_test ), asm_test( pisa.pisa_inst_slti_test.gen_random_test ), ]) def test_slti( name, test ): run_test( test ) #------------------------------------------------------------------------- # sltiu #------------------------------------------------------------------------- import pisa.pisa_inst_sltiu_test @pytest.mark.parametrize( "name,test", [ asm_test( pisa.pisa_inst_sltiu_test.gen_basic_test ), asm_test( pisa.pisa_inst_sltiu_test.gen_dest_byp_test ), asm_test( pisa.pisa_inst_sltiu_test.gen_src_byp_test ), asm_test( pisa.pisa_inst_sltiu_test.gen_srcs_dest_test ), asm_test( pisa.pisa_inst_sltiu_test.gen_value_test ), asm_test( pisa.pisa_inst_sltiu_test.gen_random_test ), ]) def test_sltiu( name, test ): run_test( test ) #------------------------------------------------------------------------- # sll 
#-------------------------------------------------------------------------

import pisa.pisa_inst_sll_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_sll_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_sll_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_sll_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_sll_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_sll_test.gen_value_test ),
    asm_test( pisa.pisa_inst_sll_test.gen_random_test ),
])
def test_sll( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# srl
#-------------------------------------------------------------------------

import pisa.pisa_inst_srl_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_srl_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_srl_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_srl_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_srl_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_srl_test.gen_value_test ),
    asm_test( pisa.pisa_inst_srl_test.gen_random_test ),
])
def test_srl( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# sra
#-------------------------------------------------------------------------

import pisa.pisa_inst_sra_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_sra_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_sra_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_sra_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_sra_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_sra_test.gen_value_test ),
    asm_test( pisa.pisa_inst_sra_test.gen_random_test ),
])
def test_sra( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# sllv
#-------------------------------------------------------------------------
# Register-shift-amount variants have two register sources, so they carry
# the extra src0/src1/srcs bypass test generators.

import pisa.pisa_inst_sllv_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_sllv_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_value_test ),
    asm_test( pisa.pisa_inst_sllv_test.gen_random_test ),
])
def test_sllv( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# srlv
#-------------------------------------------------------------------------

import pisa.pisa_inst_srlv_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_srlv_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_value_test ),
    asm_test( pisa.pisa_inst_srlv_test.gen_random_test ),
])
def test_srlv( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# srav
#-------------------------------------------------------------------------

import pisa.pisa_inst_srav_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_srav_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_value_test ),
    asm_test( pisa.pisa_inst_srav_test.gen_random_test ),
])
def test_srav( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# lui
#-------------------------------------------------------------------------

import pisa.pisa_inst_lui_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_lui_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_lui_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_lui_test.gen_value_test ),
    asm_test( pisa.pisa_inst_lui_test.gen_random_test ),
])
def test_lui( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# mul
#-------------------------------------------------------------------------

import pisa.pisa_inst_mul_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_mul_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_value_test ),
    asm_test( pisa.pisa_inst_mul_test.gen_random_test ),
])
def test_mul( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# div
#-------------------------------------------------------------------------

import pisa.pisa_inst_div_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_div_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_div_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_div_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_div_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_div_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_div_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_div_test.gen_value_test ),
    asm_test( pisa.pisa_inst_div_test.gen_random_test ),
])
def test_div( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# divu
#-------------------------------------------------------------------------

import pisa.pisa_inst_divu_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_divu_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_value_test ),
    asm_test( pisa.pisa_inst_divu_test.gen_random_test ),
])
def test_divu( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# rem
#-------------------------------------------------------------------------

import pisa.pisa_inst_rem_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_rem_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_value_test ),
    asm_test( pisa.pisa_inst_rem_test.gen_random_test ),
])
def test_rem( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# remu
#-------------------------------------------------------------------------

import pisa.pisa_inst_remu_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_remu_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_src0_byp_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_src1_byp_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_value_test ),
    asm_test( pisa.pisa_inst_remu_test.gen_random_test ),
])
def test_remu( name, test ):
    run_test( test )
#-------------------------------------------------------------------------
# lw
#-------------------------------------------------------------------------
# Load instructions use a base-register bypass generator instead of the
# src bypass generators used by the register-register ALU tests.

import pisa.pisa_inst_lw_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_lw_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_lw_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_lw_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_lw_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_lw_test.gen_value_test ),
    asm_test( pisa.pisa_inst_lw_test.gen_random_test ),
])
def test_lw( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# lh
#-------------------------------------------------------------------------

import pisa.pisa_inst_lh_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_lh_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_lh_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_lh_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_lh_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_lh_test.gen_value_test ),
    asm_test( pisa.pisa_inst_lh_test.gen_random_test ),
])
def test_lh( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# lhu
#-------------------------------------------------------------------------

import pisa.pisa_inst_lhu_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_lhu_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_lhu_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_lhu_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_lhu_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_lhu_test.gen_value_test ),
    asm_test( pisa.pisa_inst_lhu_test.gen_random_test ),
])
def test_lhu( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# lb
#-------------------------------------------------------------------------

import pisa.pisa_inst_lb_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_lb_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_lb_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_lb_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_lb_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_lb_test.gen_value_test ),
    asm_test( pisa.pisa_inst_lb_test.gen_random_test ),
])
def test_lb( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# lbu
#-------------------------------------------------------------------------

import pisa.pisa_inst_lbu_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_lbu_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_lbu_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_lbu_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_lbu_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_lbu_test.gen_value_test ),
    asm_test( pisa.pisa_inst_lbu_test.gen_random_test ),
])
def test_lbu( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# sw
#-------------------------------------------------------------------------
# Store instructions exercise both the base register and the store-data
# source register, so they include src/srcs bypass generators as well.

import pisa.pisa_inst_sw_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_sw_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_value_test ),
    asm_test( pisa.pisa_inst_sw_test.gen_random_test ),
])
def test_sw( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# sh
#-------------------------------------------------------------------------

import pisa.pisa_inst_sh_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_sh_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_value_test ),
    asm_test( pisa.pisa_inst_sh_test.gen_random_test ),
])
def test_sh( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# sb
#-------------------------------------------------------------------------

import pisa.pisa_inst_sb_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_sb_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_dest_byp_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_base_byp_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_srcs_byp_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_srcs_dest_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_value_test ),
    asm_test( pisa.pisa_inst_sb_test.gen_random_test ),
])
def test_sb( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# j
#-------------------------------------------------------------------------

import pisa.pisa_inst_j_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_j_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_j_test.gen_jump_test ),
])
def test_j( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# jal
#-------------------------------------------------------------------------

import pisa.pisa_inst_jal_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_jal_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_jal_test.gen_link_byp_test ),
    asm_test( pisa.pisa_inst_jal_test.gen_jump_test ),
])
def test_jal( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# jr
#-------------------------------------------------------------------------

import pisa.pisa_inst_jr_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_jr_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_jr_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_jr_test.gen_jump_test ),
])
def test_jr( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# jalr
#-------------------------------------------------------------------------

import pisa.pisa_inst_jalr_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_jalr_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_jalr_test.gen_link_byp_test ),
    asm_test( pisa.pisa_inst_jalr_test.gen_src_byp_test ),
    asm_test( pisa.pisa_inst_jalr_test.gen_jump_test ),
])
def test_jalr( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# beq
#-------------------------------------------------------------------------
# Branch tests cover both taken and not-taken bypass paths for each
# source operand.

import pisa.pisa_inst_beq_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_beq_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_src0_byp_taken_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_src0_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_src1_byp_taken_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_src1_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_srcs_byp_taken_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_srcs_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_src0_eq_src1_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_value_test ),
    asm_test( pisa.pisa_inst_beq_test.gen_random_test ),
])
def test_beq( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# bne
#-------------------------------------------------------------------------

import pisa.pisa_inst_bne_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_bne_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_src0_byp_taken_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_src0_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_src1_byp_taken_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_src1_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_srcs_byp_taken_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_srcs_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_src0_eq_src1_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_value_test ),
    asm_test( pisa.pisa_inst_bne_test.gen_random_test ),
])
def test_bne( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# blez
#-------------------------------------------------------------------------

import pisa.pisa_inst_blez_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_blez_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_blez_test.gen_src_byp_taken_test ),
    asm_test( pisa.pisa_inst_blez_test.gen_src_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_blez_test.gen_value_test ),
    asm_test( pisa.pisa_inst_blez_test.gen_random_test ),
])
def test_blez( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# bgtz
#-------------------------------------------------------------------------

import pisa.pisa_inst_bgtz_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_bgtz_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_bgtz_test.gen_src_byp_taken_test ),
    asm_test( pisa.pisa_inst_bgtz_test.gen_src_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_bgtz_test.gen_value_test ),
    asm_test( pisa.pisa_inst_bgtz_test.gen_random_test ),
])
def test_bgtz( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# bltz
#-------------------------------------------------------------------------

import pisa.pisa_inst_bltz_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_bltz_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_bltz_test.gen_src_byp_taken_test ),
    asm_test( pisa.pisa_inst_bltz_test.gen_src_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_bltz_test.gen_value_test ),
    asm_test( pisa.pisa_inst_bltz_test.gen_random_test ),
])
def test_bltz( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# bgez
#-------------------------------------------------------------------------

import pisa.pisa_inst_bgez_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_bgez_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_bgez_test.gen_src_byp_taken_test ),
    asm_test( pisa.pisa_inst_bgez_test.gen_src_byp_nottaken_test ),
    asm_test( pisa.pisa_inst_bgez_test.gen_value_test ),
    asm_test( pisa.pisa_inst_bgez_test.gen_random_test ),
])
def test_bgez( name, test ):
    run_test( test )

#-------------------------------------------------------------------------
# mtx/mfx
#-------------------------------------------------------------------------

import pisa.pisa_inst_xcel_test

@pytest.mark.parametrize( "name,test", [
    asm_test( pisa.pisa_inst_xcel_test.gen_basic_test ),
    asm_test( pisa.pisa_inst_xcel_test.gen_bypass_mtx_test ),
    asm_test( pisa.pisa_inst_xcel_test.gen_bypass_mfx_test ),
    asm_test( pisa.pisa_inst_xcel_test.gen_bypass_test ),
])
def test_mtx( name, test ):
    run_test( test )
unknown
codeparrot/codeparrot-clean
/* origin: FreeBSD /usr/src/lib/msun/src/s_expm1f.c */ /* * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com. */ /* * ==================================================== * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this * software is freely granted, provided that this notice * is preserved. * ==================================================== */ const O_THRESHOLD: f32 = 8.8721679688e+01; /* 0x42b17180 */ const LN2_HI: f32 = 6.9313812256e-01; /* 0x3f317180 */ const LN2_LO: f32 = 9.0580006145e-06; /* 0x3717f7d1 */ const INV_LN2: f32 = 1.4426950216e+00; /* 0x3fb8aa3b */ /* * Domain [-0.34568, 0.34568], range ~[-6.694e-10, 6.696e-10]: * |6 / x * (1 + 2 * (1 / (exp(x) - 1) - 1 / x)) - q(x)| < 2**-30.04 * Scaled coefficients: Qn_here = 2**n * Qn_for_q (see s_expm1.c): */ const Q1: f32 = -3.3333212137e-2; /* -0x888868.0p-28 */ const Q2: f32 = 1.5807170421e-3; /* 0xcf3010.0p-33 */ /// Exponential, base *e*, of x-1 (f32) /// /// Calculates the exponential of `x` and subtract 1, that is, *e* raised /// to the power `x` minus 1 (where *e* is the base of the natural /// system of logarithms, approximately 2.71828). /// The result is accurate even for small values of `x`, /// where using `exp(x)-1` would lose many significant digits. 
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)] pub fn expm1f(mut x: f32) -> f32 { let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127 let mut hx = x.to_bits(); let sign = (hx >> 31) != 0; hx &= 0x7fffffff; /* filter out huge and non-finite argument */ if hx >= 0x4195b844 { /* if |x|>=27*ln2 */ if hx > 0x7f800000 { /* NaN */ return x; } if sign { return -1.; } if x > O_THRESHOLD { x *= x1p127; return x; } } let k: i32; let hi: f32; let lo: f32; let mut c = 0f32; /* argument reduction */ if hx > 0x3eb17218 { /* if |x| > 0.5 ln2 */ if hx < 0x3F851592 { /* and |x| < 1.5 ln2 */ if !sign { hi = x - LN2_HI; lo = LN2_LO; k = 1; } else { hi = x + LN2_HI; lo = -LN2_LO; k = -1; } } else { k = (INV_LN2 * x + (if sign { -0.5 } else { 0.5 })) as i32; let t = k as f32; hi = x - t * LN2_HI; /* t*ln2_hi is exact here */ lo = t * LN2_LO; } x = hi - lo; c = (hi - x) - lo; } else if hx < 0x33000000 { /* when |x|<2**-25, return x */ if hx < 0x00800000 { force_eval!(x * x); } return x; } else { k = 0; } /* x is now in primary range */ let hfx = 0.5 * x; let hxs = x * hfx; let r1 = 1. + hxs * (Q1 + hxs * Q2); let t = 3. - r1 * hfx; let mut e = hxs * ((r1 - t) / (6. - x * t)); if k == 0 { /* c is 0 */ return x - (x * e - hxs); } e = x * (e - c) - c; e -= hxs; /* exp(x) ~ 2^k (x_reduced - e + 1) */ if k == -1 { return 0.5 * (x - e) - 0.5; } if k == 1 { if x < -0.25 { return -2. * (e - (x + 0.5)); } return 1. + 2. * (x - e); } let twopk = f32::from_bits(((0x7f + k) << 23) as u32); /* 2^k */ if !(0..=56).contains(&k) { /* suffice to return exp(x)-1 */ let mut y = x - e + 1.; if k == 128 { y = y * 2. * x1p127; } else { y = y * twopk; } return y - 1.; } let uf = f32::from_bits(((0x7f - k) << 23) as u32); /* 2^-k */ if k < 23 { (x - e + (1. - uf)) * twopk } else { (x - (e + uf) + 1.) * twopk } }
rust
github
https://github.com/nodejs/node
deps/crates/vendor/libm/src/math/expm1f.rs